mirror of https://github.com/rclone/rclone.git synced 2026-01-22 20:33:17 +00:00

Compare commits


2 Commits

Author          | SHA1       | Message | Date
Nick Craig-Wood | e14109e1b4 | convmv: update help text - FIXME WIP (need a help line for each conversion mode) | 2025-03-10 18:33:43 +00:00
nielash         | d08543070a | convmv command WIP | 2025-03-10 18:33:42 +00:00
180 changed files with 2657 additions and 8585 deletions

View File

@@ -226,8 +226,6 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install Go
id: setup-go
@@ -291,10 +289,6 @@ jobs:
- name: Scan for vulnerabilities
run: govulncheck ./...
- name: Scan edits of autogenerated files
run: bin/check_autogenerated_edits.py
if: github.event_name == 'pull_request'
android:
if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
timeout-minutes: 30

View File

@@ -572,19 +572,3 @@ Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)
[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)
## Keeping a backend or command out of tree
Rclone was designed to be modular so it is very easy to keep a backend
or a command out of the main rclone source tree.
So for example if you had a backend which accessed your proprietary
systems or a command which was specialised for your needs you could
add them out of tree.
This may be easier than using a plugin and is supported on all
platforms not just macOS and Linux.
This is explained further in https://github.com/rclone/rclone_out_of_tree_example
which has an example of an out of tree backend `ram` (which is a
renamed version of the `memory` backend).
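For orientation, an out-of-tree build as described above amounts to a small `main` package that pulls in rclone plus your own backend. This is a minimal sketch following the layout of the linked example repo; the commented import path is hypothetical and stands in for your package, whose `init()` registers the backend.

```go
package main

import (
	_ "github.com/rclone/rclone/backend/all" // register all in-tree backends
	"github.com/rclone/rclone/cmd"
	_ "github.com/rclone/rclone/cmd/all" // register all in-tree commands

	// _ "example.com/you/rclone_out_of_tree_example/backend/ram" // your backend (hypothetical path)
)

func main() {
	cmd.Main()
}
```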

View File

@@ -1,4 +1,20 @@
<div align="center">
<sup>Special thanks to our sponsor:</sup>
<br>
<br>
<a href="https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103">
<div>
<img src="https://rclone.org/img/logos/warp-github.svg" width="300" alt="Warp">
</div>
<b>Warp is a modern, Rust-based terminal with AI built in so you and your team can build great software, faster.</b>
<div>
<sup>Visit warp.dev to learn more.</sup>
</div>
</a>
<br>
<hr>
</div>
<br>
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)

View File

@@ -44,7 +44,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
@@ -1378,7 +1378,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
containerName, directory := f.split(dir)
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
listR := func(containerName, directory, prefix string, addContainer bool) error {
return f.list(ctx, containerName, directory, prefix, addContainer, true, int32(f.opt.ListChunkSize), func(remote string, object *container.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
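This `list.NewHelper` → `walk.NewListRHelper` substitution recurs across many backends below. Both helpers batch directory entries and hand them to the `ListR` callback in tranches; a minimal sketch of the shape a backend's `ListR` takes, using only the `Add`/`Flush` methods visible in these hunks (`f.walkDir` is a hypothetical stand-in for the backend's own lister):

```go
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) error {
	list := walk.NewListRHelper(callback)
	err := f.walkDir(ctx, dir, func(entry fs.DirEntry) error {
		return list.Add(entry) // buffers; invokes callback once a tranche fills
	})
	if err != nil {
		return err
	}
	return list.Flush() // deliver the final partial tranche
}
```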

View File

@@ -31,8 +31,8 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/multipart"
@@ -918,7 +918,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
last := ""
return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {

View File

@@ -29,7 +29,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit"
@@ -1087,7 +1086,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return cachedEntries, nil
}
func (f *Fs) recurse(ctx context.Context, dir string, list *list.Helper) error {
func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
entries, err := f.List(ctx, dir)
if err != nil {
return err
@@ -1139,7 +1138,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
}
// if we're here, we're gonna do a standard recursive traversal and cache everything
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
err = f.recurse(ctx, dir, list)
if err != nil {
return err

View File

@@ -17,7 +17,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata", "ListP"},
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"},
UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache

View File

@@ -356,8 +356,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
DirModTimeUpdatesOnWrite: true,
}).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs)
f.features.ListR = nil // Recursive listing may cause chunker skip files
f.features.ListP = nil // ListP not supported yet
f.features.Disable("ListR") // Recursive listing may cause chunker skip files
return f, err
}

View File

@@ -46,7 +46,6 @@ func TestIntegration(t *testing.T) {
"DirCacheFlush",
"UserInfo",
"Disconnect",
"ListP",
},
}
if *fstest.RemoteName == "" {

View File

@@ -18,7 +18,7 @@ type CloudinaryEncoder interface {
ToStandardPath(string) string
// ToStandardName takes name in this encoding and converts
// it in Standard encoding.
ToStandardName(string, string) string
ToStandardName(string) string
// Encoded root of the remote (as passed into NewFs)
FromStandardFullPath(string) string
}

View File

@@ -8,9 +8,7 @@ import (
"fmt"
"io"
"net/http"
"net/url"
"path"
"slices"
"strconv"
"strings"
"time"
@@ -105,39 +103,19 @@ func init() {
Advanced: true,
Help: "Wait N seconds for eventual consistency of the databases that support the backend operation",
},
{
Name: "adjust_media_files_extensions",
Default: true,
Advanced: true,
Help: "Cloudinary handles media formats as a file attribute and strips it from the name, which is unlike most other file systems",
},
{
Name: "media_extensions",
Default: []string{
"3ds", "3g2", "3gp", "ai", "arw", "avi", "avif", "bmp", "bw",
"cr2", "cr3", "djvu", "dng", "eps3", "fbx", "flif", "flv", "gif",
"glb", "gltf", "hdp", "heic", "heif", "ico", "indd", "jp2", "jpe",
"jpeg", "jpg", "jxl", "jxr", "m2ts", "mov", "mp4", "mpeg", "mts",
"mxf", "obj", "ogv", "pdf", "ply", "png", "psd", "svg", "tga",
"tif", "tiff", "ts", "u3ma", "usdz", "wdp", "webm", "webp", "wmv"},
Advanced: true,
Help: "Cloudinary supported media extensions",
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
CloudName string `config:"cloud_name"`
APIKey string `config:"api_key"`
APISecret string `config:"api_secret"`
UploadPrefix string `config:"upload_prefix"`
UploadPreset string `config:"upload_preset"`
Enc encoder.MultiEncoder `config:"encoding"`
EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"`
MediaExtensions []string `config:"media_extensions"`
AdjustMediaFilesExtensions bool `config:"adjust_media_files_extensions"`
CloudName string `config:"cloud_name"`
APIKey string `config:"api_key"`
APISecret string `config:"api_secret"`
UploadPrefix string `config:"upload_prefix"`
UploadPreset string `config:"upload_preset"`
Enc encoder.MultiEncoder `config:"encoding"`
EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"`
}
// Fs represents a remote cloudinary server
@@ -225,18 +203,6 @@ func (f *Fs) FromStandardPath(s string) string {
// FromStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardName(s string) string {
if f.opt.AdjustMediaFilesExtensions {
parsedURL, err := url.Parse(s)
ext := ""
if err != nil {
fs.Logf(nil, "Error parsing URL: %v", err)
} else {
ext = path.Ext(parsedURL.Path)
if slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
s = strings.TrimSuffix(parsedURL.Path, ext)
}
}
}
return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06")
}
@@ -246,20 +212,8 @@ func (f *Fs) ToStandardPath(s string) string {
}
// ToStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardName(s string, assetURL string) string {
ext := ""
if f.opt.AdjustMediaFilesExtensions {
parsedURL, err := url.Parse(assetURL)
if err != nil {
fs.Logf(nil, "Error parsing URL: %v", err)
} else {
ext = path.Ext(parsedURL.Path)
if !slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
ext = ""
}
}
}
return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&") + ext
func (f *Fs) ToStandardName(s string) string {
return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&")
}
// FromStandardFullPath encodes a full path to Cloudinary standard
@@ -377,7 +331,10 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
}
for _, asset := range results.Assets {
remote := path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName, asset.SecureURL))
remote := api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName)
if dir != "" {
remote = path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName))
}
o := &Object{
fs: f,
remote: remote,
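The `adjust_media_files_extensions` handling in this hunk boils down to trimming a recognised media extension from the name (Cloudinary records the format as an asset attribute, not in the public ID) and restoring it from the asset URL on listing. A distilled, self-contained sketch of the trimming side (the helper name is illustrative):

```go
package main

import (
	"fmt"
	"path"
	"slices"
	"strings"
)

// trimMediaExt strips a recognised media extension, mirroring the
// FromStandardName logic shown in the hunk above.
func trimMediaExt(name string, mediaExts []string) string {
	ext := path.Ext(name)
	if slices.Contains(mediaExts, strings.ToLower(strings.TrimPrefix(ext, "."))) {
		return strings.TrimSuffix(name, ext)
	}
	return name
}

func main() {
	exts := []string{"jpg", "png", "mp4"}
	fmt.Println(trimMediaExt("holiday.JPG", exts)) // "holiday"
	fmt.Println(trimMediaExt("notes.txt", exts))   // "notes.txt" (unchanged)
}
```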

View File

@@ -20,7 +20,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"golang.org/x/sync/errgroup"
@@ -266,9 +265,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
}
}
// Enable ListP always
features.ListP = f.ListP
// Enable Purge when any upstreams support it
if features.Purge == nil {
for _, u := range f.upstreams {
@@ -813,52 +809,24 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
if f.root == "" && dir == "" {
entries := make(fs.DirEntries, 0, len(f.upstreams))
entries = make(fs.DirEntries, 0, len(f.upstreams))
for combineDir := range f.upstreams {
d := fs.NewLimitedDirWrapper(combineDir, fs.NewDir(combineDir, f.when))
entries = append(entries, d)
}
return callback(entries)
return entries, nil
}
u, uRemote, err := f.findUpstream(dir)
if err != nil {
return err
return nil, err
}
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := u.wrapEntries(ctx, entries)
if err != nil {
return err
}
return callback(entries)
entries, err = u.f.List(ctx, uRemote)
if err != nil {
return nil, err
}
listP := u.f.Features().ListP
if listP == nil {
entries, err := u.f.List(ctx, uRemote)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, dir, wrappedCallback)
return u.wrapEntries(ctx, entries)
}
// ListR lists the objects and directories of the Fs starting
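Several wrapping backends in this compare (combine here, compress, crypt, hashsum, and s3 below) share one paginated-listing shape: `List` delegates to `list.WithListP`, and `ListP` forwards to the wrapped backend's `ListP` when available, falling back to a plain `List`. A condensed sketch of that shape for a single wrapped `Fs`, using only names visible in the hunks (`wrapEntries` stands in for the backend-specific post-processing):

```go
// List delegates to the paginated path.
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
	return list.WithListP(ctx, dir, f)
}

// ListP streams entries to callback in tranches, post-processing each
// tranche before passing it on.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
	wrappedCallback := func(entries fs.DirEntries) error {
		entries, err := f.wrapEntries(entries)
		if err != nil {
			return err
		}
		return callback(entries)
	}
	if listP := f.Fs.Features().ListP; listP != nil {
		return listP(ctx, dir, wrappedCallback)
	}
	entries, err := f.Fs.List(ctx, dir)
	if err != nil {
		return err
	}
	return wrappedCallback(entries)
}
```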

View File

@@ -29,7 +29,6 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
@@ -209,8 +208,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
if !operations.CanServerSideMove(wrappedFs) {
f.features.Disable("PutStream")
}
// Enable ListP always
f.features.ListP = f.ListP
return f, err
}
@@ -355,39 +352,11 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
// found.
// List entries and process them
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := f.processEntries(entries)
if err != nil {
return err
}
return callback(entries)
entries, err = f.Fs.List(ctx, dir)
if err != nil {
return nil, err
}
listP := f.Fs.Features().ListP
if listP == nil {
entries, err := f.Fs.List(ctx, dir)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, dir, wrappedCallback)
return f.processEntries(entries)
}
// ListR lists the objects and directories of the Fs starting

View File

@@ -18,7 +18,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
)
// Globals
@@ -294,9 +293,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// Enable ListP always
f.features.ListP = f.ListP
return f, err
}
@@ -420,40 +416,11 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := f.encryptEntries(ctx, entries)
if err != nil {
return err
}
return callback(entries)
entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
if err != nil {
return nil, err
}
listP := f.Fs.Features().ListP
encryptedDir := f.cipher.EncryptDirName(dir)
if listP == nil {
entries, err := f.Fs.List(ctx, encryptedDir)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, encryptedDir, wrappedCallback)
return f.encryptEntries(ctx, entries)
}
// ListR lists the objects and directories of the Fs starting

View File

@@ -38,8 +38,8 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
@@ -1745,7 +1745,7 @@ func (f *Fs) createDir(ctx context.Context, pathID, leaf string, metadata fs.Met
}
var updateMetadata updateMetadataFn
if len(metadata) > 0 {
updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true, true)
updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true)
if err != nil {
return nil, fmt.Errorf("create dir: failed to update metadata: %w", err)
}
@@ -1776,7 +1776,7 @@ func (f *Fs) updateDir(ctx context.Context, dirID string, metadata fs.Metadata)
}
dirID = actualID(dirID)
updateInfo := &drive.File{}
updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true, true)
updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true)
if err != nil {
return nil, fmt.Errorf("update dir: failed to update metadata from source object: %w", err)
}
@@ -2189,7 +2189,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
wg := sync.WaitGroup{}
in := make(chan listREntry, listRInputBuffer)
out := make(chan error, f.ci.Checkers)
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
overflow := []listREntry{}
listed := 0

View File

@@ -507,7 +507,7 @@ type updateMetadataFn func(context.Context, *drive.File) error
//
// It returns a callback which should be called to finish the updates
// after the data is uploaded.
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update, isFolder bool) (callback updateMetadataFn, err error) {
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update bool) (callback updateMetadataFn, err error) {
callbackFns := []updateMetadataFn{}
callback = func(ctx context.Context, info *drive.File) error {
for _, fn := range callbackFns {
@@ -532,9 +532,7 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
}
switch k {
case "copy-requires-writer-permission":
if isFolder {
fs.Debugf(f, "Ignoring %s=%s as can't set on folders", k, v)
} else if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
return nil, err
}
case "writers-can-share":
@@ -631,7 +629,7 @@ func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, opti
if err != nil {
return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
}
callback, err = f.updateMetadata(ctx, updateInfo, meta, update, false)
callback, err = f.updateMetadata(ctx, updateInfo, meta, update)
if err != nil {
return nil, fmt.Errorf("failed to update metadata from source object: %w", err)
}

View File

@@ -25,7 +25,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
@@ -734,7 +734,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
// implementation of ListR
func (f *Fs) listR(ctx context.Context, dir string, list *list.Helper) (err error) {
func (f *Fs) listR(ctx context.Context, dir string, list *walk.ListRHelper) (err error) {
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
@@ -820,7 +820,7 @@ func (f *Fs) listR(ctx context.Context, dir string, list *list.Helper) (err erro
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
err = f.listR(ctx, dir, list)
if err != nil {
return err

View File

@@ -35,7 +35,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
@@ -845,7 +845,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)

View File

@@ -43,7 +43,6 @@ var (
errAlbumDelete = errors.New("google photos API does not implement deleting albums")
errRemove = errors.New("google photos API only implements removing files from albums")
errOwnAlbums = errors.New("google photos API only allows uploading to albums rclone created")
errReadOnly = errors.New("can't upload files in read only mode")
)
const (
@@ -53,31 +52,19 @@ const (
listChunks = 100 // chunk size to read directory listings
albumChunks = 50 // chunk size to read album listings
minSleep = 10 * time.Millisecond
scopeAppendOnly = "https://www.googleapis.com/auth/photoslibrary.appendonly"
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly.appcreateddata"
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary.edit.appcreateddata"
scopeReadOnly = "https://www.googleapis.com/auth/photoslibrary.readonly"
scopeReadWrite = "https://www.googleapis.com/auth/photoslibrary"
scopeAccess = 2 // position of access scope in list
)
var (
// scopes needed for read write access
scopesReadWrite = []string{
"openid",
"profile",
scopeAppendOnly,
scopeReadOnly,
scopeReadWrite,
}
// scopes needed for read only access
scopesReadOnly = []string{
"openid",
"profile",
scopeReadOnly,
}
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
Scopes: scopesReadWrite,
Scopes: []string{
"openid",
"profile",
scopeReadWrite, // this must be at position scopeAccess
},
AuthURL: google.Endpoint.AuthURL,
TokenURL: google.Endpoint.TokenURL,
ClientID: rcloneClientID,
@@ -113,9 +100,9 @@ func init() {
case "":
// Fill in the scopes
if opt.ReadOnly {
oauthConfig.Scopes = scopesReadOnly
oauthConfig.Scopes[scopeAccess] = scopeReadOnly
} else {
oauthConfig.Scopes = scopesReadWrite
oauthConfig.Scopes[scopeAccess] = scopeReadWrite
}
return oauthutil.ConfigOut("warning", &oauthutil.Options{
OAuth2Config: oauthConfig,
@@ -180,7 +167,7 @@ listings and won't be transferred.`,
The Google API will deliver images and video which aren't full
resolution, and/or have EXIF data missing.
However if you use the gphotosdl proxy then you can download original,
However if you ue the gphotosdl proxy tnen you can download original,
unchanged images.
This runs a headless browser in the background.
@@ -1133,9 +1120,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
if !album.IsWriteable {
if o.fs.opt.ReadOnly {
return errReadOnly
}
return errOwnAlbums
}

View File

@@ -18,7 +18,6 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/kv"
)
@@ -183,9 +182,6 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
}
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)
// Enable ListP always
f.features.ListP = f.ListP
cache.PinUntilFinalized(f.Fs, f)
return f, err
}
@@ -241,39 +237,10 @@ func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries,
// List the objects and directories in dir into entries.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
wrappedCallback := func(entries fs.DirEntries) error {
entries, err := f.wrapEntries(entries)
if err != nil {
return err
}
return callback(entries)
if entries, err = f.Fs.List(ctx, dir); err != nil {
return nil, err
}
listP := f.Fs.Features().ListP
if listP == nil {
entries, err := f.Fs.List(ctx, dir)
if err != nil {
return err
}
return wrappedCallback(entries)
}
return listP(ctx, dir, wrappedCallback)
return f.wrapEntries(entries)
}
// ListR lists the objects and directories recursively into out.

View File

@@ -180,6 +180,7 @@ func getFsEndpoint(ctx context.Context, client *http.Client, url string, opt *Op
}
addHeaders(req, opt)
res, err := noRedir.Do(req)
if err != nil {
fs.Debugf(nil, "Assuming path is a file as HEAD request could not be sent: %v", err)
return createFileResult()
@@ -248,14 +249,6 @@ func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err
f.httpClient = client
f.endpoint = u
f.endpointURL = u.String()
if isFile {
// Correct root if definitely pointing to a file
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
return isFile, nil
}

View File

@@ -252,14 +252,18 @@ func (d *DriveService) DownloadFile(ctx context.Context, url string, opt []fs.Op
}
resp, err := d.icloud.srv.Call(ctx, opts)
// icloud has some weird http codes
if err != nil && resp != nil && resp.StatusCode == 330 {
loc, err := resp.Location()
if err == nil {
return d.DownloadFile(ctx, loc.String(), opt)
if err != nil {
// icloud has some weird http codes
if resp.StatusCode == 330 {
loc, err := resp.Location()
if err == nil {
return d.DownloadFile(ctx, loc.String(), opt)
}
}
return resp, err
}
return resp, err
return d.icloud.srv.Call(ctx, opts)
}
// MoveItemToTrashByItemID moves an item to the trash based on the item ID.
@@ -627,7 +631,7 @@ func NewUpdateFileInfo() UpdateFileInfo {
FileFlags: FileFlags{
IsExecutable: true,
IsHidden: false,
IsWritable: true,
IsWritable: false,
},
}
}

View File

@@ -31,7 +31,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
@@ -1264,7 +1264,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
Parameters: url.Values{},
}
opts.Parameters.Set("mode", "liststream")
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {

View File

@@ -17,7 +17,7 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
)
@@ -383,7 +383,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
entries := fs.DirEntries{}
listR := func(bucket, directory, prefix string, addBucket bool) error {
err = f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, entry fs.DirEntry, isDirectory bool) error {

View File

@@ -28,7 +28,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
)
@@ -516,7 +516,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return fs.ErrorDirNotFound
}
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
for resumeStart := u.Path; resumeStart != ""; {
var files []File
files, resumeStart, err = f.netStorageListRequest(ctx, URL, u.Path)

View File

@@ -396,57 +396,10 @@ func (m *Metadata) WritePermissions(ctx context.Context) (err error) {
return nil
}
// Order the permissions so that any with users come first.
//
// This is to work around a quirk with Graph:
//
// 1. You are adding permissions for both a group and a user.
// 2. The user is a member of the group.
// 3. The permissions for the group and user are the same.
// 4. You are adding the group permission before the user permission.
//
// When all of the above are true, Graph indicates it has added the
// user permission, but it immediately drops it
//
// See: https://github.com/rclone/rclone/issues/8465
func (m *Metadata) orderPermissions(xs []*api.PermissionsType) {
// Return true if identity has any user permissions
hasUserIdentity := func(identity *api.IdentitySet) bool {
if identity == nil {
return false
}
return identity.User.ID != "" || identity.User.DisplayName != "" || identity.User.Email != "" || identity.User.LoginName != ""
}
// Return true if p has any user permissions
hasUser := func(p *api.PermissionsType) bool {
if hasUserIdentity(p.GetGrantedTo(m.fs.driveType)) {
return true
}
for _, identity := range p.GetGrantedToIdentities(m.fs.driveType) {
if hasUserIdentity(identity) {
return true
}
}
return false
}
// Put Permissions with a user first, leaving unsorted otherwise
slices.SortStableFunc(xs, func(a, b *api.PermissionsType) int {
aHasUser := hasUser(a)
bHasUser := hasUser(b)
if aHasUser && !bHasUser {
return -1
} else if !aHasUser && bHasUser {
return 1
}
return 0
})
}
// sortPermissions sorts the permissions (to be written) into add, update, and remove queues
func (m *Metadata) sortPermissions() (add, update, remove []*api.PermissionsType) {
new, old := m.queuedPermissions, m.permissions
if len(old) == 0 || m.permsAddOnly {
m.orderPermissions(new)
return new, nil, nil // they must all be "add"
}
@@ -494,9 +447,6 @@ func (m *Metadata) sortPermissions() (add, update, remove []*api.PermissionsType
remove = append(remove, o)
}
}
m.orderPermissions(add)
m.orderPermissions(update)
m.orderPermissions(remove)
return add, update, remove
}

View File

@@ -1,125 +0,0 @@
package onedrive
import (
"encoding/json"
"testing"
"github.com/rclone/rclone/backend/onedrive/api"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestOrderPermissions(t *testing.T) {
tests := []struct {
name string
input []*api.PermissionsType
expected []string
}{
{
name: "empty",
input: []*api.PermissionsType{},
expected: []string(nil),
},
{
name: "users first, then group, then none",
input: []*api.PermissionsType{
{ID: "1", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group1"}}},
{ID: "2", GrantedToIdentities: []*api.IdentitySet{{User: api.Identity{DisplayName: "Alice"}}}},
{ID: "3", GrantedTo: &api.IdentitySet{User: api.Identity{DisplayName: "Alice"}}},
{ID: "4"},
},
expected: []string{"2", "3", "1", "4"},
},
{
name: "same type unsorted",
input: []*api.PermissionsType{
{ID: "b", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group B"}}},
{ID: "a", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group A"}}},
{ID: "c", GrantedToIdentities: []*api.IdentitySet{{Group: api.Identity{DisplayName: "Group A"}}, {User: api.Identity{DisplayName: "Alice"}}}},
},
expected: []string{"c", "b", "a"},
},
{
name: "all user identities",
input: []*api.PermissionsType{
{ID: "c", GrantedTo: &api.IdentitySet{User: api.Identity{DisplayName: "Bob"}}},
{ID: "a", GrantedTo: &api.IdentitySet{User: api.Identity{Email: "alice@example.com"}}},
{ID: "b", GrantedToIdentities: []*api.IdentitySet{{User: api.Identity{LoginName: "user3"}}}},
},
expected: []string{"c", "a", "b"},
},
{
name: "no user or group info",
input: []*api.PermissionsType{
{ID: "z"},
{ID: "x"},
{ID: "y"},
},
expected: []string{"z", "x", "y"},
},
}
for _, driveType := range []string{driveTypePersonal, driveTypeBusiness} {
t.Run(driveType, func(t *testing.T) {
for _, tt := range tests {
m := &Metadata{fs: &Fs{driveType: driveType}}
t.Run(tt.name, func(t *testing.T) {
if driveType == driveTypeBusiness {
for i := range tt.input {
tt.input[i].GrantedToV2 = tt.input[i].GrantedTo
tt.input[i].GrantedTo = nil
tt.input[i].GrantedToIdentitiesV2 = tt.input[i].GrantedToIdentities
tt.input[i].GrantedToIdentities = nil
}
}
m.orderPermissions(tt.input)
var gotIDs []string
for _, p := range tt.input {
gotIDs = append(gotIDs, p.ID)
}
assert.Equal(t, tt.expected, gotIDs)
})
}
})
}
}
func TestOrderPermissionsJSON(t *testing.T) {
testJSON := `[
{
"id": "1",
"grantedToV2": {
"group": {
"id": "group@example.com"
}
},
"roles": [
"write"
]
},
{
"id": "2",
"grantedToV2": {
"user": {
"id": "user@example.com"
}
},
"roles": [
"write"
]
}
]`
var testPerms []*api.PermissionsType
err := json.Unmarshal([]byte(testJSON), &testPerms)
require.NoError(t, err)
m := &Metadata{fs: &Fs{driveType: driveTypeBusiness}}
m.orderPermissions(testPerms)
var gotIDs []string
for _, p := range testPerms {
gotIDs = append(gotIDs, p.ID)
}
assert.Equal(t, []string{"2", "1"}, gotIDs)
}

View File

@@ -30,7 +30,6 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
@@ -56,7 +55,6 @@ const (
driveTypeSharepoint = "documentLibrary"
defaultChunkSize = 10 * fs.Mebi
chunkSizeMultiple = 320 * fs.Kibi
maxSinglePartSize = 4 * fs.Mebi
regionGlobal = "global"
regionUS = "us"
@@ -139,21 +137,6 @@ func init() {
Help: "Azure and Office 365 operated by Vnet Group in China",
},
},
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload.
Any files larger than this will be uploaded in chunks of chunk_size.
This is disabled by default as uploading using single part uploads
causes rclone to use twice the storage on Onedrive business as when
rclone sets the modification time after the upload Onedrive creates a
new version.
See: https://github.com/rclone/rclone/issues/1716
`,
Default: fs.SizeSuffix(-1),
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).
@@ -762,7 +745,6 @@ Examples:
// Options defines the configuration for this backend
type Options struct {
Region string `config:"region"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"`
DriveType string `config:"drive_type"`
@@ -1039,13 +1021,6 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
return
}
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxSinglePartSize {
return fmt.Errorf("%v is greater than %v", cs, maxSinglePartSize)
}
return nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
@@ -1059,10 +1034,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("onedrive: chunk size: %w", err)
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, fmt.Errorf("onedrive: upload cutoff: %w", err)
}
if opt.DriveID == "" || opt.DriveType == "" {
return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
@@ -1425,7 +1396,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// So we have to filter things outside of the root which is
// inefficient.
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
// list a folder conventionally - used for shared folders
var listFolder func(dir string) error
@@ -2497,10 +2468,6 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
return false, nil
}
return true, fmt.Errorf("retry this chunk skipping %d bytes: %w", skip, err)
} else if err != nil && resp != nil && resp.StatusCode == http.StatusNotFound {
fs.Debugf(o, "Received 404 error: assuming eventual consistency problem with session - retrying chunk: %v", err)
time.Sleep(5 * time.Second) // a little delay to help things along
return true, err
}
if err != nil {
return shouldRetry(ctx, resp, err)
@@ -2595,8 +2562,8 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.Objec
// This function will set modtime and metadata after uploading, which will create a new version for the remote file
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (info *api.Item, err error) {
size := src.Size()
if size < 0 || size > int64(maxSinglePartSize) {
return nil, fmt.Errorf("size passed into uploadSinglepart must be >= 0 and <= %v", maxSinglePartSize)
if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4 MiB")
}
fs.Debugf(o, "Starting singlepart upload")
@@ -2649,9 +2616,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
size := src.Size()
var info *api.Item
if size > 0 && size >= int64(o.fs.opt.UploadCutoff) {
if size > 0 {
info, err = o.uploadMultipart(ctx, in, src, options...)
} else if size >= 0 {
} else if size == 0 {
info, err = o.uploadSinglepart(ctx, in, src, options...)
} else {
return errors.New("unknown-sized upload not supported")

View File

@@ -18,8 +18,8 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/pacer"
)
@@ -649,7 +649,7 @@ of listing recursively that doing a directory traversal.
*/
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucketName, directory := f.split(dir)
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)

View File

@@ -27,7 +27,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
@@ -631,7 +631,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
err = f.listHelper(ctx, dir, true, func(o fs.DirEntry) error {
return list.Add(o)
})

View File

@@ -22,7 +22,7 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
qsConfig "github.com/yunify/qingstor-sdk-go/v3/config"
@@ -704,7 +704,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *qs.KeyType, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)

View File

@@ -48,8 +48,8 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
@@ -4481,7 +4481,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *types.Ob
}
// listDir lists files and directories to out
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
// List the objects and directories
err = f.list(ctx, listOpt{
bucket: bucket,
@@ -4497,16 +4497,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
return err
}
if entry != nil {
return callback(entry)
entries = append(entries, entry)
}
return nil
})
if err != nil {
return err
return nil, err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return nil
return entries, nil
}
// listBuckets lists the buckets to out
@@ -4539,46 +4539,14 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return fs.ErrorListBucketRequired
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
if err != nil {
return err
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
}
return list.Flush()
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
}
// ListR lists the objects and directories of the Fs starting
@@ -4599,7 +4567,7 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, listOpt{
bucket: bucket,
@@ -5093,7 +5061,7 @@ or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier to the Fre
Usage Examples:
rclone backend restore s3:bucket/path/to/ --include /object -o priority=PRIORITY -o lifetime=DAYS
rclone backend restore s3:bucket/path/to/object -o priority=PRIORITY -o lifetime=DAYS
rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS
rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS
rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY
@@ -6875,7 +6843,6 @@ var (
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.Commander = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.OpenChunkWriter = &Fs{}

View File

@@ -10,7 +10,6 @@ import (
"fmt"
"io"
iofs "io/fs"
"net/url"
"os"
"path"
"regexp"
@@ -483,14 +482,6 @@ Example:
myUser:myPass@localhost:9005
`,
Advanced: true,
}, {
Name: "http_proxy",
Default: "",
Help: `URL for HTTP CONNECT proxy
Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.
`,
Advanced: true,
}, {
Name: "copy_is_hardlink",
Default: false,
@@ -554,7 +545,6 @@ type Options struct {
HostKeyAlgorithms fs.SpaceSepList `config:"host_key_algorithms"`
SSH fs.SpaceSepList `config:"ssh"`
SocksProxy string `config:"socks_proxy"`
HTTPProxy string `config:"http_proxy"`
CopyIsHardlink bool `config:"copy_is_hardlink"`
}
@@ -580,7 +570,6 @@ type Fs struct {
savedpswd string
sessions atomic.Int32 // count in use sessions
tokens *pacer.TokenDispenser
proxyURL *url.URL // address of HTTP proxy read from environment
}
// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -878,15 +867,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
opt.Port = "22"
}
// get proxy URL if set
if opt.HTTPProxy != "" {
proxyURL, err := url.Parse(opt.HTTPProxy)
if err != nil {
return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err)
}
f.proxyURL = proxyURL
}
sshConfig := &ssh.ClientConfig{
User: opt.User,
Auth: []ssh.AuthMethod{},

View File

@@ -31,8 +31,6 @@ func (f *Fs) newSSHClientInternal(ctx context.Context, network, addr string, ssh
)
if f.opt.SocksProxy != "" {
conn, err = proxy.SOCKS5Dial(network, addr, f.opt.SocksProxy, baseDialer)
} else if f.proxyURL != nil {
conn, err = proxy.HTTPConnectDial(network, addr, f.proxyURL, baseDialer)
} else {
conn, err = baseDialer.Dial(network, addr)
}
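The branch removed here dials through an HTTP CONNECT proxy before starting the SSH handshake. A from-scratch sketch of the CONNECT handshake itself, independent of rclone's `proxy` package (whose exact API is only what the hunk shows); note the caveat in the final comment:

```go
package proxysketch

import (
	"bufio"
	"fmt"
	"net"
	"net/http"
)

// httpConnectDial opens a TCP tunnel to addr via an HTTP proxy using
// the CONNECT verb, returning the raw connection on a 2xx response.
func httpConnectDial(proxyAddr, addr string) (net.Conn, error) {
	conn, err := net.Dial("tcp", proxyAddr)
	if err != nil {
		return nil, err
	}
	fmt.Fprintf(conn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", addr, addr)
	resp, err := http.ReadResponse(bufio.NewReader(conn), nil)
	if err != nil {
		conn.Close()
		return nil, err
	}
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		conn.Close()
		return nil, fmt.Errorf("proxy CONNECT failed: %s", resp.Status)
	}
	// Caveat: a production version must account for any bytes the
	// bufio.Reader buffered past the response headers, since those
	// already belong to the tunnel.
	return conn, nil
}
```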

View File

@@ -25,8 +25,8 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
@@ -846,7 +846,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
container, directory := f.split(dir)
list := list.NewHelper(callback)
list := walk.NewListRHelper(callback)
listR := func(container, directory, prefix string, addContainer bool) error {
return f.list(ctx, container, directory, prefix, addContainer, true, false, func(entry fs.DirEntry) error {
return list.Add(entry)

View File

@@ -1020,9 +1020,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
}
// Disable ListP always
features.ListP = nil
// show that we wrap other backends
features.Overlay = true

View File

@@ -12,7 +12,7 @@ import (
)
var (
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "PublicLink", "PutUnchecked", "MergeDirs", "OpenWriterAt", "OpenChunkWriter", "ListP"}
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "PublicLink", "PutUnchecked", "MergeDirs", "OpenWriterAt", "OpenChunkWriter"}
unimplementableObjectMethods = []string{}
)

View File

@@ -82,37 +82,22 @@ type Prop struct {
// Parse a status of the form "HTTP/1.1 200 OK" or "HTTP/1.1 200"
var parseStatus = regexp.MustCompile(`^HTTP/[0-9.]+\s+(\d+)`)
// Code extracts the status code from the first status
func (p *Prop) Code() int {
// StatusOK examines the Status and returns an OK flag
func (p *Prop) StatusOK() bool {
// Assume OK if no statuses received
if len(p.Status) == 0 {
return -1
return true
}
match := parseStatus.FindStringSubmatch(p.Status[0])
if len(match) < 2 {
return 0
return false
}
code, err := strconv.Atoi(match[1])
if err != nil {
return 0
}
return code
}
// StatusOK examines the Status and returns an OK flag
func (p *Prop) StatusOK() bool {
// Fetch status code as int
c := p.Code()
// Assume OK if no statuses received
if c == -1 {
return true
}
if c == 0 {
return false
}
if c >= 200 && c < 300 {
if code >= 200 && code < 300 {
return true
}
return false
}
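The `Code`/`StatusOK` split in this hunk separates status parsing from the OK policy. A self-contained sketch of the same parse, reusing the regexp shown above:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// parseStatus matches "HTTP/1.1 200 OK" or "HTTP/1.1 200", as in the hunk.
var parseStatus = regexp.MustCompile(`^HTTP/[0-9.]+\s+(\d+)`)

// code returns the numeric status, or 0 when the status line is unparsable.
func code(status string) int {
	match := parseStatus.FindStringSubmatch(status)
	if len(match) < 2 {
		return 0
	}
	c, err := strconv.Atoi(match[1])
	if err != nil {
		return 0
	}
	return c
}

func main() {
	fmt.Println(code("HTTP/1.1 207 Multi-Status")) // 207
	fmt.Println(code("garbage"))                   // 0
}
```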

View File

@@ -1,40 +0,0 @@
package webdav
import (
"errors"
"fmt"
)
var (
// ErrChunkSize is returned when the chunk size is zero
ErrChunkSize = errors.New("tus chunk size must be greater than zero")
// ErrNilLogger is returned when the logger is nil
ErrNilLogger = errors.New("tus logger can't be nil")
// ErrNilStore is returned when the store is nil
ErrNilStore = errors.New("tus store can't be nil if resume is enable")
// ErrNilUpload is returned when the upload is nil
ErrNilUpload = errors.New("tus upload can't be nil")
// ErrLargeUpload is returned when the upload body is to large
ErrLargeUpload = errors.New("tus upload body is to large")
// ErrVersionMismatch is returned when the tus protocol version is mismatching
ErrVersionMismatch = errors.New("tus protocol version mismatch")
// ErrOffsetMismatch is returned when the tus upload offset is mismatching
ErrOffsetMismatch = errors.New("tus upload offset mismatch")
// ErrUploadNotFound is returned when the tus upload is not found
ErrUploadNotFound = errors.New("tus upload not found")
// ErrResumeNotEnabled is returned when the tus resuming is not enabled
ErrResumeNotEnabled = errors.New("tus resuming not enabled")
// ErrFingerprintNotSet is returned when the tus fingerprint is not set
ErrFingerprintNotSet = errors.New("tus fingerprint not set")
)
// ClientError represents an error state of a client
type ClientError struct {
Code int
Body []byte
}
// Error returns an error string containing the client error code
func (c ClientError) Error() string {
return fmt.Sprintf("unexpected status code: %d", c.Code)
}

View File

@@ -1,88 +0,0 @@
package webdav
import (
"bytes"
"encoding/base64"
"fmt"
"io"
"strings"
)
// Metadata is a typedef for a string to string map to hold metadata
type Metadata map[string]string
// Upload is a struct containing the file status during upload
type Upload struct {
stream io.ReadSeeker
size int64
offset int64
Fingerprint string
Metadata Metadata
}
// Updates the Upload information based on offset.
func (u *Upload) updateProgress(offset int64) {
u.offset = offset
}
// Finished returns whether this upload is finished or not.
func (u *Upload) Finished() bool {
return u.offset >= u.size
}
// Progress returns the progress in a percentage.
func (u *Upload) Progress() int64 {
return (u.offset * 100) / u.size
}
// Offset returns the current upload offset.
func (u *Upload) Offset() int64 {
return u.offset
}
// Size returns the size of the upload body.
func (u *Upload) Size() int64 {
return u.size
}
// EncodedMetadata encodes the upload metadata.
func (u *Upload) EncodedMetadata() string {
var encoded []string
for k, v := range u.Metadata {
encoded = append(encoded, fmt.Sprintf("%s %s", k, b64encode(v)))
}
return strings.Join(encoded, ",")
}
func b64encode(s string) string {
return base64.StdEncoding.EncodeToString([]byte(s))
}
// NewUpload creates a new upload from an io.Reader.
func NewUpload(reader io.Reader, size int64, metadata Metadata, fingerprint string) *Upload {
stream, ok := reader.(io.ReadSeeker)
if !ok {
buf := new(bytes.Buffer)
_, err := buf.ReadFrom(reader)
if err != nil {
return nil
}
stream = bytes.NewReader(buf.Bytes())
}
if metadata == nil {
metadata = make(Metadata)
}
return &Upload{
stream: stream,
size: size,
Fingerprint: fingerprint,
Metadata: metadata,
}
}

View File

@@ -1,191 +0,0 @@
package webdav
import (
"bytes"
"context"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
// Uploader holds all information about a currently running upload
type Uploader struct {
fs *Fs
url string
upload *Upload
offset int64
aborted bool
uploadSubs []chan Upload
notifyChan chan bool
overridePatchMethod bool
}
// NotifyUploadProgress subscribes to progress updates.
func (u *Uploader) NotifyUploadProgress(c chan Upload) {
u.uploadSubs = append(u.uploadSubs, c)
}
func (f *Fs) shouldRetryChunk(ctx context.Context, resp *http.Response, err error, newOff *int64) (bool, error) {
if resp == nil {
return true, err
}
switch resp.StatusCode {
case 204:
if off, err := strconv.ParseInt(resp.Header.Get("Upload-Offset"), 10, 64); err == nil {
*newOff = off
return false, nil
}
return false, err
case 409:
return false, ErrOffsetMismatch
case 412:
return false, ErrVersionMismatch
case 413:
return false, ErrLargeUpload
}
return f.shouldRetry(ctx, resp, err)
}
func (u *Uploader) uploadChunk(ctx context.Context, body io.Reader, size int64, offset int64, options ...fs.OpenOption) (int64, error) {
var method string
if !u.overridePatchMethod {
method = "PATCH"
} else {
method = "POST"
}
extraHeaders := map[string]string{} // FIXME: Use extraHeaders(ctx, src) from Object maybe?
extraHeaders["Upload-Offset"] = strconv.FormatInt(offset, 10)
extraHeaders["Tus-Resumable"] = "1.0.0"
extraHeaders["filetype"] = u.upload.Metadata["filetype"]
if u.overridePatchMethod {
extraHeaders["X-HTTP-Method-Override"] = "PATCH"
}
url, err := url.Parse(u.url)
if err != nil {
return 0, fmt.Errorf("upload Chunk failed, could not parse url")
}
// FIXME: Use GetBody func as in chunking.go
opts := rest.Opts{
Method: method,
Path: url.Path,
NoResponse: true,
RootURL: fmt.Sprintf("%s://%s", url.Scheme, url.Host),
ContentLength: &size,
Body: body,
ContentType: "application/offset+octet-stream",
ExtraHeaders: extraHeaders,
Options: options,
}
var newOffset int64
err = u.fs.pacer.CallNoRetry(func() (bool, error) {
res, err := u.fs.srv.Call(ctx, &opts)
return u.fs.shouldRetryChunk(ctx, res, err, &newOffset)
})
if err != nil {
return 0, fmt.Errorf("uploadChunk failed: %w", err)
// FIXME What do we do here? Remove the entire upload?
// See https://github.com/tus/tusd/issues/176
}
return newOffset, nil
}
// Upload uploads the entire body to the server.
func (u *Uploader) Upload(ctx context.Context, options ...fs.OpenOption) error {
cnt := 1
fs.Debug(u.fs, "Uploaded starts")
for u.offset < u.upload.size && !u.aborted {
err := u.UploadChunk(ctx, cnt, options...)
cnt++
if err != nil {
return err
}
}
fs.Debug(u.fs, "-- Uploaded finished")
return nil
}
// UploadChunk uploads a single chunk.
func (u *Uploader) UploadChunk(ctx context.Context, cnt int, options ...fs.OpenOption) error {
chunkSize := u.fs.opt.ChunkSize
data := make([]byte, chunkSize)
_, err := u.upload.stream.Seek(u.offset, 0)
if err != nil {
fs.Errorf(u.fs, "Chunk %d: Error seek in stream failed: %v", cnt, err)
return err
}
size, err := u.upload.stream.Read(data)
if err != nil {
fs.Errorf(u.fs, "Chunk %d: Error: Can not read from data strem: %v", cnt, err)
return err
}
body := bytes.NewBuffer(data[:size])
newOffset, err := u.uploadChunk(ctx, body, int64(size), u.offset, options...)
if err == nil {
fs.Debugf(u.fs, "Uploaded chunk no %d ok, range %d -> %d", cnt, u.offset, newOffset)
} else {
fs.Errorf(u.fs, "Uploaded chunk no %d failed: %v", cnt, err)
return err
}
u.offset = newOffset
u.upload.updateProgress(u.offset)
u.notifyChan <- true
return nil
}
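
Putting `Upload` and `UploadChunk` together: the loop walks the stream in `ChunkSize` steps and advances `u.offset` to whatever `Upload-Offset` the server acknowledges. A hypothetical wire exchange for a 25 MiB file with 10 MiB chunks:

```go
// Hypothetical tus exchange (per the status handling in shouldRetryChunk above):
//
//   PATCH <upload-url>  Upload-Offset: 0         Tus-Resumable: 1.0.0  (10 MiB body)
//   <- 204 No Content   Upload-Offset: 10485760
//   PATCH <upload-url>  Upload-Offset: 10485760  (10 MiB body)
//   <- 204 No Content   Upload-Offset: 20971520
//   PATCH <upload-url>  Upload-Offset: 20971520  (5 MiB body)
//   <- 204 No Content   Upload-Offset: 26214400  (== size, so the loop exits)
```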
// broadcastProgress waits for a signal and broadcasts the upload progress to all subscribers
func (u *Uploader) broadcastProgress() {
for range u.notifyChan {
for _, c := range u.uploadSubs {
c <- *u.upload
}
}
}
// NewUploader creates a new Uploader.
func NewUploader(f *Fs, url string, upload *Upload, offset int64) *Uploader {
notifyChan := make(chan bool)
uploader := &Uploader{
f,
url,
upload,
offset,
false,
nil,
notifyChan,
false,
}
go uploader.broadcastProgress()
return uploader
}

View File

@@ -1,108 +0,0 @@
package webdav
/*
Chunked upload based on the tus protocol for ownCloud Infinite Scale
See https://tus.io/protocols/resumable-upload
*/
import (
"context"
"fmt"
"io"
"net/http"
"path/filepath"
"strconv"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/rest"
)
func (o *Object) updateViaTus(ctx context.Context, in io.Reader, contentType string, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
fn := filepath.Base(src.Remote())
metadata := map[string]string{
"filename": fn,
"mtime": strconv.FormatInt(src.ModTime(ctx).Unix(), 10),
"filetype": contentType,
}
// Fingerprint is used to identify the upload when resuming. That is not yet implemented
fingerprint := ""
// create an upload from a file.
upload := NewUpload(in, src.Size(), metadata, fingerprint)
// create the uploader.
uploader, err := o.CreateUploader(ctx, upload, options...)
if err == nil {
// start the uploading process.
err = uploader.Upload(ctx, options...)
}
return err
}
func (f *Fs) getTusLocationOrRetry(ctx context.Context, resp *http.Response, err error) (bool, string, error) {
switch resp.StatusCode {
case 201:
location := resp.Header.Get("Location")
return false, location, nil
case 412:
return false, "", ErrVersionMismatch
case 413:
return false, "", ErrLargeUpload
}
retry, err := f.shouldRetry(ctx, resp, err)
return retry, "", err
}
// CreateUploader creates a new upload to the server.
func (o *Object) CreateUploader(ctx context.Context, u *Upload, options ...fs.OpenOption) (*Uploader, error) {
if u == nil {
return nil, ErrNilUpload
}
// if c.Config.Resume && len(u.Fingerprint) == 0 {
// return nil, ErrFingerprintNotSet
// }
l := int64(0)
p := o.filePath()
// cut the filename off
dir, _ := filepath.Split(p)
if dir == "" {
dir = "/"
}
opts := rest.Opts{
Method: "POST",
Path: dir,
NoResponse: true,
RootURL: o.fs.endpointURL,
ContentLength: &l,
ExtraHeaders: o.extraHeaders(ctx, o),
Options: options,
}
opts.ExtraHeaders["Upload-Length"] = strconv.FormatInt(u.size, 10)
opts.ExtraHeaders["Upload-Metadata"] = u.EncodedMetadata()
opts.ExtraHeaders["Tus-Resumable"] = "1.0.0"
// opts.ExtraHeaders["mtime"] = strconv.FormatInt(src.ModTime(ctx).Unix(), 10)
var tusLocation string
// rclone http call
err := o.fs.pacer.CallNoRetry(func() (bool, error) {
var retry bool
res, err := o.fs.srv.Call(ctx, &opts)
retry, tusLocation, err = o.fs.getTusLocationOrRetry(ctx, res, err)
return retry, err
})
if err != nil {
return nil, fmt.Errorf("making upload directory failed: %w", err)
}
uploader := NewUploader(o.fs, tusLocation, u, 0)
return uploader, nil
}
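
`CreateUploader` calls `u.EncodedMetadata()`, which is not shown in this diff. Per the tus spec, `Upload-Metadata` is a comma-separated list of `key base64(value)` pairs, so a plausible sketch (needing `encoding/base64` and `strings`) would be:

```go
// Hypothetical sketch of EncodedMetadata, following the tus Upload-Metadata
// format. The real method lives elsewhere in the package and may differ.
func (u *Upload) EncodedMetadata() string {
	pairs := make([]string, 0, len(u.Metadata))
	for k, v := range u.Metadata {
		// each pair is "key base64(value)", pairs are comma-separated
		pairs = append(pairs, k+" "+base64.StdEncoding.EncodeToString([]byte(v)))
	}
	return strings.Join(pairs, ",")
}
```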

View File

@@ -84,10 +84,7 @@ func init() {
Help: "Nextcloud",
}, {
Value: "owncloud",
Help: "Owncloud 10 PHP based WebDAV server",
}, {
Value: "infinitescale",
Help: "ownCloud Infinite Scale",
Help: "Owncloud",
}, {
Value: "sharepoint",
Help: "Sharepoint Online, authenticated by Microsoft account",
@@ -215,7 +212,6 @@ type Fs struct {
pacer *fs.Pacer // pacer for API calls
precision time.Duration // mod time precision
canStream bool // set if can stream
canTus bool // supports the TUS upload protocol
useOCMtime bool // set if can use X-OC-Mtime
propsetMtime bool // set if can use propset
retryWithZeroDepth bool // some vendors (sharepoint) won't list files when Depth is 1 (our default)
@@ -266,7 +262,6 @@ func (f *Fs) Features() *fs.Features {
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
423, // Locked
425, // Too Early
429, // Too Many Requests.
500, // Internal Server Error
502, // Bad Gateway
@@ -378,8 +373,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
return nil, fs.ErrorObjectNotFound
}
item := result.Responses[0]
// status code 425 is accepted here as well
if !(item.Props.StatusOK() || item.Props.Code() == 425) {
if !item.Props.StatusOK() {
return nil, fs.ErrorObjectNotFound
}
if itemIsDir(&item) {
@@ -636,15 +630,6 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
f.propsetMtime = true
f.hasOCMD5 = true
f.hasOCSHA1 = true
case "infinitescale":
f.precision = time.Second
f.useOCMtime = true
f.propsetMtime = true
f.hasOCMD5 = false
f.hasOCSHA1 = true
f.canChunk = false
f.canTus = true
f.opt.ChunkSize = 10 * fs.Mebi
case "nextcloud":
f.precision = time.Second
f.useOCMtime = true
@@ -1342,7 +1327,7 @@ func (o *Object) Size() int64 {
ctx := context.TODO()
err := o.readMetaData(ctx)
if err != nil {
fs.Infof(o, "Failed to read metadata: %v", err)
fs.Logf(o, "Failed to read metadata: %v", err)
return 0
}
return o.size
@@ -1386,7 +1371,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(ctx)
if err != nil {
fs.Infof(o, "Failed to read metadata: %v", err)
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
}
return o.modTime
@@ -1512,21 +1497,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return fmt.Errorf("Update mkParentDir failed: %w", err)
}
if o.fs.canTus { // supports the tus upload protocol, ie. InfiniteScale
fs.Debugf(src, "Update will use the tus protocol to upload")
contentType := fs.MimeType(ctx, src)
err = o.updateViaTus(ctx, in, contentType, src, options...)
if err != nil {
fs.Debug(src, "tus update failed.")
return fmt.Errorf("tus update failed: %w", err)
}
} else if o.shouldUseChunkedUpload(src) {
if o.fs.opt.Vendor == "nextcloud" {
fs.Debugf(src, "Update will use the chunked upload strategy")
err = o.updateChunked(ctx, in, src, options...)
} else {
fs.Debug(src, "Chunking - unknown vendor")
}
if o.shouldUseChunkedUpload(src) {
fs.Debugf(src, "Update will use the chunked upload strategy")
err = o.updateChunked(ctx, in, src, options...)
if err != nil {
return err
}
@@ -1538,9 +1511,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// TODO: define getBody() to enable low-level HTTP/2 retries
err = o.updateSimple(ctx, in, nil, filePath, src.Size(), contentType, extraHeaders, o.fs.endpointURL, options...)
if err != nil {
return fmt.Errorf("unchunked simple update failed: %w", err)
return err
}
}
// read metadata from remote
o.hasMetaData = false
return o.readMetaData(ctx)
@@ -1550,7 +1524,7 @@ func (o *Object) extraHeaders(ctx context.Context, src fs.ObjectInfo) map[string
extraHeaders := map[string]string{}
if o.fs.useOCMtime || o.fs.hasOCMD5 || o.fs.hasOCSHA1 {
if o.fs.useOCMtime {
extraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", o.modTime.Unix())
extraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
}
// Set one upload checksum
// Owncloud uses one checksum only to check the upload and stores its own SHA1 and MD5

View File

@@ -1,133 +0,0 @@
#!/usr/bin/env python3
"""
This script checks for unauthorized modifications in autogenerated sections of markdown files.
It is designed to be used in a GitHub Actions workflow or a local pre-commit hook.
Features:
- Detects markdown files changed in the last commit.
- Identifies modified autogenerated sections marked by specific comments.
- Reports violations using GitHub Actions error messages.
- Exits with a nonzero status code if unauthorized changes are found.
It currently only checks the last commit.
"""
import re
import subprocess
import sys
def run_git(args):
"""
Run a Git command with the provided arguments and return its output as a string.
"""
return subprocess.run(["git"] + args, stdout=subprocess.PIPE, text=True, check=True).stdout.strip()
def get_changed_files():
"""
Retrieve a list of markdown files that were changed in the last commit.
"""
files = run_git(["diff", "--name-only", "HEAD~1", "HEAD"]).splitlines()
return [f for f in files if f.endswith(".md")]
def get_diff(file):
"""
Get the diff of a given file between the last commit and the current version.
"""
return run_git(["diff", "-U0", "HEAD~1", "HEAD", "--", file]).splitlines()
def get_file_content(ref, file):
"""
Retrieve the content of a file from a given Git reference.
"""
try:
return run_git(["show", f"{ref}:{file}"]).splitlines()
except Exception:
return []
def find_regions(lines):
"""
Identify the start and end line numbers of autogenerated regions in a file.
"""
regions = []
start = None
for i, line in enumerate(lines, 1):
if "rem autogenerated options start" in line:
start = i
elif "rem autogenerated options stop" in line and start is not None:
regions.append((start, i))
start = None
return regions
def in_region(ln, regions):
"""
Check if a given line number falls within an autogenerated region.
"""
return any(start <= ln <= end for start, end in regions)
def show_error(file_name, line, message):
"""
Print an error message in a GitHub Actions-compatible format.
"""
print(f"::error file={file_name},line={line}::{message} at {file_name} line {line}")
def check_file(file):
"""
Check a markdown file for modifications in autogenerated regions.
"""
viol = False
new_lines = get_file_content("HEAD", file)
old_lines = get_file_content("HEAD~1", file)
# Entire autogenerated file check.
if any("autogenerated - DO NOT EDIT" in l for l in new_lines[:10]):
if get_diff(file):
show_error(file, 1, "Autogenerated file modified")
return True
return False
# Partial autogenerated regions.
regions_new = find_regions(new_lines)
regions_old = find_regions(old_lines)
diff = get_diff(file)
hunk_re = re.compile(r"^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@")
new_ln = old_ln = None
for line in diff:
if line.startswith("@@"):
m = hunk_re.match(line)
if m:
old_ln = int(m.group(1))
new_ln = int(m.group(3))
elif new_ln is None:
continue
elif line.startswith("+"):
if in_region(new_ln, regions_new):
show_error(file, new_ln, "Autogenerated region of file modified")
viol = True
new_ln += 1
elif line.startswith("-"):
if in_region(old_ln, regions_old):
show_error(file, old_ln, "Autogenerated region of file modified")
viol = True
old_ln += 1
else:
new_ln += 1
old_ln += 1
return viol
def main():
"""
Main function that iterates over changed files and checks them for violations.
"""
found = False
for f in get_changed_files():
if check_file(f):
found = True
if found:
sys.exit(1)
print("No unauthorized edits found in autogenerated sections.")
sys.exit(0)
if __name__ == "__main__":
main()

View File

@@ -53,15 +53,6 @@ import (
_ "github.com/rclone/rclone/cmd/rmdirs"
_ "github.com/rclone/rclone/cmd/selfupdate"
_ "github.com/rclone/rclone/cmd/serve"
_ "github.com/rclone/rclone/cmd/serve/dlna"
_ "github.com/rclone/rclone/cmd/serve/docker"
_ "github.com/rclone/rclone/cmd/serve/ftp"
_ "github.com/rclone/rclone/cmd/serve/http"
_ "github.com/rclone/rclone/cmd/serve/nfs"
_ "github.com/rclone/rclone/cmd/serve/restic"
_ "github.com/rclone/rclone/cmd/serve/s3"
_ "github.com/rclone/rclone/cmd/serve/sftp"
_ "github.com/rclone/rclone/cmd/serve/webdav"
_ "github.com/rclone/rclone/cmd/settier"
_ "github.com/rclone/rclone/cmd/sha1sum"
_ "github.com/rclone/rclone/cmd/size"

View File

@@ -23,23 +23,19 @@ func init() {
}
var commandDefinition = &cobra.Command{
Use: "authorize <fs name> [base64_json_blob | client_id client_secret]",
Use: "authorize",
Short: `Remote authorization.`,
Long: `Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser - use as instructed by
rclone config.
The command requires 1-3 arguments:
- fs name (e.g., "drive", "s3", etc.)
- Either a base64 encoded JSON blob obtained from a previous rclone config session
- Or a client_id and client_secret pair obtained from the remote service
Use --auth-no-open-browser to prevent rclone from opening the auth
link in the default browser automatically.
Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.`,
Annotations: map[string]string{
"versionIntroduced": "v1.27",
// "groups": "",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 3, command, args)

View File

@@ -1,32 +0,0 @@
package authorize
import (
"bytes"
"strings"
"testing"
"github.com/spf13/cobra"
)
func TestAuthorizeCommand(t *testing.T) {
// Test that the Use string is correctly formatted
if commandDefinition.Use != "authorize <fs name> [base64_json_blob | client_id client_secret]" {
t.Errorf("Command Use string doesn't match expected format: %s", commandDefinition.Use)
}
// Test that help output contains the argument information
buf := &bytes.Buffer{}
cmd := &cobra.Command{}
cmd.AddCommand(commandDefinition)
cmd.SetOut(buf)
cmd.SetArgs([]string{"authorize", "--help"})
err := cmd.Execute()
if err != nil {
t.Fatalf("Failed to execute help command: %v", err)
}
helpOutput := buf.String()
if !strings.Contains(helpOutput, "authorize <fs name>") {
t.Errorf("Help output doesn't contain correct usage information")
}
}

View File

@@ -5,6 +5,8 @@ import (
"os"
"sort"
"strconv"
"strings"
"time"
)
// Names comprises a set of file names
@@ -83,3 +85,81 @@ func (am AliasMap) Alias(name1 string) string {
}
return name1
}
// ParseGlobs determines whether a string contains {brackets}
// and returns the substring (including both brackets) for replacing
// substring is first opening bracket to last closing bracket --
// good for {{this}} but not {this}{this}
func ParseGlobs(s string) (hasGlobs bool, substring string) {
open := strings.Index(s, "{")
close := strings.LastIndex(s, "}")
if open >= 0 && close > open {
return true, s[open : close+1]
}
return false, ""
}
// TrimBrackets converts {{this}} to this
func TrimBrackets(s string) string {
return strings.Trim(s, "{}")
}
// TimeFormat converts a user-supplied string to a Go time constant, if possible
func TimeFormat(timeFormat string) string {
switch timeFormat {
case "Layout":
timeFormat = time.Layout
case "ANSIC":
timeFormat = time.ANSIC
case "UnixDate":
timeFormat = time.UnixDate
case "RubyDate":
timeFormat = time.RubyDate
case "RFC822":
timeFormat = time.RFC822
case "RFC822Z":
timeFormat = time.RFC822Z
case "RFC850":
timeFormat = time.RFC850
case "RFC1123":
timeFormat = time.RFC1123
case "RFC1123Z":
timeFormat = time.RFC1123Z
case "RFC3339":
timeFormat = time.RFC3339
case "RFC3339Nano":
timeFormat = time.RFC3339Nano
case "Kitchen":
timeFormat = time.Kitchen
case "Stamp":
timeFormat = time.Stamp
case "StampMilli":
timeFormat = time.StampMilli
case "StampMicro":
timeFormat = time.StampMicro
case "StampNano":
timeFormat = time.StampNano
case "DateTime":
// timeFormat = time.DateTime // missing in go1.19
timeFormat = "2006-01-02 15:04:05"
case "DateOnly":
// timeFormat = time.DateOnly // missing in go1.19
timeFormat = "2006-01-02"
case "TimeOnly":
// timeFormat = time.TimeOnly // missing in go1.19
timeFormat = "15:04:05"
case "MacFriendlyTime", "macfriendlytime", "mac":
timeFormat = "2006-01-02 0304PM" // not actually a Go constant -- but useful as macOS filenames can't have colons
}
return timeFormat
}
// AppyTimeGlobs converts "myfile-{DateOnly}.txt" to "myfile-2006-01-02.txt"
func AppyTimeGlobs(s string, t time.Time) string {
hasGlobs, substring := ParseGlobs(s)
if !hasGlobs {
return s
}
timeString := t.Local().Format(TimeFormat(TrimBrackets(substring)))
return strings.ReplaceAll(s, substring, timeString)
}
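
A quick illustration of the glob pipeline above (ParseGlobs → TrimBrackets → TimeFormat → Format), with hypothetical values:

```go
t := time.Date(2025, 3, 10, 18, 33, 0, 0, time.UTC)
name := AppyTimeGlobs("myfile-{DateOnly}.txt", t)
// name == "myfile-2025-03-10.txt" (the time is formatted in the local zone,
// so the date can differ near midnight in non-UTC zones)
```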

View File

@@ -3,22 +3,20 @@ package bilib
import (
"bytes"
"log/slog"
"log"
"github.com/rclone/rclone/fs/log"
"github.com/sirupsen/logrus"
)
// CaptureOutput runs a function capturing its output at log level INFO.
// CaptureOutput runs a function capturing its output.
func CaptureOutput(fun func()) []byte {
logSave := log.Writer()
logrusSave := logrus.StandardLogger().Out
buf := &bytes.Buffer{}
oldLevel := log.Handler.SetLevel(slog.LevelInfo)
log.Handler.SetOutput(func(level slog.Level, text string) {
buf.WriteString(text)
})
defer func() {
log.Handler.ResetOutput()
log.Handler.SetLevel(oldLevel)
}()
log.SetOutput(buf)
logrus.SetOutput(buf)
fun()
log.SetOutput(logSave)
logrus.SetOutput(logrusSave)
return buf.Bytes()
}

View File

@@ -4,6 +4,8 @@ import (
"context"
"fmt"
"math"
"mime"
"path"
"strings"
"time"
@@ -11,7 +13,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/terminal"
"github.com/rclone/rclone/lib/transform"
)
// Prefer describes strategies for resolving sync conflicts
@@ -96,8 +97,8 @@ func (b *bisyncRun) setResolveDefaults(ctx context.Context) error {
}
// replace glob variables, if any
t := time.Now() // capture static time here so it is the same for all files throughout this run
b.opt.ConflictSuffix1 = transform.AppyTimeGlobs(b.opt.ConflictSuffix1, t)
b.opt.ConflictSuffix2 = transform.AppyTimeGlobs(b.opt.ConflictSuffix2, t)
b.opt.ConflictSuffix1 = bilib.AppyTimeGlobs(b.opt.ConflictSuffix1, t)
b.opt.ConflictSuffix2 = bilib.AppyTimeGlobs(b.opt.ConflictSuffix2, t)
// append dot (intentionally allow more than one)
b.opt.ConflictSuffix1 = "." + b.opt.ConflictSuffix1
@@ -129,7 +130,6 @@ type (
path2 namePair
}
)
type namePair struct {
oldName string
newName string
@@ -240,7 +240,24 @@ func SuffixName(ctx context.Context, remote, suffix string) string {
}
ci := fs.GetConfig(ctx)
if ci.SuffixKeepExtension {
return transform.SuffixKeepExtension(remote, suffix)
var (
base = remote
exts = ""
first = true
ext = path.Ext(remote)
)
for ext != "" {
// Look second and subsequent extensions in mime types.
// If they aren't found then don't keep it as an extension.
if !first && mime.TypeByExtension(ext) == "" {
break
}
base = base[:len(base)-len(ext)]
exts = ext + exts
first = false
ext = path.Ext(base)
}
return base + suffix + exts
}
return remote + suffix
}
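
A minimal sketch of the extension-preserving branch, assuming `SuffixKeepExtension` is set in the context's config:

```go
ctx := context.Background()
fs.GetConfig(ctx).SuffixKeepExtension = true
fmt.Println(SuffixName(ctx, "backup/data.txt", ".conflict1"))
// "backup/data.conflict1.txt" - the suffix lands before the extension.
// Multi-part extensions like ".tar.gz" stay together only when the extra
// parts (".tar") are known to the local mime tables.
```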

View File

@@ -3,106 +3,502 @@ package convmv
import (
"context"
"encoding/base64"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"unicode/utf8"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/sync"
"github.com/rclone/rclone/lib/transform"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/random"
"github.com/spf13/cobra"
"golang.org/x/text/encoding/charmap"
"golang.org/x/text/unicode/norm"
)
// Globals
var (
deleteEmptySrcDirs = false
createEmptySrcDirs = false
Opt ConvOpt
Cmaps = map[int]*charmap.Charmap{}
)
// ConvOpt sets the conversion options
type ConvOpt struct {
ctx context.Context
f fs.Fs
ConvertAlgo Convert
FindReplace []string
Prefix string
Suffix string
Max int
Enc encoder.MultiEncoder
CmapFlag fs.Enum[cmapChoices]
Cmap *charmap.Charmap
List bool
}
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &deleteEmptySrcDirs, "delete-empty-src-dirs", "", deleteEmptySrcDirs, "Delete empty source dirs after move", "")
flags.BoolVarP(cmdFlags, &createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after move", "")
flags.FVarP(cmdFlags, &Opt.ConvertAlgo, "conv", "t", "Conversion algorithm: "+Opt.ConvertAlgo.Help(), "")
flags.StringVarP(cmdFlags, &Opt.Prefix, "prefix", "", "", "In 'prefix' or 'trimprefix' mode, append or trim this prefix", "")
flags.StringVarP(cmdFlags, &Opt.Suffix, "suffix", "", "", "In 'suffix' or 'trimsuffix' mode, append or trim this suffix", "")
flags.IntVarP(cmdFlags, &Opt.Max, "max", "m", -1, "In 'truncate' mode, truncate all path segments longer than this many characters", "")
flags.StringArrayVarP(cmdFlags, &Opt.FindReplace, "replace", "r", nil, "In 'replace' mode, this is a pair of find,replace values (can repeat flag more than once)", "")
flags.FVarP(cmdFlags, &Opt.Enc, "encoding", "", "Custom backend encoding: (use --list to see full list)", "")
flags.FVarP(cmdFlags, &Opt.CmapFlag, "charmap", "", "Other character encoding (use --list to see full list) ", "")
flags.BoolVarP(cmdFlags, &Opt.List, "list", "", false, "Print full list of options", "")
}
// Convert describes conversion setting
type Convert = fs.Enum[convertChoices]
// Supported conversion options
const (
ConvNone Convert = iota
ConvToNFC
ConvToNFD
ConvToNFKC
ConvToNFKD
ConvFindReplace
ConvPrefix
ConvSuffix
ConvTrimPrefix
ConvTrimSuffix
ConvIndex
ConvDate
ConvTruncate
ConvBase64Encode
ConvBase64Decode
ConvEncoder
ConvDecoder
ConvISO8859_1
ConvWindows1252
ConvMacintosh
ConvCharmap
ConvLowercase
ConvUppercase
ConvTitlecase
ConvASCII
ConvURL
ConvMapper
)
type convertChoices struct{}
func (convertChoices) Choices() []string {
return []string{
ConvNone: "none",
ConvToNFC: "nfc",
ConvToNFD: "nfd",
ConvToNFKC: "nfkc",
ConvToNFKD: "nfkd",
ConvFindReplace: "replace",
ConvPrefix: "prefix",
ConvSuffix: "suffix",
ConvTrimPrefix: "trimprefix",
ConvTrimSuffix: "trimsuffix",
ConvIndex: "index",
ConvDate: "date",
ConvTruncate: "truncate",
ConvBase64Encode: "base64encode",
ConvBase64Decode: "base64decode",
ConvEncoder: "encoder",
ConvDecoder: "decoder",
ConvISO8859_1: "ISO-8859-1",
ConvWindows1252: "Windows-1252",
ConvMacintosh: "Macintosh",
ConvCharmap: "charmap",
ConvLowercase: "lowercase",
ConvUppercase: "uppercase",
ConvTitlecase: "titlecase",
ConvASCII: "ascii",
ConvURL: "url",
ConvMapper: "mapper",
}
}
func (convertChoices) Type() string {
return "string"
}
type cmapChoices struct{}
func (cmapChoices) Choices() []string {
choices := make([]string, 1) // index 0 is reserved so the flag's zero value means "unset"
i := 0
for _, enc := range charmap.All {
c, ok := enc.(*charmap.Charmap)
if !ok {
continue
}
name := strings.ReplaceAll(c.String(), " ", "-")
if name == "" {
name = fmt.Sprintf("unknown-%d", i)
}
Cmaps[len(choices)] = c // key by the choice index so charmapByID(flag) resolves the right charmap
choices = append(choices, name)
i++
}
return choices
}
func (cmapChoices) Type() string {
return "string"
}
func charmapByID(cm fs.Enum[cmapChoices]) *charmap.Charmap {
c, ok := Cmaps[int(cm)]
if ok {
return c
}
return nil
}
var commandDefinition = &cobra.Command{
Use: "convmv dest:path --name-transform XXX",
Short: `Convert file and directory names in place.`,
// Warning¡ "¡" will be replaced by backticks below
Use: "convmv source:path",
Short: `Convert file and directory names`,
// Warning! "|" will be replaced by backticks below
Long: strings.ReplaceAll(`
convmv supports advanced path name transformations for converting and renaming files and directories by applying prefixes, suffixes, and other alterations.
This command renames files and directory names according to a user-supplied conversion.
`+transform.SprintList()+`
It is useful for renaming a lot of files in an automated way.
Multiple transformations can be used in sequence, applied in the order they are specified on the command line.
`+sprintList()+`
The ¡--name-transform¡ flag is also available in ¡sync¡, ¡copy¡, and ¡move¡.
## Files vs Directories ##
By default ¡--name-transform¡ will only apply to file names. This means only the leaf file name will be transformed.
However some of the transforms would be better applied to the whole path or just directories.
To choose which part of the file path is affected, some tags can be added to the ¡--name-transform¡
| Tag | Effect |
|------|------|
| ¡file¡ | Only transform the leaf name of files (DEFAULT) |
| ¡dir¡ | Only transform name of directories - these may appear anywhere in the path |
| ¡all¡ | Transform the entire path for files and directories |
This is used by adding the tag into the transform name like this: ¡--name-transform file,prefix=ABC¡ or ¡--name-transform dir,prefix=DEF¡.
For some conversions, using ¡all¡ is more likely to be useful, for example ¡--name-transform all,nfc¡.
Note that ¡--name-transform¡ must not add path separators ¡/¡ to the name, as this will cause an error.
## Ordering and Conflicts ##
* Transformations will be applied in the order specified by the user.
* If the ¡file¡ tag is in use (the default) then only the leaf name of files will be transformed.
* If the ¡dir¡ tag is in use then directories anywhere in the path will be transformed
* If the ¡all¡ tag is in use then directories and files anywhere in the path will be transformed
* Each transformation will be run one path segment at a time.
* If a transformation adds a ¡/¡ or ends up with an empty path segment then that will be an error.
* It is up to the user to put the transformations in a sensible order.
* Conflicting transformations, such as ¡prefix¡ followed by ¡trimprefix¡ or ¡nfc¡ followed by ¡nfd¡, are possible.
* Instead of enforcing mutual exclusivity, transformations are applied in sequence as specified by the
user, allowing for intentional use cases (e.g., trimming one prefix before adding another).
* Users should be aware that certain combinations may lead to unexpected results and should verify
transformations using ¡--dry-run¡ before execution.
## Race Conditions and Non-Deterministic Behavior ##
Some transformations, such as ¡replace=old:new¡, may introduce conflicts where multiple source files map to the same destination name.
This can lead to race conditions when performing concurrent transfers. It is up to the user to anticipate these.
* If two files from the source are transformed into the same name at the destination, the final state may be non-deterministic.
* Running rclone check after a sync using such transformations may erroneously report missing or differing files due to overwritten results.
* To minimize risks, users should:
* Carefully review transformations that may introduce conflicts.
* Use ¡--dry-run¡ to inspect changes before executing a sync (but keep in mind that it won't show the effect of non-deterministic transformations).
* Avoid transformations that cause multiple distinct source files to map to the same destination name.
* Consider disabling concurrency with ¡--transfers=1¡ if necessary.
* Certain transformations (e.g. ¡prefix¡) will have a multiplying effect every time they are used. Avoid these when using ¡bisync¡.
`, "¡", "`"),
`, "|", "`"),
Annotations: map[string]string{
"versionIntroduced": "v1.70",
"groups": "Filter,Listing,Important,Copy",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fdst, srcFileName := cmd.NewFsFile(args[0])
cmd.Run(false, true, command, func() error {
if !transform.Transforming(context.Background()) {
return errors.New("--name-transform must be set")
}
if srcFileName == "" {
return sync.Transform(context.Background(), fdst, deleteEmptySrcDirs, createEmptySrcDirs)
}
return operations.TransformFile(context.Background(), fdst, srcFileName)
fsrc, srcFileName := cmd.NewFsFile(args[0])
cmd.Run(false, true, command, func() error { // retries switched off to prevent double-encoding
return Convmv(context.Background(), fsrc, srcFileName)
})
},
}
// Convmv converts and renames files and directories
// pass srcFileName == "" to convmv every object in fsrc instead of a single object
func Convmv(ctx context.Context, f fs.Fs, srcFileName string) error {
Opt.ctx = ctx
Opt.f = f
if Opt.List {
printList()
return nil
}
err := Opt.validate()
if err != nil {
return err
}
if srcFileName == "" {
// it's a dir
return walkConv(ctx, f, "")
}
// it's a file
obj, err := f.NewObject(Opt.ctx, srcFileName)
if err != nil {
return err
}
oldName, newName, skip, err := parseEntry(obj)
if err != nil {
return err
}
if skip {
return nil
}
return operations.MoveFile(Opt.ctx, Opt.f, Opt.f, newName, oldName)
}
func (opt *ConvOpt) validate() error {
switch opt.ConvertAlgo {
case ConvNone:
return errors.New("must choose a conversion mode with -t flag")
case ConvFindReplace:
if len(opt.FindReplace) == 0 {
return errors.New("must include --replace flag in replace mode")
}
for _, set := range opt.FindReplace {
split := strings.Split(set, ",")
if len(split) != 2 {
return errors.New("--replace must include exactly two comma-separated values")
}
if split[0] == "" {
return errors.New("'find' value cannot be blank ('replace' can be)")
}
}
case ConvPrefix, ConvTrimPrefix:
if opt.Prefix == "" {
return errors.New("must include a --prefix")
}
case ConvSuffix, ConvTrimSuffix:
if opt.Suffix == "" {
return errors.New("must include a --suffix")
}
case ConvTruncate:
if opt.Max < 1 {
return errors.New("--max cannot be less than 1 in 'truncate' mode")
}
case ConvCharmap:
if opt.CmapFlag == 0 {
return errors.New("must specify a charmap with --charmap flag")
}
c := charmapByID(opt.CmapFlag)
if c == nil {
return errors.New("unknown charmap")
}
opt.Cmap = c
}
return nil
}
// walkConv lists dir and renames its entries, recursing into each directory under its new name
func walkConv(ctx context.Context, f fs.Fs, dir string) error {
entries, err := list.DirSorted(ctx, f, false, dir)
if err != nil {
return err
}
return walkFunc(dir, entries, nil)
}
func walkFunc(path string, entries fs.DirEntries, err error) error {
fs.Debugf(path, "walking dir")
if err != nil {
return err
}
for _, entry := range entries {
switch x := entry.(type) {
case fs.Object:
oldName, newName, skip, err := parseEntry(x)
if err != nil {
return err
}
if skip {
continue
}
fs.Debugf(x, "%v %v %v %v %v", Opt.ctx, Opt.f, Opt.f, newName, oldName)
err = operations.MoveFile(Opt.ctx, Opt.f, Opt.f, newName, oldName)
if err != nil {
return err
}
case fs.Directory:
oldName, newName, skip, err := parseEntry(x)
if err != nil {
return err
}
if !skip { // still want to recurse during dry-runs to get accurate logs
err = DirMoveCaseInsensitive(Opt.ctx, Opt.f, oldName, newName)
if err != nil {
return err
}
} else {
newName = oldName // otherwise dry-runs won't be able to find it
}
// recurse, calling it by its new name
err = walkConv(Opt.ctx, Opt.f, newName)
if err != nil {
return err
}
}
}
return nil
}
// ConvertPath converts a path string according to the chosen ConvertAlgo.
// Each path segment is converted separately, to preserve path separators.
// If baseOnly is true, only the base will be converted (useful for renaming while walking a dir tree recursively),
// for example, "some/nested/path" -> "some/nested/CONVERTEDPATH".
// Otherwise, the entire path is converted.
func ConvertPath(s string, ConvertAlgo Convert, baseOnly bool) (string, error) {
if s == "" || s == "/" || s == "\\" || s == "." {
return "", nil
}
if baseOnly {
convertedBase, err := ConvertPathSegment(filepath.Base(s), ConvertAlgo)
return filepath.Join(filepath.Dir(s), convertedBase), err
}
segments := strings.Split(s, string(os.PathSeparator))
convertedSegments := make([]string, 0, len(segments)) // length 0: segments are appended below
for _, seg := range segments {
convSeg, err := ConvertPathSegment(seg, ConvertAlgo)
if err != nil {
return "", err
}
convertedSegments = append(convertedSegments, convSeg)
}
return filepath.Join(convertedSegments...), nil
}
// ConvertPathSegment converts one path segment (or really any string) according to the chosen ConvertAlgo.
// It assumes path separators have already been trimmed.
func ConvertPathSegment(s string, ConvertAlgo Convert) (string, error) {
fs.Debugf(s, "converting")
switch ConvertAlgo {
case ConvNone:
return s, nil
case ConvToNFC:
return norm.NFC.String(s), nil
case ConvToNFD:
return norm.NFD.String(s), nil
case ConvToNFKC:
return norm.NFKC.String(s), nil
case ConvToNFKD:
return norm.NFKD.String(s), nil
case ConvBase64Encode:
return base64.URLEncoding.EncodeToString([]byte(s)), nil // URLEncoding to avoid slashes
case ConvBase64Decode:
if s == ".DS_Store" {
return s, nil
}
b, err := base64.URLEncoding.DecodeString(s)
return string(b), err
case ConvFindReplace:
oldNews := []string{}
for _, pair := range Opt.FindReplace {
split := strings.Split(pair, ",")
oldNews = append(oldNews, split...)
}
replacer := strings.NewReplacer(oldNews...)
return replacer.Replace(s), nil
case ConvPrefix:
return Opt.Prefix + s, nil
case ConvSuffix:
return s + Opt.Suffix, nil
case ConvTrimPrefix:
return strings.TrimPrefix(s, Opt.Prefix), nil
case ConvTrimSuffix:
return strings.TrimSuffix(s, Opt.Suffix), nil
case ConvTruncate:
if Opt.Max <= 0 {
return s, nil
}
if utf8.RuneCountInString(s) <= Opt.Max {
return s, nil
}
runes := []rune(s)
return string(runes[:Opt.Max]), nil
case ConvEncoder:
return Opt.Enc.Encode(s), nil
case ConvDecoder:
return Opt.Enc.Decode(s), nil
case ConvISO8859_1:
return encodeWithReplacement(s, charmap.ISO8859_1), nil
case ConvWindows1252:
return encodeWithReplacement(s, charmap.Windows1252), nil
case ConvMacintosh:
return encodeWithReplacement(s, charmap.Macintosh), nil
case ConvCharmap:
return encodeWithReplacement(s, Opt.Cmap), nil
case ConvLowercase:
return strings.ToLower(s), nil
case ConvUppercase:
return strings.ToUpper(s), nil
case ConvTitlecase:
return strings.ToTitle(s), nil
case ConvASCII:
return toASCII(s), nil
default:
return "", errors.New("this option is not yet implemented")
}
}
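
For reference, two hypothetical calls showing the effect of `baseOnly` (path separators assumed to be `/`, as on Unix):

```go
p, _ := ConvertPath("some/nested/path.txt", ConvUppercase, true)
// p == "some/nested/PATH.TXT" - only the leaf segment is converted
p, _ = ConvertPath("some/nested/path.txt", ConvUppercase, false)
// p == "SOME/NESTED/PATH.TXT" - every segment is converted, separators preserved
```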
func parseEntry(e fs.DirEntry) (oldName, newName string, skip bool, err error) {
oldName = e.Remote()
newName, err = ConvertPath(oldName, Opt.ConvertAlgo, true)
if err != nil {
fs.Errorf(oldName, "error converting: %v", err)
return oldName, newName, true, err
}
if oldName == newName {
fs.Debugf(oldName, "name is already correct - skipping")
return oldName, newName, true, nil
}
skip = operations.SkipDestructive(Opt.ctx, oldName, "rename to "+newName)
return oldName, newName, skip, nil
}
// DirMoveCaseInsensitive does DirMove in two steps (to temp name, then real name)
// which is necessary for some case-insensitive backends
func DirMoveCaseInsensitive(ctx context.Context, f fs.Fs, srcRemote, dstRemote string) (err error) {
tmpDstRemote := dstRemote + "-rclone-move-" + random.String(8)
err = operations.DirMove(ctx, f, srcRemote, tmpDstRemote)
if err != nil {
return err
}
return operations.DirMove(ctx, f, tmpDstRemote, dstRemote)
}
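
Why two steps: on a case-insensitive backend a direct rename that only changes case collides with itself. A hypothetical run:

```go
// Renaming "Photos" -> "photos" on a case-insensitive backend:
//   1. DirMove "Photos" -> "photos-rclone-move-0aB3xYz9"  (random temp name)
//   2. DirMove "photos-rclone-move-0aB3xYz9" -> "photos"
// A direct "Photos" -> "photos" move can fail or no-op on such backends,
// since both names resolve to the same directory.
```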
func encodeWithReplacement(s string, cmap *charmap.Charmap) string {
return strings.Map(func(r rune) rune {
b, ok := cmap.EncodeRune(r)
if !ok {
return '_'
}
return cmap.DecodeByte(b)
}, s)
}
func toASCII(s string) string {
return strings.Map(func(r rune) rune {
if r <= 127 {
return r
}
return -1
}, s)
}
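
Worked examples of the two helpers (the outputs follow from the code above):

```go
fmt.Println(toASCII("Café 🦊"))                                // "Caf " - non-ASCII runes are dropped
fmt.Println(encodeWithReplacement("Café", charmap.ISO8859_1)) // "Café" - é round-trips via Latin-1
fmt.Println(encodeWithReplacement("🦊", charmap.ISO8859_1))   // "_"    - unmappable runes become '_'
```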
func sprintList() string {
var out strings.Builder
_, _ = out.WriteString(`### Conversion modes
The conversion mode must be specified with the |-t| or |--conv| flag. This
defines what transformation the |convmv| command will make.
`)
for _, v := range Opt.ConvertAlgo.Choices() {
_, _ = fmt.Fprintf(&out, "- `%s`\n", v)
}
_, _ = out.WriteRune('\n')
_, _ = out.WriteString(`### Char maps
These are the choices for the |--charmap| flag.
`)
for _, v := range Opt.CmapFlag.Choices() {
if v == "" {
continue // skip the reserved "unset" entry
}
_, _ = fmt.Fprintf(&out, "- `%s`\n", v)
}
_, _ = out.WriteRune('\n')
_, _ = out.WriteString(`### Encoding masks
These are the valid options for the |--encoding| flag.
`)
for _, v := range strings.Split(encoder.ValidStrings(), ", ") {
_, _ = fmt.Fprintf(&out, "- `%s`\n", v)
}
_, _ = out.WriteRune('\n')
sprintExamples(&out)
return out.String()
}
func printList() {
fmt.Println(sprintList())
}

View File

@@ -0,0 +1,87 @@
package convmv
import (
"fmt"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/encoder"
)
type example struct {
Opt ConvOpt
Path string
}
var examples = []example{
{Path: `stories/The Quick Brown Fox!.txt`, Opt: ConvOpt{ConvertAlgo: ConvUppercase}},
{Path: `stories/The Quick Brown Fox!.txt`, Opt: ConvOpt{ConvertAlgo: ConvFindReplace, FindReplace: []string{"Fox,Turtle", "Quick,Slow"}}},
{Path: `stories/The Quick Brown Fox!.txt`, Opt: ConvOpt{ConvertAlgo: ConvBase64Encode}},
{Path: `c3Rvcmllcw==/VGhlIFF1aWNrIEJyb3duIEZveCEudHh0`, Opt: ConvOpt{ConvertAlgo: ConvBase64Decode}},
{Path: `stories/The Quick Brown 🦊 Fox Went to the Café!.txt`, Opt: ConvOpt{ConvertAlgo: ConvToNFC}},
{Path: `stories/The Quick Brown 🦊 Fox Went to the Café!.txt`, Opt: ConvOpt{ConvertAlgo: ConvToNFD}},
{Path: `stories/The Quick Brown 🦊 Fox!.txt`, Opt: ConvOpt{ConvertAlgo: ConvASCII}},
{Path: `stories/The Quick Brown Fox!.txt`, Opt: ConvOpt{ConvertAlgo: ConvTrimSuffix, Suffix: ".txt"}},
{Path: `stories/The Quick Brown Fox!.txt`, Opt: ConvOpt{ConvertAlgo: ConvPrefix, Prefix: "OLD_"}},
{Path: `stories/The Quick Brown 🦊 Fox Went to the Café!.txt`, Opt: ConvOpt{ConvertAlgo: ConvCharmap, CmapFlag: 20}},
{Path: `stories/The Quick Brown Fox: A Memoir [draft].txt`, Opt: ConvOpt{ConvertAlgo: ConvEncoder, Enc: encoder.EncodeColon | encoder.EncodeSquareBracket}},
{Path: `stories/The Quick Brown 🦊 Fox Went to the Café!.txt`, Opt: ConvOpt{ConvertAlgo: ConvTruncate, Max: 21}},
}
func (e example) command() string {
s := fmt.Sprintf(`rclone convmv %q -t %s`, e.Path, e.Opt.ConvertAlgo)
switch e.Opt.ConvertAlgo {
case ConvFindReplace:
for _, r := range e.Opt.FindReplace {
s += fmt.Sprintf(` -r %q`, r)
}
case ConvTrimPrefix, ConvPrefix:
s += fmt.Sprintf(` --prefix %q`, e.Opt.Prefix)
case ConvTrimSuffix, ConvSuffix:
s += fmt.Sprintf(` --suffix %q`, e.Opt.Suffix)
case ConvCharmap:
s += fmt.Sprintf(` --charmap %q`, e.Opt.CmapFlag.String())
case ConvEncoder:
s += fmt.Sprintf(` --encoding %q`, e.Opt.Enc.String())
case ConvTruncate:
s += fmt.Sprintf(` --max %d`, e.Opt.Max)
}
return s
}
func (e example) output() string {
_ = e.Opt.validate()
Opt = e.Opt
s, err := ConvertPath(e.Path, e.Opt.ConvertAlgo, false)
if err != nil {
fs.Errorf(s, "error: %v", err)
}
return s
}
// go run ./ convmv --help
func sprintExamples(out *strings.Builder) {
_, _ = fmt.Fprintf(out, `### Examples:
Here are some examples of rclone convmv in action.
`)
for _, e := range examples {
_, _ = fmt.Fprintf(out, "```\n%s\n", e.command())
_, _ = fmt.Fprintf(out, "// Output: %s\n```\n\n", e.output())
}
Opt = ConvOpt{} // reset
}
/* func sprintAllCharmapExamples() string {
s := ""
e := example{Path: `stories/The Quick Brown 🦊 Fox Went to the Café!.txt`, Opt: ConvOpt{ConvertAlgo: ConvCharmap, CmapFlag: 0}}
for i := range Cmaps {
e.Opt.CmapFlag++
_ = e.Opt.validate()
Opt = e.Opt
s += fmt.Sprintf("%d Command: %s \n", i, e.command())
s += fmt.Sprintf("Result: %s \n\n", e.output())
}
return s
} */

View File

@@ -5,22 +5,22 @@ import (
"cmp"
"context"
"fmt"
"path"
"path/filepath"
"slices"
"strings"
"testing"
_ "github.com/rclone/rclone/backend/all" // import all backends
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/sync"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/encoder"
"golang.org/x/text/unicode/norm"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/transform"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/text/unicode/norm"
)
// Some times used in the tests
@@ -34,114 +34,62 @@ func TestMain(m *testing.M) {
fstest.TestMain(m)
}
func TestTransform(t *testing.T) {
func TestConvmv(t *testing.T) {
type args struct {
TransformOpt []string
TransformBackOpt []string
Lossless bool // whether the TransformBackAlgo is always losslessly invertible
ConvertAlgo fs.Enum[convertChoices]
ConvertBackAlgo fs.Enum[convertChoices]
Lossless bool // whether the ConvertBackAlgo is always losslessly invertible
ExtraOpt ConvOpt
}
tests := []struct {
name string
args args
}{
{name: "NFC", args: args{
TransformOpt: []string{"all,nfc"},
TransformBackOpt: []string{"all,nfd"},
Lossless: false,
}},
{name: "NFD", args: args{
TransformOpt: []string{"all,nfd"},
TransformBackOpt: []string{"all,nfc"},
Lossless: false,
}},
{name: "base64", args: args{
TransformOpt: []string{"all,base64encode"},
TransformBackOpt: []string{"all,base64encode"},
Lossless: false,
}},
{name: "prefix", args: args{
TransformOpt: []string{"all,prefix=PREFIX"},
TransformBackOpt: []string{"all,trimprefix=PREFIX"},
Lossless: true,
}},
{name: "suffix", args: args{
TransformOpt: []string{"all,suffix=SUFFIX"},
TransformBackOpt: []string{"all,trimsuffix=SUFFIX"},
Lossless: true,
}},
{name: "truncate", args: args{
TransformOpt: []string{"all,truncate=10"},
TransformBackOpt: []string{"all,truncate=10"},
Lossless: false,
}},
{name: "encoder", args: args{
TransformOpt: []string{"all,encoder=Colon,SquareBracket"},
TransformBackOpt: []string{"all,decoder=Colon,SquareBracket"},
Lossless: true,
}},
{name: "ISO-8859-1", args: args{
TransformOpt: []string{"all,ISO-8859-1"},
TransformBackOpt: []string{"all,ISO-8859-1"},
Lossless: false,
}},
{name: "charmap", args: args{
TransformOpt: []string{"all,charmap=ISO-8859-7"},
TransformBackOpt: []string{"all,charmap=ISO-8859-7"},
Lossless: false,
}},
{name: "lowercase", args: args{
TransformOpt: []string{"all,lowercase"},
TransformBackOpt: []string{"all,lowercase"},
Lossless: false,
}},
{name: "ascii", args: args{
TransformOpt: []string{"all,ascii"},
TransformBackOpt: []string{"all,ascii"},
Lossless: false,
}},
{name: "NFC", args: args{ConvertAlgo: ConvToNFC, ConvertBackAlgo: ConvToNFD, Lossless: false}},
{name: "NFD", args: args{ConvertAlgo: ConvToNFD, ConvertBackAlgo: ConvToNFC, Lossless: false}},
{name: "NFKC", args: args{ConvertAlgo: ConvToNFKC, ConvertBackAlgo: ConvToNFKD, Lossless: false}},
{name: "NFKD", args: args{ConvertAlgo: ConvToNFKD, ConvertBackAlgo: ConvToNFKC, Lossless: false}},
{name: "base64", args: args{ConvertAlgo: ConvBase64Encode, ConvertBackAlgo: ConvBase64Decode, Lossless: true}},
{name: "replace", args: args{ConvertAlgo: ConvFindReplace, ConvertBackAlgo: ConvFindReplace, Lossless: true, ExtraOpt: ConvOpt{FindReplace: []string{"bread,banana", "pie,apple", "apple,pie", "banana,bread"}}}},
{name: "prefix", args: args{ConvertAlgo: ConvPrefix, ConvertBackAlgo: ConvTrimPrefix, Lossless: true, ExtraOpt: ConvOpt{Prefix: "PREFIX"}}},
{name: "suffix", args: args{ConvertAlgo: ConvSuffix, ConvertBackAlgo: ConvTrimSuffix, Lossless: true, ExtraOpt: ConvOpt{Suffix: "SUFFIX"}}},
{name: "truncate", args: args{ConvertAlgo: ConvTruncate, ConvertBackAlgo: ConvTruncate, Lossless: false, ExtraOpt: ConvOpt{Max: 10}}},
{name: "encoder", args: args{ConvertAlgo: ConvEncoder, ConvertBackAlgo: ConvDecoder, Lossless: true, ExtraOpt: ConvOpt{Enc: encoder.OS}}},
{name: "ISO-8859-1", args: args{ConvertAlgo: ConvISO8859_1, ConvertBackAlgo: ConvISO8859_1, Lossless: false}},
{name: "charmap", args: args{ConvertAlgo: ConvCharmap, ConvertBackAlgo: ConvCharmap, Lossless: false, ExtraOpt: ConvOpt{CmapFlag: 3}}},
{name: "lowercase", args: args{ConvertAlgo: ConvLowercase, ConvertBackAlgo: ConvUppercase, Lossless: false}},
{name: "ascii", args: args{ConvertAlgo: ConvASCII, ConvertBackAlgo: ConvASCII, Lossless: false}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
ctx := context.Background()
r.Mkdir(ctx, r.Flocal)
r.Mkdir(ctx, r.Fremote)
items := makeTestFiles(t, r, "dir1")
err := r.Fremote.Mkdir(ctx, "empty/empty")
require.NoError(t, err)
err = r.Flocal.Mkdir(ctx, "empty/empty")
require.NoError(t, err)
deleteDSStore(t, r)
r.CheckRemoteListing(t, items, []string{"dir1", "empty", "empty/empty"})
r.CheckLocalListing(t, items, []string{"dir1", "empty", "empty/empty"})
r.CheckRemoteListing(t, items, nil)
err = transform.SetOptions(ctx, tt.args.TransformOpt...)
require.NoError(t, err)
err = sync.Transform(ctx, r.Fremote, true, true)
Opt = tt.args.ExtraOpt
Opt.ConvertAlgo = tt.args.ConvertAlgo
err := Convmv(context.Background(), r.Fremote, "")
assert.NoError(t, err)
compareNames(ctx, t, r, items)
compareNames(t, r, items)
transformedItems := transformItems(ctx, t, items)
r.CheckRemoteListing(t, transformedItems, []string{transform.Path(ctx, "dir1", true), transform.Path(ctx, "empty", true), transform.Path(ctx, "empty/empty", true)})
err = transform.SetOptions(ctx, tt.args.TransformBackOpt...)
require.NoError(t, err)
err = sync.Transform(ctx, r.Fremote, true, true)
convertedItems := convertItems(t, items)
Opt.ConvertAlgo = tt.args.ConvertBackAlgo
err = Convmv(context.Background(), r.Fremote, "")
assert.NoError(t, err)
compareNames(ctx, t, r, transformedItems)
compareNames(t, r, convertedItems)
if tt.args.Lossless {
deleteDSStore(t, r)
r.CheckRemoteListing(t, items, []string{"dir1", "empty", "empty/empty"})
r.CheckRemoteItems(t, items...)
}
})
}
}
// const alphabet = "ƀɀɠʀҠԀڀڠݠހ߀ကႠᄀᄠᅀᆀᇠሠበዠጠᐠᑀᑠᒀᒠᓀᓠᔀᔠᕀᕠᖀᖠᗀᗠᘀᘠᙀᚠᛀកᠠᡀᣀᦀ᧠ᨠᯀᰀᴀ⇠⋀⍀⍠⎀⎠⏀␀─┠╀╠▀■◀◠☀☠♀♠⚀⚠⛀⛠✀✠❀➀➠⠠⡀⡠⢀⢠⣀⣠⤀⤠⥀⥠⦠⨠⩀⪀⪠⫠⬀⬠⭀ⰀⲀⲠⳀⴀⵀ⺠⻀㇀㐀㐠㑀㑠㒀㒠㓀㓠㔀㔠㕀㕠㖀㖠㗀㗠㘀㘠㙀㙠㚀㚠㛀㛠㜀㜠㝀㝠㞀㞠㟀㟠㠀㠠㡀㡠㢀㢠㣀㣠㤀㤠㥀㥠㦀㦠㧀㧠㨀㨠㩀㩠㪀㪠㫀㫠㬀㬠㭀㭠㮀㮠㯀㯠㰀㰠㱀㱠㲀㲠㳀㳠㴀㴠㵀㵠㶀㶠㷀㷠㸀㸠㹀㹠㺀㺠㻀㻠㼀㼠㽀㽠㾀㾠㿀㿠䀀䀠䁀䁠䂀䂠䃀䃠䄀䄠䅀䅠䆀䆠䇀䇠䈀䈠䉀䉠䊀䊠䋀䋠䌀䌠䍀䍠䎀䎠䏀䏠䐀䐠䑀䑠䒀䒠䓀䓠䔀䔠䕀䕠䖀䖠䗀䗠䘀䘠䙀䙠䚀䚠䛀䛠䜀䜠䝀䝠䞀䞠䟀䟠䠀䠠䡀䡠䢀䢠䣀䣠䤀䤠䥀䥠䦀䦠䧀䧠䨀䨠䩀䩠䪀䪠䫀䫠䬀䬠䭀䭠䮀䮠䯀䯠䰀䰠䱀䱠䲀䲠䳀䳠䴀䴠䵀䵠䶀䷀䷠一丠乀习亀亠什仠伀传佀你侀侠俀俠倀倠偀偠傀傠僀僠儀儠兀兠冀冠净几刀删剀剠劀加勀勠匀匠區占厀厠叀叠吀吠呀呠咀咠哀哠唀唠啀啠喀喠嗀嗠嘀嘠噀噠嚀嚠囀因圀圠址坠垀垠埀埠堀堠塀塠墀墠壀壠夀夠奀奠妀妠姀姠娀娠婀婠媀媠嫀嫠嬀嬠孀孠宀宠寀寠尀尠局屠岀岠峀峠崀崠嵀嵠嶀嶠巀巠帀帠幀幠庀庠廀廠开张彀彠往徠忀忠怀怠恀恠悀悠惀惠愀愠慀慠憀憠懀懠戀戠所扠技抠拀拠挀挠捀捠掀掠揀揠搀搠摀摠撀撠擀擠攀攠敀敠斀斠旀无昀映晀晠暀暠曀曠最朠杀杠枀枠柀柠栀栠桀桠梀梠检棠椀椠楀楠榀榠槀槠樀樠橀橠檀檠櫀櫠欀欠歀歠殀殠毀毠氀氠汀池沀沠泀泠洀洠浀浠涀涠淀淠渀渠湀湠満溠滀滠漀漠潀潠澀澠激濠瀀瀠灀灠炀炠烀烠焀焠煀煠熀熠燀燠爀爠牀牠犀犠狀狠猀猠獀獠玀玠珀珠琀琠瑀瑠璀璠瓀瓠甀甠畀畠疀疠痀痠瘀瘠癀癠皀皠盀盠眀眠着睠瞀瞠矀矠砀砠础硠碀碠磀磠礀礠祀祠禀禠秀秠稀稠穀穠窀窠竀章笀笠筀筠简箠節篠簀簠籀籠粀粠糀糠紀素絀絠綀綠緀締縀縠繀繠纀纠绀绠缀缠罀罠羀羠翀翠耀耠聀聠肀肠胀胠脀脠腀腠膀膠臀臠舀舠艀艠芀芠苀苠茀茠荀荠莀莠菀菠萀萠葀葠蒀蒠蓀蓠蔀蔠蕀蕠薀薠藀藠蘀蘠虀虠蚀蚠蛀蛠蜀蜠蝀蝠螀螠蟀蟠蠀蠠血衠袀袠裀裠褀褠襀襠覀覠觀觠言訠詀詠誀誠諀諠謀謠譀譠讀讠诀诠谀谠豀豠貀負賀賠贀贠赀赠趀趠跀跠踀踠蹀蹠躀躠軀軠輀輠轀轠辀辠迀迠退造遀遠邀邠郀郠鄀鄠酀酠醀醠釀釠鈀鈠鉀鉠銀銠鋀鋠錀錠鍀鍠鎀鎠鏀鏠鐀鐠鑀鑠钀钠铀铠销锠镀镠門閠闀闠阀阠陀陠隀隠雀雠需霠靀靠鞀鞠韀韠頀頠顀顠颀颠飀飠餀餠饀饠馀馠駀駠騀騠驀驠骀骠髀髠鬀鬠魀魠鮀鮠鯀鯠鰀鰠鱀鱠鲀鲠鳀鳠鴀鴠鵀鵠鶀鶠鷀鷠鸀鸠鹀鹠麀麠黀黠鼀鼠齀齠龀龠ꀀꀠꁀꁠꂀꂠꃀꃠꄀꄠꅀꅠꆀꆠꇀꇠꈀꈠꉀꉠꊀꊠꋀꋠꌀꌠꍀꍠꎀꎠꏀꏠꐀꐠꑀꑠ꒠ꔀꔠꕀꕠꖀꖠꗀꗠꙀꚠꛀ꜀꜠ꝀꞀꡀ測試_Русский___ě_áñ"
const alphabet = "abcdefg123456789"
const alphabet = "ƀɀɠʀҠԀڀڠݠހ߀ကႠᄀᄠᅀᆀᇠሠበዠጠᐠᑀᑠᒀᒠᓀᓠᔀᔠᕀᕠᖀᖠᗀᗠᘀᘠᙀᚠᛀកᠠᡀᣀᦀ᧠ᨠᯀᰀᴀ⇠⋀⍀⍠⎀⎠⏀␀─┠╀╠▀■◀◠☀☠♀♠⚀⚠⛀⛠✀✠❀➀➠⠠⡀⡠⢀⢠⣀⣠⤀⤠⥀⥠⦠⨠⩀⪀⪠⫠⬀⬠⭀ⰀⲀⲠⳀⴀⵀ⺠⻀㇀㐀㐠㑀㑠㒀㒠㓀㓠㔀㔠㕀㕠㖀㖠㗀㗠㘀㘠㙀㙠㚀㚠㛀㛠㜀㜠㝀㝠㞀㞠㟀㟠㠀㠠㡀㡠㢀㢠㣀㣠㤀㤠㥀㥠㦀㦠㧀㧠㨀㨠㩀㩠㪀㪠㫀㫠㬀㬠㭀㭠㮀㮠㯀㯠㰀㰠㱀㱠㲀㲠㳀㳠㴀㴠㵀㵠㶀㶠㷀㷠㸀㸠㹀㹠㺀㺠㻀㻠㼀㼠㽀㽠㾀㾠㿀㿠䀀䀠䁀䁠䂀䂠䃀䃠䄀䄠䅀䅠䆀䆠䇀䇠䈀䈠䉀䉠䊀䊠䋀䋠䌀䌠䍀䍠䎀䎠䏀䏠䐀䐠䑀䑠䒀䒠䓀䓠䔀䔠䕀䕠䖀䖠䗀䗠䘀䘠䙀䙠䚀䚠䛀䛠䜀䜠䝀䝠䞀䞠䟀䟠䠀䠠䡀䡠䢀䢠䣀䣠䤀䤠䥀䥠䦀䦠䧀䧠䨀䨠䩀䩠䪀䪠䫀䫠䬀䬠䭀䭠䮀䮠䯀䯠䰀䰠䱀䱠䲀䲠䳀䳠䴀䴠䵀䵠䶀䷀䷠一丠乀习亀亠什仠伀传佀你侀侠俀俠倀倠偀偠傀傠僀僠儀儠兀兠冀冠净几刀删剀剠劀加勀勠匀匠區占厀厠叀叠吀吠呀呠咀咠哀哠唀唠啀啠喀喠嗀嗠嘀嘠噀噠嚀嚠囀因圀圠址坠垀垠埀埠堀堠塀塠墀墠壀壠夀夠奀奠妀妠姀姠娀娠婀婠媀媠嫀嫠嬀嬠孀孠宀宠寀寠尀尠局屠岀岠峀峠崀崠嵀嵠嶀嶠巀巠帀帠幀幠庀庠廀廠开张彀彠往徠忀忠怀怠恀恠悀悠惀惠愀愠慀慠憀憠懀懠戀戠所扠技抠拀拠挀挠捀捠掀掠揀揠搀搠摀摠撀撠擀擠攀攠敀敠斀斠旀无昀映晀晠暀暠曀曠最朠杀杠枀枠柀柠栀栠桀桠梀梠检棠椀椠楀楠榀榠槀槠樀樠橀橠檀檠櫀櫠欀欠歀歠殀殠毀毠氀氠汀池沀沠泀泠洀洠浀浠涀涠淀淠渀渠湀湠満溠滀滠漀漠潀潠澀澠激濠瀀瀠灀灠炀炠烀烠焀焠煀煠熀熠燀燠爀爠牀牠犀犠狀狠猀猠獀獠玀玠珀珠琀琠瑀瑠璀璠瓀瓠甀甠畀畠疀疠痀痠瘀瘠癀癠皀皠盀盠眀眠着睠瞀瞠矀矠砀砠础硠碀碠磀磠礀礠祀祠禀禠秀秠稀稠穀穠窀窠竀章笀笠筀筠简箠節篠簀簠籀籠粀粠糀糠紀素絀絠綀綠緀締縀縠繀繠纀纠绀绠缀缠罀罠羀羠翀翠耀耠聀聠肀肠胀胠脀脠腀腠膀膠臀臠舀舠艀艠芀芠苀苠茀茠荀荠莀莠菀菠萀萠葀葠蒀蒠蓀蓠蔀蔠蕀蕠薀薠藀藠蘀蘠虀虠蚀蚠蛀蛠蜀蜠蝀蝠螀螠蟀蟠蠀蠠血衠袀袠裀裠褀褠襀襠覀覠觀觠言訠詀詠誀誠諀諠謀謠譀譠讀讠诀诠谀谠豀豠貀負賀賠贀贠赀赠趀趠跀跠踀踠蹀蹠躀躠軀軠輀輠轀轠辀辠迀迠退造遀遠邀邠郀郠鄀鄠酀酠醀醠釀釠鈀鈠鉀鉠銀銠鋀鋠錀錠鍀鍠鎀鎠鏀鏠鐀鐠鑀鑠钀钠铀铠销锠镀镠門閠闀闠阀阠陀陠隀隠雀雠需霠靀靠鞀鞠韀韠頀頠顀顠颀颠飀飠餀餠饀饠馀馠駀駠騀騠驀驠骀骠髀髠鬀鬠魀魠鮀鮠鯀鯠鰀鰠鱀鱠鲀鲠鳀鳠鴀鴠鵀鵠鶀鶠鷀鷠鸀鸠鹀鹠麀麠黀黠鼀鼠齀齠龀龠ꀀꀠꁀꁠꂀꂠꃀꃠꄀꄠꅀꅠꆀꆠꇀꇠꈀꈠꉀꉠꊀꊠꋀꋠꌀꌠꍀꍠꎀꎠꏀꏠꐀꐠꑀꑠ꒠ꔀꔠꕀꕠꖀꖠꗀꗠꙀꚠꛀ꜀꜠ꝀꞀꡀ測試_Русский___ě_áñ"
var extras = []string{"apple", "banana", "appleappleapplebanana", "splitbananasplit"}
@@ -152,19 +100,17 @@ func makeTestFiles(t *testing.T, r *fstest.Run, dir string) []fstest.Item {
items := []fstest.Item{}
for _, c := range alphabet {
var out strings.Builder
for i := rune(0); i < 7; i++ {
for i := rune(0); i < 32; i++ {
out.WriteRune(c + i)
}
fileName := path.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String()))
fileName := filepath.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String()))
fileName = strings.ToValidUTF8(fileName, "")
fileName = strings.NewReplacer(":", "", "<", "", ">", "", "?", "").Replace(fileName) // remove characters illegal on windows
if debug != "" {
fileName = debug
}
item := r.WriteObject(context.Background(), fileName, fileName, t1)
r.WriteFile(fileName, fileName, t1)
items = append(items, item)
n++
@@ -175,7 +121,6 @@ func makeTestFiles(t *testing.T, r *fstest.Run, dir string) []fstest.Item {
for _, extra := range extras {
item := r.WriteObject(context.Background(), extra, extra, t1)
r.WriteFile(extra, extra, t1)
items = append(items, item)
}
@@ -192,7 +137,7 @@ func deleteDSStore(t *testing.T, r *fstest.Run) {
assert.NoError(t, err)
}
func compareNames(ctx context.Context, t *testing.T, r *fstest.Run, items []fstest.Item) {
func compareNames(t *testing.T, r *fstest.Run, items []fstest.Item) {
var entries fs.DirEntries
deleteDSStore(t, r)
@@ -213,8 +158,10 @@ func compareNames(ctx context.Context, t *testing.T, r *fstest.Run, items []fste
// sort by CONVERTED name
slices.SortStableFunc(items, func(a, b fstest.Item) int {
aConv := transform.Path(ctx, a.Path, false)
bConv := transform.Path(ctx, b.Path, false)
aConv, err := ConvertPath(a.Path, Opt.ConvertAlgo, false)
require.NoError(t, err, a.Path)
bConv, err := ConvertPath(b.Path, Opt.ConvertAlgo, false)
require.NoError(t, err, b.Path)
return cmp.Compare(aConv, bConv)
})
slices.SortStableFunc(entries, func(a, b fs.DirEntry) int {
@@ -222,21 +169,23 @@ func compareNames(ctx context.Context, t *testing.T, r *fstest.Run, items []fste
})
for i, e := range entries {
expect := transform.Path(ctx, items[i].Path, false)
expect, err := ConvertPath(items[i].Path, Opt.ConvertAlgo, false)
assert.NoError(t, err)
msg := fmt.Sprintf("expected %v, got %v", detectEncoding(expect), detectEncoding(e.Remote()))
assert.Equal(t, expect, e.Remote(), msg)
}
}
func transformItems(ctx context.Context, t *testing.T, items []fstest.Item) []fstest.Item {
transformedItems := []fstest.Item{}
func convertItems(t *testing.T, items []fstest.Item) []fstest.Item {
convertedItems := []fstest.Item{}
for _, item := range items {
newPath := transform.Path(ctx, item.Path, false)
newPath, err := ConvertPath(item.Path, Opt.ConvertAlgo, false)
assert.NoError(t, err)
newItem := item
newItem.Path = newPath
transformedItems = append(transformedItems, newItem)
convertedItems = append(convertedItems, newItem)
}
return transformedItems
return convertedItems
}
func detectEncoding(s string) string {

View File

@@ -43,7 +43,7 @@ Setting |--auto-filename| will attempt to automatically determine the
filename from the URL (after any redirections) and used in the
destination path.
With |--header-filename| in addition, if a specific filename is
With |--auto-filename-header| in addition, if a specific filename is
set in HTTP headers, it will be used instead of the name from the URL.
With |--print-filename| in addition, the resulting file name will be
printed.

View File

@@ -1,131 +0,0 @@
package gitannex
import (
"fmt"
"slices"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/fspath"
)
type configID int
const (
configRemoteName configID = iota
configPrefix
configLayout
)
// configDefinition describes a configuration value required by this command. We
// use "GETCONFIG" messages to query git-annex for these values at runtime.
type configDefinition struct {
id configID
names []string
description string
defaultValue string
}
const (
defaultRclonePrefix = "git-annex-rclone"
defaultRcloneLayout = "nodir"
)
var requiredConfigs = []configDefinition{
{
id: configRemoteName,
names: []string{"rcloneremotename", "target"},
description: "Name of the rclone remote to use. " +
"Must match a remote known to rclone. " +
"(Note that rclone remotes are a distinct concept from git-annex remotes.)",
},
{
id: configPrefix,
names: []string{"rcloneprefix", "prefix"},
description: "Directory where rclone will write git-annex content. " +
fmt.Sprintf("If not specified, defaults to %q. ", defaultRclonePrefix) +
"This directory will be created on init if it does not exist.",
defaultValue: defaultRclonePrefix,
},
{
id: configLayout,
names: []string{"rclonelayout", "rclone_layout"},
description: "Defines where, within the rcloneprefix directory, rclone will write git-annex content. " +
fmt.Sprintf("Must be one of %v. ", allLayoutModes()) +
fmt.Sprintf("If empty, defaults to %q.", defaultRcloneLayout),
defaultValue: defaultRcloneLayout,
},
}
func (c *configDefinition) getCanonicalName() string {
if len(c.names) < 1 {
panic(fmt.Errorf("configDefinition must have at least one name: %v", c))
}
return c.names[0]
}
// fullDescription returns a single-line, human-readable description for this
// config. The returned string begins with a list of synonyms and ends with
// `c.description`.
func (c *configDefinition) fullDescription() string {
if len(c.names) <= 1 {
return c.description
}
// Exclude the canonical name from the list of synonyms.
synonyms := c.names[1:]
commaSeparatedSynonyms := strings.Join(synonyms, ", ")
return fmt.Sprintf("(synonyms: %s) %s", commaSeparatedSynonyms, c.description)
}
// validateRemoteName validates the "rcloneremotename" config that we receive
// from git-annex. It returns nil iff `value` is valid. Otherwise, it returns a
// descriptive error suitable for sending back to git-annex via stdout.
//
// The value is only valid when:
// 1. It is the exact name of an existing remote.
// 2. It is an fspath string that names an existing remote or a backend. The
// string may include options, but it must not include a path. (That's what
// the "rcloneprefix" config is for.)
//
// While backends are not remote names, per se, they are permitted for
// compatibility with [fstest]. We could guard this behavior behind
// [testing.Testing] to prevent users from specifying backend strings, but
// there's no obvious harm in permitting it.
func validateRemoteName(value string) error {
remoteNames := config.GetRemoteNames()
// Check whether `value` is an exact match for an existing remote.
//
// If we checked whether [cache.Get] returns [fs.ErrorNotFoundInConfigFile],
// we would incorrectly identify file names as valid remote names. We also
// avoid [config.FileSections] because it will miss remotes that are defined
// by environment variables.
if slices.Contains(remoteNames, value) {
return nil
}
parsed, err := fspath.Parse(value)
if err != nil {
return fmt.Errorf("remote could not be parsed: %s", value)
}
if parsed.Path != "" {
return fmt.Errorf("remote does not exist or incorrectly contains a path: %s", value)
}
// Now that we've established `value` is an fspath string that does not
// include a path component, we only need to check whether it names an
// existing remote or backend.
if slices.Contains(remoteNames, parsed.Name) {
return nil
}
maybeBackend := strings.HasPrefix(value, ":")
if !maybeBackend {
return fmt.Errorf("remote does not exist: %s", value)
}
// Strip the leading colon before searching for the backend. For instance,
// search for "local" instead of ":local". Note that `parsed.Name` already
// omits any config options baked into the string.
trimmedBackendName := strings.TrimPrefix(parsed.Name, ":")
if _, err = fs.Find(trimmedBackendName); err != nil {
return fmt.Errorf("backend does not exist: %s", trimmedBackendName)
}
return nil
}
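
Hypothetical outcomes of `validateRemoteName`, assuming a remote named "myremote" exists in the config:

```go
_ = validateRemoteName("myremote")      // nil - exact match for a configured remote
_ = validateRemoteName(":local:")       // nil - backend string, permitted for fstest
_ = validateRemoteName("myremote:docs") // error - contains a path (that belongs in rcloneprefix)
_ = validateRemoteName("nosuch")        // error - not a known remote
```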

View File

@@ -28,11 +28,14 @@ import (
"io"
"os"
"path/filepath"
"slices"
"strings"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -107,6 +110,35 @@ func (m *messageParser) finalParameter() string {
return param
}
// configDefinition describes a configuration value required by this command. We
// use "GETCONFIG" messages to query git-annex for these values at runtime.
type configDefinition struct {
names []string
description string
destination *string
defaultValue *string
}
func (c *configDefinition) getCanonicalName() string {
if len(c.names) < 1 {
panic(fmt.Errorf("configDefinition must have at least one name: %v", c))
}
return c.names[0]
}
// fullDescription returns a single-line, human-readable description for this
// config. The returned string begins with a list of synonyms and ends with
// `c.description`.
func (c *configDefinition) fullDescription() string {
if len(c.names) <= 1 {
return c.description
}
// Exclude the canonical name from the list of synonyms.
synonyms := c.names[1:]
commaSeparatedSynonyms := strings.Join(synonyms, ", ")
return fmt.Sprintf("(synonyms: %s) %s", commaSeparatedSynonyms, c.description)
}
// server contains this command's current state.
type server struct {
reader *bufio.Reader
@@ -242,31 +274,81 @@ func (s *server) handleInitRemote() error {
return fmt.Errorf("failed to get configs: %w", err)
}
if err := validateRemoteName(s.configRcloneRemoteName); err != nil {
s.sendMsg(fmt.Sprintf("INITREMOTE-FAILURE %s", err))
return fmt.Errorf("failed to init remote: %w", err)
// Explicitly check whether [server.configRcloneRemoteName] names a remote.
//
// - We do not permit file paths in the remote name; that's what
// [s.configPrefix] is for. If we simply checked whether [cache.Get]
// returns [fs.ErrorNotFoundInConfigFile], we would incorrectly identify
// file names as valid remote names.
//
// - In order to support remotes defined by environment variables, we must
// use [config.GetRemoteNames] instead of [config.FileSections].
trimmedName := strings.TrimSuffix(s.configRcloneRemoteName, ":")
if slices.Contains(config.GetRemoteNames(), trimmedName) {
s.sendMsg("INITREMOTE-SUCCESS")
return nil
}
if mode := parseLayoutMode(s.configRcloneLayout); mode == layoutModeUnknown {
err := fmt.Errorf("unknown layout mode: %s", s.configRcloneLayout)
s.sendMsg(fmt.Sprintf("INITREMOTE-FAILURE %s", err))
return fmt.Errorf("failed to init remote: %w", err)
// Otherwise, check whether [server.configRcloneRemoteName] is actually a
// backend string such as ":local:". These are not remote names, per se, but
// they are permitted for compatibility with [fstest]. We could guard this
// behavior behind [testing.Testing] to prevent users from specifying
// backend strings, but there's no obvious harm in permitting it.
maybeBackend := strings.HasPrefix(s.configRcloneRemoteName, ":")
if !maybeBackend {
s.sendMsg("INITREMOTE-FAILURE remote does not exist: " + s.configRcloneRemoteName)
return fmt.Errorf("remote does not exist: %s", s.configRcloneRemoteName)
}
parsed, err := fspath.Parse(s.configRcloneRemoteName)
if err != nil {
s.sendMsg("INITREMOTE-FAILURE remote could not be parsed as a backend: " + s.configRcloneRemoteName)
return fmt.Errorf("remote could not be parsed as a backend: %s", s.configRcloneRemoteName)
}
if parsed.Path != "" {
s.sendMsg("INITREMOTE-FAILURE backend must not have a path: " + s.configRcloneRemoteName)
return fmt.Errorf("backend must not have a path: %s", s.configRcloneRemoteName)
}
// Strip the leading colon and options before searching for the backend,
// i.e. search for "local" instead of ":local,description=hello:/tmp/foo".
trimmedBackendName := strings.TrimPrefix(parsed.Name, ":")
if _, err = fs.Find(trimmedBackendName); err != nil {
s.sendMsg("INITREMOTE-FAILURE backend does not exist: " + trimmedBackendName)
return fmt.Errorf("backend does not exist: %s", trimmedBackendName)
}
s.sendMsg("INITREMOTE-SUCCESS")
return nil
}
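// A hypothetical wire trace for the two outcomes above (remote names are
// invented for illustration):
//
//	git-annex -> rclone: INITREMOTE
//	rclone -> git-annex: INITREMOTE-SUCCESS
//	    (rcloneremotename was ":local:": a real backend, no path)
//
//	git-annex -> rclone: INITREMOTE
//	rclone -> git-annex: INITREMOTE-FAILURE backend does not exist: nosuchbackend
//	    (rcloneremotename was ":nosuchbackend:")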
func (s *server) mustSetConfigValue(id configID, value string) {
switch id {
case configRemoteName:
s.configRcloneRemoteName = value
case configPrefix:
s.configPrefix = value
case configLayout:
s.configRcloneLayout = value
default:
panic(fmt.Errorf("unhandled configId: %v", id))
// Get a list of configs with pointers to fields of `s`.
func (s *server) getRequiredConfigs() []configDefinition {
defaultRclonePrefix := "git-annex-rclone"
defaultRcloneLayout := "nodir"
return []configDefinition{
{
[]string{"rcloneremotename", "target"},
"Name of the rclone remote to use. " +
"Must match a remote known to rclone. " +
"(Note that rclone remotes are a distinct concept from git-annex remotes.)",
&s.configRcloneRemoteName,
nil,
},
{
[]string{"rcloneprefix", "prefix"},
"Directory where rclone will write git-annex content. " +
fmt.Sprintf("If not specified, defaults to %q. ", defaultRclonePrefix) +
"This directory will be created on init if it does not exist.",
&s.configPrefix,
&defaultRclonePrefix,
},
{
[]string{"rclonelayout", "rclone_layout"},
"Defines where, within the rcloneprefix directory, rclone will write git-annex content. " +
fmt.Sprintf("Must be one of %v. ", allLayoutModes()) +
fmt.Sprintf("If empty, defaults to %q.", defaultRcloneLayout),
&s.configRcloneLayout,
&defaultRcloneLayout,
},
}
}
@@ -278,8 +360,8 @@ func (s *server) queryConfigs() error {
// Send a "GETCONFIG" message for each required config and parse git-annex's
// "VALUE" response.
queryNextConfig:
for _, config := range requiredConfigs {
for _, config := range s.getRequiredConfigs() {
var valueReceived bool
// Try each of the config's names in sequence, starting with the
// canonical name.
for _, configName := range config.names {
@@ -295,15 +377,19 @@ queryNextConfig:
return fmt.Errorf("failed to parse config value: %s %s", valueKeyword, message.line)
}
if value := message.finalParameter(); value != "" {
s.mustSetConfigValue(config.id, value)
continue queryNextConfig
value := message.finalParameter()
if value != "" {
*config.destination = value
valueReceived = true
break
}
}
if config.defaultValue == "" {
return fmt.Errorf("did not receive a non-empty config value for %q", config.getCanonicalName())
if !valueReceived {
if config.defaultValue == nil {
return fmt.Errorf("did not receive a non-empty config value for %q", config.getCanonicalName())
}
*config.destination = *config.defaultValue
}
s.mustSetConfigValue(config.id, config.defaultValue)
}
s.configsDone = true
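// Sketch of the exchange above for the layout config when git-annex has no
// value stored (illustrative trace; the default comes from getRequiredConfigs):
//
//	rclone -> git-annex: GETCONFIG rclonelayout
//	git-annex -> rclone: VALUE
//	rclone -> git-annex: GETCONFIG rclone_layout      (try the synonym)
//	git-annex -> rclone: VALUE
//	(no non-empty value received; fall back to the default "nodir")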
@@ -322,7 +408,7 @@ func (s *server) handlePrepare() error {
// Git-annex is asking us to return the list of settings that we use. Keep this
// in sync with `handlePrepare()`.
func (s *server) handleListConfigs() {
for _, config := range requiredConfigs {
for _, config := range s.getRequiredConfigs() {
s.sendMsg(fmt.Sprintf("CONFIG %s %s", config.getCanonicalName(), config.fullDescription()))
}
s.sendMsg("CONFIGEND")

View File

@@ -10,6 +10,7 @@ import (
"regexp"
"runtime"
"strings"
"sync"
"testing"
"time"
@@ -190,10 +191,14 @@ func TestMessageParser(t *testing.T) {
}
func TestConfigDefinitionOneName(t *testing.T) {
var parsed string
var defaultValue = "abc"
configFoo := configDefinition{
names: []string{"foo"},
description: "The foo config is utterly useless.",
defaultValue: "abc",
destination: &parsed,
defaultValue: &defaultValue,
}
assert.Equal(t, "foo",
@@ -205,10 +210,14 @@ func TestConfigDefinitionOneName(t *testing.T) {
}
func TestConfigDefinitionTwoNames(t *testing.T) {
var parsed string
var defaultValue = "abc"
configFoo := configDefinition{
names: []string{"foo", "bar"},
description: "The foo config is utterly useless.",
defaultValue: "abc",
destination: &parsed,
defaultValue: &defaultValue,
}
assert.Equal(t, "foo",
@@ -220,10 +229,14 @@ func TestConfigDefinitionTwoNames(t *testing.T) {
}
func TestConfigDefinitionThreeNames(t *testing.T) {
var parsed string
var defaultValue = "abc"
configFoo := configDefinition{
names: []string{"foo", "bar", "baz"},
description: "The foo config is utterly useless.",
defaultValue: "abc",
destination: &parsed,
defaultValue: &defaultValue,
}
assert.Equal(t, "foo",
@@ -239,9 +252,6 @@ type testState struct {
server *server
mockStdinW *io.PipeWriter
mockStdoutReader *bufio.Reader
// readLineTimeout is the maximum duration to wait for [server] to write a
// line to the mock stdout.
readLineTimeout time.Duration
fstestRun *fstest.Run
remoteName string
@@ -260,11 +270,6 @@ func makeTestState(t *testing.T) testState {
},
mockStdinW: stdinW,
mockStdoutReader: bufio.NewReader(stdoutR),
// The default readLineTimeout must be large enough to accommodate slow
// operations on real remotes. Without a timeout, attempts to read a
// line that's never written would block indefinitely.
readLineTimeout: time.Second * 30,
}
}
@@ -272,52 +277,18 @@ func (h *testState) requireRemoteIsEmpty() {
h.fstestRun.CheckRemoteItems(h.t)
}
// readLineWithTimeout attempts to read a line from the mock stdout. Returns an
// error if the read operation times out or fails for any reason.
func (h *testState) readLineWithTimeout() (string, error) {
ctx, cancel := context.WithTimeout(context.Background(), h.readLineTimeout)
defer cancel()
lineChan := make(chan string)
errChan := make(chan error)
go func() {
line, err := h.mockStdoutReader.ReadString('\n')
if err != nil {
errChan <- err
} else {
lineChan <- line
}
}()
select {
case line := <-lineChan:
return line, nil
case err := <-errChan:
return "", err
case <-ctx.Done():
return "", fmt.Errorf("attempt to read line timed out: %w", ctx.Err())
}
}
// requireReadLineExact requires that a line matching wantLine can be read from
// the mock stdout.
func (h *testState) requireReadLineExact(wantLine string) {
receivedLine, err := h.readLineWithTimeout()
func (h *testState) requireReadLineExact(line string) {
receivedLine, err := h.mockStdoutReader.ReadString('\n')
require.NoError(h.t, err)
require.Equal(h.t, wantLine+"\n", receivedLine)
require.Equal(h.t, line+"\n", receivedLine)
}
// requireReadLine requires that a line can be read from the mock stdout and
// returns the line.
func (h *testState) requireReadLine() string {
receivedLine, err := h.readLineWithTimeout()
receivedLine, err := h.mockStdoutReader.ReadString('\n')
require.NoError(h.t, err)
return receivedLine
}
// requireWriteLine requires that the given line is successfully written to the
// mock stdin.
func (h *testState) requireWriteLine(line string) {
_, err := h.mockStdinW.Write([]byte(line + "\n"))
require.NoError(h.t, err)
@@ -491,7 +462,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE " + h.remotePrefix)
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE frankencase")
h.requireWriteLine("VALUE foo")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, h.server.configRcloneRemoteName, h.remoteName)
@@ -501,35 +472,6 @@ var fstestTestCases = []testCase{
require.NoError(t, h.mockStdinW.Close())
},
},
{
label: "HandlesPrepareWithUnknownLayout",
testProtocolFunc: func(t *testing.T, h *testState) {
h.requireReadLineExact("VERSION 1")
h.requireWriteLine("EXTENSIONS INFO") // Advertise that we support the INFO extension
h.requireReadLineExact("EXTENSIONS")
require.True(t, h.server.extensionInfo)
h.requireWriteLine("PREPARE")
h.requireReadLineExact("GETCONFIG rcloneremotename")
h.requireWriteLine("VALUE " + h.remoteName)
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE " + h.remotePrefix)
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE nonexistentLayoutMode")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, h.server.configRcloneRemoteName, h.remoteName)
require.Equal(t, h.server.configPrefix, h.remotePrefix)
require.True(t, h.server.configsDone)
h.requireWriteLine("INITREMOTE")
h.requireReadLineExact("INITREMOTE-FAILURE unknown layout mode: nonexistentLayoutMode")
require.NoError(t, h.mockStdinW.Close())
},
expectedError: "unknown layout mode: nonexistentLayoutMode",
},
{
label: "HandlesPrepareWithNonexistentRemote",
testProtocolFunc: func(t *testing.T, h *testState) {
@@ -545,7 +487,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE " + h.remotePrefix)
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE frankencase")
h.requireWriteLine("VALUE foo")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, h.server.configRcloneRemoteName, "thisRemoteDoesNotExist")
@@ -553,11 +495,11 @@ var fstestTestCases = []testCase{
require.True(t, h.server.configsDone)
h.requireWriteLine("INITREMOTE")
h.requireReadLineExact("INITREMOTE-FAILURE remote does not exist or incorrectly contains a path: thisRemoteDoesNotExist")
h.requireReadLineExact("INITREMOTE-FAILURE remote does not exist: thisRemoteDoesNotExist")
require.NoError(t, h.mockStdinW.Close())
},
expectedError: "remote does not exist or incorrectly contains a path: thisRemoteDoesNotExist",
expectedError: "remote does not exist: thisRemoteDoesNotExist",
},
{
label: "HandlesPrepareWithPathAsRemote",
@@ -574,7 +516,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE /foo")
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE frankencase")
h.requireWriteLine("VALUE foo")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, h.server.configRcloneRemoteName, h.remotePrefix)
@@ -584,13 +526,13 @@ var fstestTestCases = []testCase{
h.requireWriteLine("INITREMOTE")
require.Regexp(t,
regexp.MustCompile("^INITREMOTE-FAILURE remote does not exist or incorrectly contains a path: "),
regexp.MustCompile("^INITREMOTE-FAILURE remote does not exist: "),
h.requireReadLine(),
)
require.NoError(t, h.mockStdinW.Close())
},
expectedError: "remote does not exist or incorrectly contains a path:",
expectedError: "remote does not exist:",
},
{
label: "HandlesPrepareWithNonexistentBackendAsRemote",
@@ -602,7 +544,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE /foo")
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE frankencase")
h.requireWriteLine("VALUE foo")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, ":nonexistentBackend:", h.server.configRcloneRemoteName)
@@ -626,7 +568,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE /foo")
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE frankencase")
h.requireWriteLine("VALUE foo")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, ":local:", h.server.configRcloneRemoteName)
@@ -649,7 +591,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE /foo")
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE frankencase")
h.requireWriteLine("VALUE foo")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, ":local", h.server.configRcloneRemoteName)
@@ -657,11 +599,11 @@ var fstestTestCases = []testCase{
require.True(t, h.server.configsDone)
h.requireWriteLine("INITREMOTE")
h.requireReadLineExact("INITREMOTE-FAILURE remote could not be parsed: :local")
h.requireReadLineExact("INITREMOTE-FAILURE remote could not be parsed as a backend: :local")
require.NoError(t, h.mockStdinW.Close())
},
expectedError: "remote could not be parsed:",
expectedError: "remote could not be parsed as a backend:",
},
{
label: "HandlesPrepareWithBackendContainingOptionsAsRemote",
@@ -673,7 +615,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE /foo")
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE frankencase")
h.requireWriteLine("VALUE foo")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, ":local,description=banana:", h.server.configRcloneRemoteName)
@@ -696,7 +638,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE /foo")
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE frankencase")
h.requireWriteLine("VALUE foo")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, ":local,description=banana:/bad/path", h.server.configRcloneRemoteName)
@@ -704,38 +646,14 @@ var fstestTestCases = []testCase{
require.True(t, h.server.configsDone)
h.requireWriteLine("INITREMOTE")
h.requireReadLineExact("INITREMOTE-FAILURE remote does not exist or incorrectly contains a path: :local,description=banana:/bad/path")
require.NoError(t, h.mockStdinW.Close())
},
expectedError: "remote does not exist or incorrectly contains a path:",
},
{
label: "HandlesPrepareWithRemoteContainingOptions",
testProtocolFunc: func(t *testing.T, h *testState) {
const envVar = "RCLONE_CONFIG_fake_remote_TYPE"
require.NoError(t, os.Setenv(envVar, "memory"))
t.Cleanup(func() { require.NoError(t, os.Unsetenv(envVar)) })
h.requireReadLineExact("VERSION 1")
h.requireWriteLine("PREPARE")
h.requireReadLineExact("GETCONFIG rcloneremotename")
h.requireWriteLine("VALUE fake_remote,banana=yes:")
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE /foo")
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE frankencase")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, "fake_remote,banana=yes:", h.server.configRcloneRemoteName)
require.Equal(t, "/foo", h.server.configPrefix)
require.True(t, h.server.configsDone)
h.requireWriteLine("INITREMOTE")
h.requireReadLineExact("INITREMOTE-SUCCESS")
require.Regexp(t,
regexp.MustCompile("^INITREMOTE-FAILURE backend must not have a path: "),
h.requireReadLine(),
)
require.NoError(t, h.mockStdinW.Close())
},
expectedError: "backend must not have a path:",
},
{
label: "HandlesPrepareWithSynonyms",
@@ -756,7 +674,7 @@ var fstestTestCases = []testCase{
h.requireReadLineExact("GETCONFIG rcloneprefix")
h.requireWriteLine("VALUE " + h.remotePrefix)
h.requireReadLineExact("GETCONFIG rclonelayout")
h.requireWriteLine("VALUE frankencase")
h.requireWriteLine("VALUE foo")
h.requireReadLineExact("PREPARE-SUCCESS")
require.Equal(t, h.server.configRcloneRemoteName, h.remoteName)
@@ -1363,46 +1281,6 @@ var fstestTestCases = []testCase{
},
}
// TestReadLineHasShortDeadline verifies that [testState.readLineWithTimeout]
// does not block indefinitely when a line is never written.
func TestReadLineHasShortDeadline(t *testing.T) {
const timeoutForRead = time.Millisecond * 50
const timeoutForTest = time.Millisecond * 100
const tickDuration = time.Millisecond * 10
type readLineResult struct {
line string
err error
}
resultChan := make(chan readLineResult)
go func() {
defer close(resultChan)
h := makeTestState(t)
h.readLineTimeout = timeoutForRead
line, err := h.readLineWithTimeout()
resultChan <- readLineResult{line, err}
}()
// This closure will be run periodically until time runs out or until all of
// its assertions pass.
idempotentConditionFunc := func(c *assert.CollectT) {
result, ok := <-resultChan
require.True(c, ok, "The goroutine should send a result")
require.Empty(c, result.line, "No line should be read")
require.ErrorIs(c, result.err, context.DeadlineExceeded)
_, ok = <-resultChan
require.False(c, ok, "The channel should be closed")
}
require.EventuallyWithT(t, idempotentConditionFunc, timeoutForTest, tickDuration)
}
// TestMain drives the tests
func TestMain(m *testing.M) {
fstest.TestMain(m)
@@ -1433,27 +1311,23 @@ func TestGitAnnexFstestBackendCases(t *testing.T) {
handle.remoteName = remoteName
handle.remotePrefix = remotePath
serverErrorChan := make(chan error)
var wg sync.WaitGroup
wg.Add(1)
go func() {
// Run the gitannex server and send the result back to the
// goroutine associated with `t`. We can't use `require` here
// because it could call `t.FailNow()`, which says it must be
// called on the goroutine associated with the test.
serverErrorChan <- handle.server.run()
err := handle.server.run()
if testCase.expectedError == "" {
require.NoError(t, err)
} else {
require.ErrorContains(t, err, testCase.expectedError)
}
wg.Done()
}()
defer wg.Wait()
testCase.testProtocolFunc(t, &handle)
serverError, ok := <-serverErrorChan
require.True(t, ok, "Should receive one error/nil from server")
require.Empty(t, serverErrorChan)
if testCase.expectedError == "" {
require.NoError(t, serverError)
} else {
require.ErrorContains(t, serverError, testCase.expectedError)
}
})
}
}
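// The channel handoff described in the comment above, in miniature (a sketch
// with a hypothetical doWork; assertions stay on the test goroutine because
// require may call t.FailNow):
//
//	errChan := make(chan error, 1)
//	go func() {
//		errChan <- doWork() // the worker never asserts itself
//	}()
//	require.NoError(t, <-errChan) // asserted on the test goroutine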

View File

@@ -191,6 +191,7 @@ func setupRootCommand(rootCmd *cobra.Command) {
})
cobra.OnInitialize(initConfig)
}
// Traverse the tree of commands running fn on each

View File

@@ -6,8 +6,6 @@ package ncdu
import (
"context"
"fmt"
"log/slog"
"os"
"path"
"reflect"
"sort"
@@ -927,19 +925,23 @@ func (u *UI) Run() error {
return fmt.Errorf("screen init: %w", err)
}
// Hijack log output so that it doesn't corrupt the screen.
if !log.Redirected() {
var logs []string
log.Handler.SetOutput(func(level slog.Level, text string) {
// Hijack fs.LogOutput so that it doesn't corrupt the screen.
if logOutput := fs.LogOutput; !log.Redirected() {
type log struct {
text string
level fs.LogLevel
}
var logs []log
fs.LogOutput = func(level fs.LogLevel, text string) {
if len(logs) > 100 {
logs = logs[len(logs)-100:]
}
logs = append(logs, text)
})
logs = append(logs, log{level: level, text: text})
}
defer func() {
log.Handler.ResetOutput()
for _, text := range logs {
_, _ = os.Stderr.WriteString(text)
fs.LogOutput = logOutput
for i := range logs {
logOutput(logs[i].level, logs[i].text)
}
}()
}
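// A minimal standalone sketch of the bounded buffer used above: trim to the
// most recent 100 entries before appending, exactly as the closure does.
//
//	func appendBounded(logs []string, text string) []string {
//		if len(logs) > 100 {
//			logs = logs[len(logs)-100:]
//		}
//		return append(logs, text)
//	}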

View File

@@ -11,8 +11,6 @@ import (
"testing"
"github.com/rclone/rclone/cmd/serve/nfs"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfstest"
"github.com/stretchr/testify/require"
@@ -40,7 +38,7 @@ func TestMount(t *testing.T) {
nfs.Opt.HandleCacheDir = t.TempDir()
require.NoError(t, nfs.Opt.HandleCache.Set(cacheType))
// Check we can create a handler
_, err := nfs.NewHandler(context.Background(), vfs.New(object.MemoryFs, nil), &nfs.Opt)
_, err := nfs.NewHandler(context.Background(), nil, &nfs.Opt)
if errors.Is(err, nfs.ErrorSymlinkCacheNotSupported) || errors.Is(err, nfs.ErrorSymlinkCacheNoPermission) {
t.Skip(err.Error() + ": run with: go test -c && sudo setcap cap_dac_read_search+ep ./nfsmount.test && ./nfsmount.test -test.v")
}

View File

@@ -5,11 +5,11 @@ package cmd
import (
"bytes"
"fmt"
"log/slog"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/operations"
@@ -19,6 +19,8 @@ import (
const (
// interval between progress prints
defaultProgressInterval = 500 * time.Millisecond
// time format for logging
logTimeFormat = "2006/01/02 15:04:05"
)
// startProgress starts the progress bar printing
@@ -26,13 +28,15 @@ const (
// It returns a func which should be called to stop the stats.
func startProgress() func() {
stopStats := make(chan struct{})
oldLogOutput := fs.LogOutput
oldSyncPrint := operations.SyncPrintf
if !log.Redirected() {
// Intercept the log calls if not logging to file or syslog
log.Handler.SetOutput(func(level slog.Level, text string) {
printProgress(text)
})
fs.LogOutput = func(level fs.LogLevel, text string) {
printProgress(fmt.Sprintf("%s %-6s: %s", time.Now().Format(logTimeFormat), level, text))
}
}
// Intercept output from functions such as HashLister to stdout
@@ -56,10 +60,7 @@ func startProgress() func() {
case <-stopStats:
ticker.Stop()
printProgress("")
if !log.Redirected() {
// Reset intercept of the log calls
log.Handler.ResetOutput()
}
fs.LogOutput = oldLogOutput
operations.SyncPrintf = oldSyncPrint
fmt.Println("")
return
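// For reference, logTimeFormat is a Go reference-time layout, so the
// intercepted lines render like (example output only):
//
//	time.Now().Format(logTimeFormat) // "2025/03/10 18:33:43"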

View File

@@ -3,7 +3,6 @@ package dlna
import (
"bytes"
"context"
"encoding/xml"
"fmt"
"net"
@@ -20,12 +19,9 @@ import (
"github.com/anacrolix/dms/upnp"
"github.com/anacrolix/log"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve"
"github.com/rclone/rclone/cmd/serve/dlna/data"
"github.com/rclone/rclone/cmd/serve/dlna/dlnaflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/lib/systemd"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
@@ -33,63 +29,9 @@ import (
"github.com/spf13/cobra"
)
// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{{
Name: "addr",
Default: ":7879",
Help: "The ip:port or :port to bind the DLNA http server to",
}, {
Name: "name",
Default: "",
Help: "Name of DLNA server",
}, {
Name: "log_trace",
Default: false,
Help: "Enable trace logging of SOAP traffic",
}, {
Name: "interface",
Default: []string{},
Help: "The interface to use for SSDP (repeat as necessary)",
}, {
Name: "announce_interval",
Default: fs.Duration(12 * time.Minute),
Help: "The interval between SSDP announcements",
}}
// Options is the type for DLNA serving options.
type Options struct {
ListenAddr string `config:"addr"`
FriendlyName string `config:"name"`
LogTrace bool `config:"log_trace"`
InterfaceNames []string `config:"interface"`
AnnounceInterval fs.Duration `config:"announce_interval"`
}
// Opt contains the options for DLNA serving.
var Opt Options
func init() {
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "dlna", Opt: &Opt, Options: OptionsInfo})
flagSet := Command.Flags()
flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
vfsflags.AddFlags(flagSet)
serve.Command.AddCommand(Command)
serve.AddRc("dlna", func(ctx context.Context, f fs.Fs, in rc.Params) (serve.Handle, error) {
// Read VFS Opts
var vfsOpt = vfscommon.Opt // set default opts
err := configstruct.SetAny(in, &vfsOpt)
if err != nil {
return nil, err
}
// Read opts
var opt = Opt // set default opts
err = configstruct.SetAny(in, &opt)
if err != nil {
return nil, err
}
// Create server
return newServer(ctx, f, &opt, &vfsOpt)
})
dlnaflags.AddFlags(Command.Flags())
vfsflags.AddFlags(Command.Flags())
}
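// Hypothetical rc invocation equivalent to this registration (parameter
// names follow the serve/start help quoted later in this diff):
//
//	rclone rc serve/start type=dlna fs=remote: addr=:7879 name=myserver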
// Command definition for cobra.
@@ -111,19 +53,7 @@ Rclone will add external subtitle files (.srt) to videos if they have the same
filename as the video file itself (except the extension), either in the same
directory as the video, or in a "Subs" subdirectory.
### Server options
Use ` + "`--addr`" + ` to specify which IP address and port the server should
listen on, e.g. ` + "`--addr 1.2.3.4:8000` or `--addr :8080`" + ` to listen to all
IPs.
Use ` + "`--name`" + ` to choose the friendly server name, which is by
default "rclone (hostname)".
Use ` + "`--log-trace` in conjunction with `-vv`" + ` to enable additional debug
logging of all UPNP traffic.
` + vfs.Help(),
` + dlnaflags.Help + vfs.Help(),
Annotations: map[string]string{
"versionIntroduced": "v1.46",
"groups": "Filter",
@@ -133,12 +63,16 @@ logging of all UPNP traffic.
f := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
s, err := newServer(context.Background(), f, &Opt, &vfscommon.Opt)
s, err := newServer(f, &dlnaflags.Opt)
if err != nil {
return err
}
if err := s.Serve(); err != nil {
return err
}
defer systemd.Notify()()
return s.Serve()
s.Wait()
return nil
})
},
}
@@ -174,7 +108,7 @@ type server struct {
vfs *vfs.VFS
}
func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options) (*server, error) {
func newServer(f fs.Fs, opt *dlnaflags.Options) (*server, error) {
friendlyName := opt.FriendlyName
if friendlyName == "" {
friendlyName = makeDefaultFriendlyName()
@@ -203,7 +137,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
waitChan: make(chan struct{}),
httpListenAddr: opt.ListenAddr,
f: f,
vfs: vfs.New(f, vfsOpt),
vfs: vfs.New(f, &vfscommon.Opt),
}
s.services = map[string]UPnPService{
@@ -234,19 +168,6 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
http.FileServer(data.Assets))))
s.handler = logging(withHeader("Server", serverField, r))
// Currently, the SSDP server only listens on an IPv4 multicast address.
// Differentiate between two INADDR_ANY addresses,
// so that 0.0.0.0 can only listen on IPv4 addresses.
network := "tcp4"
if strings.Count(s.httpListenAddr, ":") > 1 {
network = "tcp"
}
listener, err := net.Listen(network, s.httpListenAddr)
if err != nil {
return nil, err
}
s.HTTPConn = listener
return s, nil
}
@@ -367,9 +288,24 @@ func (s *server) resourceHandler(w http.ResponseWriter, r *http.Request) {
http.ServeContent(w, r, remotePath, node.ModTime(), in)
}
// Serve runs the server - returns the error only if the listener was
// not started. Blocks until the server is closed.
// Serve runs the server - returns the error only if
// the listener was not started; does not block, so
// use s.Wait() to block on the listener indefinitely.
func (s *server) Serve() (err error) {
if s.HTTPConn == nil {
// Currently, the SSDP server only listens on an IPv4 multicast address.
// Differentiate between two INADDR_ANY addresses,
// so that 0.0.0.0 can only listen on IPv4 addresses.
network := "tcp4"
if strings.Count(s.httpListenAddr, ":") > 1 {
network = "tcp"
}
s.HTTPConn, err = net.Listen(network, s.httpListenAddr)
if err != nil {
return
}
}
go func() {
s.startSSDP()
}()
@@ -383,7 +319,6 @@ func (s *server) Serve() (err error) {
}
}()
s.Wait()
return nil
}
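// The listener-network heuristic above, isolated as a sketch (listenNetwork
// is an invented name, not part of this change):
//
//	func listenNetwork(listenAddr string) string {
//		// More than one colon implies an IPv6 literal such as "[::]:7879",
//		// so listen on both stacks; otherwise pin to IPv4 for SSDP.
//		if strings.Count(listenAddr, ":") > 1 {
//			return "tcp"
//		}
//		return "tcp4"
//	}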
@@ -392,19 +327,13 @@ func (s *server) Wait() {
<-s.waitChan
}
// Shutdown the DLNA server
func (s *server) Shutdown() error {
func (s *server) Close() {
err := s.HTTPConn.Close()
close(s.waitChan)
if err != nil {
return fmt.Errorf("failed to shutdown DLNA server: %w", err)
fs.Errorf(s.f, "Error closing HTTP server: %v", err)
return
}
return nil
}
// Return the first address of the server
func (s *server) Addr() net.Addr {
return s.HTTPConn.Addr()
close(s.waitChan)
}
// Run SSDP (multicast for server discovery) on all interfaces.

View File

@@ -13,13 +13,11 @@ import (
"github.com/anacrolix/dms/soap"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/dlna/dlnaflags"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -35,14 +33,12 @@ const (
)
func startServer(t *testing.T, f fs.Fs) {
opt := Opt
opt := dlnaflags.Opt
opt.ListenAddr = testBindAddress
var err error
dlnaServer, err = newServer(context.Background(), f, &opt, &vfscommon.Opt)
dlnaServer, err = newServer(f, &opt)
assert.NoError(t, err)
go func() {
assert.NoError(t, dlnaServer.Serve())
}()
assert.NoError(t, dlnaServer.Serve())
baseURL = "http://" + dlnaServer.HTTPConn.Addr().String()
}
@@ -275,10 +271,3 @@ func TestContentDirectoryBrowseDirectChildren(t *testing.T) {
}
}
func TestRc(t *testing.T) {
servetest.TestRc(t, rc.Params{
"type": "dlna",
"vfs_cache_mode": "off",
})
}

View File

@@ -0,0 +1,69 @@
// Package dlnaflags provides utility functionality to DLNA.
package dlnaflags
import (
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/spf13/pflag"
)
// Help contains the text for the command line help and manual.
var Help = `### Server options
Use ` + "`--addr`" + ` to specify which IP address and port the server should
listen on, e.g. ` + "`--addr 1.2.3.4:8000` or `--addr :8080`" + ` to listen to all
IPs.
Use ` + "`--name`" + ` to choose the friendly server name, which is by
default "rclone (hostname)".
Use ` + "`--log-trace` in conjunction with `-vv`" + ` to enable additional debug
logging of all UPNP traffic.
`
// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{{
Name: "addr",
Default: ":7879",
Help: "The ip:port or :port to bind the DLNA http server to",
}, {
Name: "name",
Default: "",
Help: "Name of DLNA server",
}, {
Name: "log_trace",
Default: false,
Help: "Enable trace logging of SOAP traffic",
}, {
Name: "interface",
Default: []string{},
Help: "The interface to use for SSDP (repeat as necessary)",
}, {
Name: "announce_interval",
Default: fs.Duration(12 * time.Minute),
Help: "The interval between SSDP announcements",
}}
func init() {
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "dlna", Opt: &Opt, Options: OptionsInfo})
}
// Options is the type for DLNA serving options.
type Options struct {
ListenAddr string `config:"addr"`
FriendlyName string `config:"name"`
LogTrace bool `config:"log_trace"`
InterfaceNames []string `config:"interface"`
AnnounceInterval fs.Duration `config:"announce_interval"`
}
// Opt contains the options for DLNA serving.
var Opt Options
// AddFlags add the command line flags for DLNA serving.
func AddFlags(flagSet *pflag.FlagSet) {
flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
}

View File

@@ -12,7 +12,6 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/cmd/serve"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
@@ -51,8 +50,6 @@ func init() {
// Add common mount/vfs flags
mountlib.AddFlags(cmdFlags)
vfsflags.AddFlags(cmdFlags)
// Register with parent command
serve.Command.AddCommand(Command)
}
// Command definition for cobra

View File

@@ -18,16 +18,13 @@ import (
"time"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"
@@ -73,8 +70,8 @@ type Options struct {
ListenAddr string `config:"addr"` // Port to listen on
PublicIP string `config:"public_ip"` // Passive ports range
PassivePorts string `config:"passive_port"` // Passive ports range
User string `config:"user"` // single username for basic auth if not using Htpasswd
Pass string `config:"pass"` // password for User
BasicUser string `config:"user"` // single username for basic auth if not using Htpasswd
BasicPass string `config:"pass"` // password for BasicUser
TLSCert string `config:"cert"` // TLS PEM key (concatenation of certificate and CA certificate)
TLSKey string `config:"key"` // TLS PEM Private key
}
@@ -91,29 +88,6 @@ func init() {
vfsflags.AddFlags(Command.Flags())
proxyflags.AddFlags(Command.Flags())
AddFlags(Command.Flags())
serve.Command.AddCommand(Command)
serve.AddRc("ftp", func(ctx context.Context, f fs.Fs, in rc.Params) (serve.Handle, error) {
// Read VFS Opts
var vfsOpt = vfscommon.Opt // set default opts
err := configstruct.SetAny(in, &vfsOpt)
if err != nil {
return nil, err
}
// Read Proxy Opts
var proxyOpt = proxy.Opt // set default opts
err = configstruct.SetAny(in, &proxyOpt)
if err != nil {
return nil, err
}
// Read opts
var opt = Opt // set default opts
err = configstruct.SetAny(in, &opt)
if err != nil {
return nil, err
}
// Create server
return newServer(ctx, f, &opt, &vfsOpt, &proxyOpt)
})
}
// Command definition for cobra
@@ -147,18 +121,18 @@ You can set a single username and password with the --user and --pass flags.
},
Run: func(command *cobra.Command, args []string) {
var f fs.Fs
if proxy.Opt.AuthProxy == "" {
if proxyflags.Opt.AuthProxy == "" {
cmd.CheckArgs(1, 1, command, args)
f = cmd.NewFsSrc(args)
} else {
cmd.CheckArgs(0, 0, command, args)
}
cmd.Run(false, false, command, func() error {
s, err := newServer(context.Background(), f, &Opt, &vfscommon.Opt, &proxy.Opt)
s, err := newServer(context.Background(), f, &Opt)
if err != nil {
return err
}
return s.Serve()
return s.serve()
})
},
}
@@ -183,7 +157,7 @@ func init() {
var passivePortsRe = regexp.MustCompile(`^\s*\d+\s*-\s*\d+\s*$`)
// Make a new FTP to serve the remote
func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options, proxyOpt *proxy.Options) (*driver, error) {
func newServer(ctx context.Context, f fs.Fs, opt *Options) (*driver, error) {
host, port, err := net.SplitHostPort(opt.ListenAddr)
if err != nil {
return nil, fmt.Errorf("failed to parse host:port from %q", opt.ListenAddr)
@@ -198,11 +172,11 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
ctx: ctx,
opt: *opt,
}
if proxy.Opt.AuthProxy != "" {
d.proxy = proxy.New(ctx, proxyOpt, vfsOpt)
if proxyflags.Opt.AuthProxy != "" {
d.proxy = proxy.New(ctx, &proxyflags.Opt)
d.userPass = make(map[string]string, 16)
} else {
d.globalVFS = vfs.New(f, vfsOpt)
d.globalVFS = vfs.New(f, &vfscommon.Opt)
}
d.useTLS = d.opt.TLSKey != ""
@@ -234,58 +208,20 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
return d, nil
}
// Serve runs the FTP server until it is shutdown
func (d *driver) Serve() error {
// serve runs the ftp server
func (d *driver) serve() error {
fs.Logf(d.f, "Serving FTP on %s", d.srv.Hostname+":"+strconv.Itoa(d.srv.Port))
err := d.srv.ListenAndServe()
if err == ftp.ErrServerClosed {
err = nil
}
return err
return d.srv.ListenAndServe()
}
// Shutdown stops the ftp server
// close stops the ftp server
//
//lint:ignore U1000 unused when not building linux
func (d *driver) Shutdown() error {
func (d *driver) close() error {
fs.Logf(d.f, "Stopping FTP on %s", d.srv.Hostname+":"+strconv.Itoa(d.srv.Port))
return d.srv.Shutdown()
}
// Return the first address of the server
func (d *driver) Addr() net.Addr {
// The FTP server doesn't let us read the listener
// so we have to synthesize the net.Addr here.
// On errors we'll return a zero item or zero parts.
addr := &net.TCPAddr{}
// Split host and port
host, port, err := net.SplitHostPort(d.opt.ListenAddr)
if err != nil {
fs.Errorf(nil, "ftp: addr: invalid address format: %v", err)
return addr
}
// Parse port
addr.Port, err = strconv.Atoi(port)
if err != nil {
fs.Errorf(nil, "ftp: addr: invalid port number: %v", err)
}
// Resolve the host to an IP address.
ipAddrs, err := net.LookupIP(host)
if err != nil {
fs.Errorf(nil, "ftp: addr: failed to resolve host: %v", err)
} else if len(ipAddrs) == 0 {
fs.Errorf(nil, "ftp: addr: no IP addresses found for host: %s", host)
} else {
// Choose the first IP address.
addr.IP = ipAddrs[0]
}
return addr
}
// Logger ftp logger output formatted message
type Logger struct{}
@@ -333,7 +269,7 @@ func (d *driver) CheckPasswd(sctx *ftp.Context, user, pass string) (ok bool, err
d.userPass[user] = oPass
d.userPassMu.Unlock()
} else {
ok = d.opt.User == user && (d.opt.Pass == "" || d.opt.Pass == pass)
ok = d.opt.BasicUser == user && (d.opt.BasicPass == "" || d.opt.BasicPass == pass)
if !ok {
fs.Infof(nil, "login failed: bad credentials")
return false, nil

View File

@@ -12,15 +12,12 @@ import (
"testing"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/lib/israce"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/stretchr/testify/assert"
ftp "goftp.io/server/v2"
)
const (
@@ -39,16 +36,19 @@ func TestFTP(t *testing.T) {
opt := Opt
opt.ListenAddr = testHOST + ":" + testPORT
opt.PassivePorts = testPASSIVEPORTRANGE
opt.User = testUSER
opt.Pass = testPASS
opt.BasicUser = testUSER
opt.BasicPass = testPASS
w, err := newServer(context.Background(), f, &opt, &vfscommon.Opt, &proxy.Opt)
w, err := newServer(context.Background(), f, &opt)
assert.NoError(t, err)
quit := make(chan struct{})
go func() {
assert.NoError(t, w.Serve())
err := w.serve()
close(quit)
if err != ftp.ErrServerClosed {
assert.NoError(t, err)
}
}()
// Config for the backend we'll use to connect to the server
@@ -61,7 +61,7 @@ func TestFTP(t *testing.T) {
}
return config, func() {
err := w.Shutdown()
err := w.close()
assert.NoError(t, err)
<-quit
}
@@ -69,13 +69,3 @@ func TestFTP(t *testing.T) {
servetest.Run(t, "ftp", start)
}
func TestRc(t *testing.T) {
if israce.Enabled {
t.Skip("Skipping under race detector as underlying library is racy")
}
servetest.TestRc(t, rc.Params{
"type": "ftp",
"vfs_cache_mode": "off",
})
}

View File

@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
"net"
"net/http"
"os"
"path"
@@ -16,14 +15,10 @@ import (
"github.com/go-chi/chi/v5/middleware"
"github.com/rclone/rclone/cmd"
cmdserve "github.com/rclone/rclone/cmd/serve"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/rc"
libhttp "github.com/rclone/rclone/lib/http"
"github.com/rclone/rclone/lib/http/serve"
"github.com/rclone/rclone/lib/systemd"
@@ -33,12 +28,6 @@ import (
"github.com/spf13/cobra"
)
// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{}.
Add(libhttp.ConfigInfo).
Add(libhttp.AuthConfigInfo).
Add(libhttp.TemplateConfigInfo)
// Options required for http server
type Options struct {
Auth libhttp.AuthConfig
@@ -56,42 +45,17 @@ var DefaultOpt = Options{
// Opt is options set by command line flags
var Opt = DefaultOpt
func init() {
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "http", Opt: &Opt, Options: OptionsInfo})
}
// flagPrefix is the prefix used to uniquely identify command line flags.
// It is intentionally empty for this package.
const flagPrefix = ""
func init() {
flagSet := Command.Flags()
flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
libhttp.AddAuthFlagsPrefix(flagSet, flagPrefix, &Opt.Auth)
libhttp.AddHTTPFlagsPrefix(flagSet, flagPrefix, &Opt.HTTP)
libhttp.AddTemplateFlagsPrefix(flagSet, flagPrefix, &Opt.Template)
vfsflags.AddFlags(flagSet)
proxyflags.AddFlags(flagSet)
cmdserve.Command.AddCommand(Command)
cmdserve.AddRc("http", func(ctx context.Context, f fs.Fs, in rc.Params) (cmdserve.Handle, error) {
// Read VFS Opts
var vfsOpt = vfscommon.Opt // set default opts
err := configstruct.SetAny(in, &vfsOpt)
if err != nil {
return nil, err
}
// Read Proxy Opts
var proxyOpt = proxy.Opt // set default opts
err = configstruct.SetAny(in, &proxyOpt)
if err != nil {
return nil, err
}
// Read opts
var opt = Opt // set default opts
err = configstruct.SetAny(in, &opt)
if err != nil {
return nil, err
}
// Create server
return newServer(ctx, f, &opt, &vfsOpt, &proxyOpt)
})
}
// Command definition for cobra
@@ -117,7 +81,7 @@ control the stats printing.
},
Run: func(command *cobra.Command, args []string) {
var f fs.Fs
if proxy.Opt.AuthProxy == "" {
if proxyflags.Opt.AuthProxy == "" {
cmd.CheckArgs(1, 1, command, args)
f = cmd.NewFsSrc(args)
} else {
@@ -125,12 +89,14 @@ control the stats printing.
}
cmd.Run(false, true, command, func() error {
s, err := newServer(context.Background(), f, &Opt, &vfscommon.Opt, &proxy.Opt)
s, err := run(context.Background(), f, Opt)
if err != nil {
fs.Fatal(nil, fmt.Sprint(err))
}
defer systemd.Notify()()
return s.Serve()
s.server.Wait()
return nil
})
},
}
@@ -170,19 +136,19 @@ func (s *HTTP) auth(user, pass string) (value any, err error) {
return VFS, err
}
func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options, proxyOpt *proxy.Options) (s *HTTP, err error) {
func run(ctx context.Context, f fs.Fs, opt Options) (s *HTTP, err error) {
s = &HTTP{
f: f,
ctx: ctx,
opt: *opt,
opt: opt,
}
if proxyOpt.AuthProxy != "" {
s.proxy = proxy.New(ctx, proxyOpt, vfsOpt)
if proxyflags.Opt.AuthProxy != "" {
s.proxy = proxy.New(ctx, &proxyflags.Opt)
// override auth
s.opt.Auth.CustomAuthFn = s.auth
} else {
s._vfs = vfs.New(f, vfsOpt)
s._vfs = vfs.New(f, &vfscommon.Opt)
}
s.server, err = libhttp.NewServer(ctx,
@@ -202,24 +168,9 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
router.Get("/*", s.handler)
router.Head("/*", s.handler)
return s, nil
}
// Serve HTTP until the server is shutdown
func (s *HTTP) Serve() error {
s.server.Serve()
s.server.Wait()
return nil
}
// Addr returns the first address of the server
func (s *HTTP) Addr() net.Addr {
return s.server.Addr()
}
// Shutdown the server
func (s *HTTP) Shutdown() error {
return s.server.Shutdown()
return s, nil
}
// handler reads incoming requests and dispatches them

View File

@@ -12,13 +12,10 @@ import (
"time"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/rc"
libhttp "github.com/rclone/rclone/lib/http"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -42,16 +39,13 @@ func start(ctx context.Context, t *testing.T, f fs.Fs) (s *HTTP, testURL string)
},
}
opts.HTTP.ListenAddr = []string{testBindAddress}
if proxy.Opt.AuthProxy == "" {
if proxyflags.Opt.AuthProxy == "" {
opts.Auth.BasicUser = testUser
opts.Auth.BasicPass = testPass
}
s, err := newServer(ctx, f, &opts, &vfscommon.Opt, &proxy.Opt)
s, err := run(ctx, f, opts)
require.NoError(t, err, "failed to start server")
go func() {
require.NoError(t, s.Serve())
}()
urls := s.server.URLs()
require.Len(t, urls, 1, "expected one URL")
@@ -116,9 +110,9 @@ func testGET(t *testing.T, useProxy bool) {
cmd := "go run " + prog + " " + files
// FIXME this is untidy setting a global variable!
proxy.Opt.AuthProxy = cmd
proxyflags.Opt.AuthProxy = cmd
defer func() {
proxy.Opt.AuthProxy = ""
proxyflags.Opt.AuthProxy = ""
}()
f = nil
@@ -273,10 +267,3 @@ func TestGET(t *testing.T) {
func TestAuthProxy(t *testing.T) {
testGET(t, true)
}
func TestRc(t *testing.T) {
servetest.TestRc(t, rc.Params{
"type": "http",
"vfs_cache_mode": "off",
})
}

View File

@@ -3,7 +3,6 @@
package nfs
import (
"bytes"
"crypto/md5"
"encoding/hex"
"errors"
@@ -31,15 +30,6 @@ var (
ErrorSymlinkCacheNoPermission = errors.New("symlink cache must be run as root or with CAP_DAC_READ_SEARCH")
)
// Metadata files have the file handle of their source file with this
// suffixed so we can look them up directly from the file handle.
//
// Note that this is 4 bytes - using a non multiple of 4 will cause
// the Linux NFS client not to be able to read any files.
//
// The value is big endian 0x00000001
var metadataSuffix = []byte{0x00, 0x00, 0x00, 0x01}
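// Illustrative handle layout (hex; the hash value is md5("foobar"), chosen
// only as an example):
//
//	plain file:    3858f62230ac3c915f300c664312c63f
//	metadata file: 3858f62230ac3c915f300c664312c63f 00000001
//
// The 4-byte suffix keeps the total handle length a multiple of 4, which
// the Linux NFS client requires.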
// Cache controls the file handle cache implementation
type Cache interface {
// ToHandle takes a file and represents it with an opaque handle to reference it.
@@ -87,9 +77,7 @@ type diskHandler struct {
write func(fh []byte, cachePath string, fullPath string) ([]byte, error)
read func(fh []byte, cachePath string) ([]byte, error)
remove func(fh []byte, cachePath string) error
suffix func(fh []byte) []byte // returns nil for no suffix or the suffix
handleType int32 //nolint:unused // used by the symlink cache
metadata string // extension for metadata
handleType int32 //nolint:unused // used by the symlink cache
}
// Create a new disk handler
@@ -114,8 +102,6 @@ func newDiskHandler(h *Handler) (dh *diskHandler, err error) {
write: dh.diskCacheWrite,
read: dh.diskCacheRead,
remove: dh.diskCacheRemove,
suffix: dh.diskCacheSuffix,
metadata: h.vfs.Opt.MetadataExtension,
}
fs.Infof("nfs", "Storing handle cache in %q", dh.cacheDir)
return dh, nil
@@ -138,17 +124,6 @@ func (dh *diskHandler) handleToPath(fh []byte) (cachePath string) {
return cachePath
}
// Return true if name represents a metadata file
//
// It returns the underlying path
func (dh *diskHandler) isMetadataFile(name string) (rawName string, found bool) {
if dh.metadata == "" {
return name, false
}
rawName, found = strings.CutSuffix(name, dh.metadata)
return rawName, found
}
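// For example, with the metadata extension set to ".metadata" (as in the
// tests below):
//
//	isMetadataFile("dir/file.metadata") // -> ("dir/file", true)
//	isMetadataFile("dir/file")          // -> ("dir/file", false)
//
// With dh.metadata == "" every name passes through unchanged.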
// ToHandle takes a file and represents it with an opaque handle to reference it.
// In stateless nfs (when it's serving a unix fs) this can be the device + inode
// but we can generalize with a stateful local cache of handed out IDs.
@@ -156,8 +131,6 @@ func (dh *diskHandler) ToHandle(f billy.Filesystem, splitPath []string) (fh []by
dh.mu.Lock()
defer dh.mu.Unlock()
fullPath := path.Join(splitPath...)
// metadata file has file handle of original file
fullPath, isMetadataFile := dh.isMetadataFile(fullPath)
fh = hashPath(fullPath)
cachePath := dh.handleToPath(fh)
cacheDir := filepath.Dir(cachePath)
@@ -171,10 +144,6 @@ func (dh *diskHandler) ToHandle(f billy.Filesystem, splitPath []string) (fh []by
fs.Errorf("nfs", "Couldn't create cache file handle: %v", err)
return fh
}
// metadata file handle is suffixed with metadataSuffix
if isMetadataFile {
fh = append(fh, metadataSuffix...)
}
return fh
}
@@ -183,43 +152,18 @@ func (dh *diskHandler) diskCacheWrite(fh []byte, cachePath string, fullPath stri
return fh, os.WriteFile(cachePath, []byte(fullPath), 0600)
}
var (
errStaleHandle = &nfs.NFSStatusError{NFSStatus: nfs.NFSStatusStale}
)
// Test to see if a fh is a metadata handle and if so return the underlying handle
func (dh *diskHandler) isMetadataHandle(fh []byte) (isMetadata bool, newFh []byte, err error) {
if dh.metadata == "" {
return false, fh, nil
}
suffix := dh.suffix(fh)
if len(suffix) == 0 {
// OK
return false, fh, nil
} else if bytes.Equal(suffix, metadataSuffix) {
return true, fh[:len(fh)-len(suffix)], nil
}
fs.Errorf("nfs", "Bad file handle suffix %X", suffix)
return false, nil, errStaleHandle
}
var errStaleHandle = &nfs.NFSStatusError{NFSStatus: nfs.NFSStatusStale}
// FromHandle converts from an opaque handle to the file it represents
func (dh *diskHandler) FromHandle(fh []byte) (f billy.Filesystem, splitPath []string, err error) {
dh.mu.RLock()
defer dh.mu.RUnlock()
isMetadata, fh, err := dh.isMetadataHandle(fh)
if err != nil {
return nil, nil, err
}
cachePath := dh.handleToPath(fh)
fullPathBytes, err := dh.read(fh, cachePath)
if err != nil {
fs.Errorf("nfs", "Stale handle %q: %v", cachePath, err)
return nil, nil, errStaleHandle
}
if isMetadata {
fullPathBytes = append(fullPathBytes, []byte(dh.metadata)...)
}
splitPath = strings.Split(string(fullPathBytes), "/")
return dh.billyFS, splitPath, nil
}
@@ -233,16 +177,8 @@ func (dh *diskHandler) diskCacheRead(fh []byte, cachePath string) ([]byte, error
func (dh *diskHandler) InvalidateHandle(f billy.Filesystem, fh []byte) error {
dh.mu.Lock()
defer dh.mu.Unlock()
isMetadata, fh, err := dh.isMetadataHandle(fh)
if err != nil {
return err
}
if isMetadata {
// Can't invalidate a metadata handle as it is synthetic
return nil
}
cachePath := dh.handleToPath(fh)
err = dh.remove(fh, cachePath)
err := dh.remove(fh, cachePath)
if err != nil {
fs.Errorf("nfs", "Failed to remove handle %q: %v", cachePath, err)
}
@@ -254,14 +190,6 @@ func (dh *diskHandler) diskCacheRemove(fh []byte, cachePath string) error {
return os.Remove(cachePath)
}
// Return a suffix for the file handle or nil
func (dh *diskHandler) diskCacheSuffix(fh []byte) []byte {
if len(fh) <= md5.Size {
return nil
}
return fh[md5.Size:]
}
// HandleLimit exports how many file handles can be safely stored by this cache.
func (dh *diskHandler) HandleLimit() int {
return math.MaxInt

View File

@@ -5,13 +5,10 @@ package nfs
import (
"context"
"fmt"
"strings"
"sync"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/vfs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -21,8 +18,6 @@ const testSymlinkCache = "go test -c && sudo setcap cap_dac_read_search+ep ./nfs
// Check basic CRUD operations
func testCacheCRUD(t *testing.T, h *Handler, c Cache, fileName string) {
isMetadata := strings.HasSuffix(fileName, ".metadata")
// Check reading a non existent handle returns an error
_, _, err := c.FromHandle([]byte{10})
assert.Error(t, err)
@@ -31,11 +26,6 @@ func testCacheCRUD(t *testing.T, h *Handler, c Cache, fileName string) {
splitPath := []string{"dir", fileName}
fh := c.ToHandle(h.billyFS, splitPath)
assert.True(t, len(fh) > 0)
if isMetadata {
assert.Equal(t, metadataSuffix, fh[len(fh)-len(metadataSuffix):])
} else {
assert.NotEqual(t, metadataSuffix, fh[len(fh)-len(metadataSuffix):])
}
// Read the handle back
newFs, newSplitPath, err := c.FromHandle(fh)
@@ -53,13 +43,8 @@ func testCacheCRUD(t *testing.T, h *Handler, c Cache, fileName string) {
// Check the handle is gone and returning stale handle error
_, _, err = c.FromHandle(fh)
if !isMetadata {
require.Error(t, err)
assert.Equal(t, errStaleHandle, err)
} else {
// Can't invalidate metadata handles
require.NoError(t, err)
}
require.Error(t, err)
assert.Equal(t, errStaleHandle, err)
}
// Thrash the cache operations in parallel on different files
@@ -128,10 +113,8 @@ func TestCache(t *testing.T) {
cacheType := cacheType
t.Run(cacheType.String(), func(t *testing.T) {
h := &Handler{
vfs: vfs.New(object.MemoryFs, nil),
billyFS: billyFS,
}
h.vfs.Opt.MetadataExtension = ".metadata"
h.opt.HandleLimit = 1000
h.opt.HandleCache = cacheType
h.opt.HandleCacheDir = t.TempDir()
@@ -168,10 +151,6 @@ func TestCache(t *testing.T) {
t.Run("ThrashSame", func(t *testing.T) {
testCacheThrashSame(t, h, c)
})
// Metadata file handles only supported on non memory
t.Run("CRUDMetadata", func(t *testing.T) {
testCacheCRUD(t, h, c, "file.metadata")
})
}
})
}

View File

@@ -14,11 +14,8 @@ import (
"strings"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"
@@ -86,24 +83,6 @@ func AddFlags(flagSet *pflag.FlagSet) {
func init() {
vfsflags.AddFlags(Command.Flags())
AddFlags(Command.Flags())
serve.Command.AddCommand(Command)
serve.AddRc("nfs", func(ctx context.Context, f fs.Fs, in rc.Params) (serve.Handle, error) {
// Create VFS
var vfsOpt = vfscommon.Opt // set default opts
err := configstruct.SetAny(in, &vfsOpt)
if err != nil {
return nil, err
}
VFS := vfs.New(f, &vfsOpt)
// Read opts
var opt = Opt // set default opts
err = configstruct.SetAny(in, &opt)
if err != nil {
return nil, err
}
// Create server
return NewServer(ctx, VFS, &opt)
})
}
// Run the command
@@ -190,12 +169,6 @@ Where |$PORT| is the same port number used in the |serve nfs| command
and |$HOSTNAME| is the network address of the machine that |serve nfs|
was run on.
If |--vfs-metadata-extension| is in use then for the |--nfs-cache-type disk|
and |--nfs-cache-type cache| the metadata files will have the file
handle of their parent file suffixed with |0x00, 0x00, 0x00, 0x01|.
This means they can be looked up directly from the parent file handle
if desired.
This command is only available on Unix platforms.
`, "|", "`") + vfs.Help(),

View File

@@ -1,19 +0,0 @@
//go:build unix
// The serving is tested in cmd/nfsmount - here we test anything else
package nfs
import (
"testing"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs/rc"
)
func TestRc(t *testing.T) {
servetest.TestRc(t, rc.Params{
"type": "nfs",
"vfs_cache_mode": "off",
})
}

View File

@@ -27,7 +27,6 @@ package nfs
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"os"
@@ -82,36 +81,10 @@ func (dh *diskHandler) makeSymlinkCache() error {
dh.read = dh.symlinkCacheRead
dh.write = dh.symlinkCacheWrite
dh.remove = dh.symlinkCacheRemove
dh.suffix = dh.symlinkCacheSuffix
return nil
}
// Prefixes a []byte with its length as a 4-byte big-endian integer.
func addLengthPrefix(data []byte) []byte {
length := uint32(len(data))
buf := new(bytes.Buffer)
err := binary.Write(buf, binary.BigEndian, length)
if err != nil {
// This should never fail
panic(err)
}
buf.Write(data)
return buf.Bytes()
}
// Removes the 4-byte big-endian length prefix from a []byte.
func removeLengthPrefix(data []byte) ([]byte, error) {
if len(data) < 4 {
return nil, errors.New("file handle too short")
}
length := binary.BigEndian.Uint32(data[:4])
if int(length) != len(data)-4 {
return nil, errors.New("file handle invalid length")
}
return data[4 : 4+length], nil
}
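// A quick roundtrip of the two helpers above (values illustrative):
//
//	raw := []byte{0xde, 0xad, 0xbe, 0xef}
//	framed := addLengthPrefix(raw)           // 00 00 00 04 de ad be ef
//	back, _ := removeLengthPrefix(framed)    // de ad be ef
//	_, err := removeLengthPrefix(framed[:3]) // "file handle too short"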
// Write the fullPath into cachePath returning the possibly updated fh
//
// This writes the fullPath into the file with the cachePath given and
@@ -142,8 +115,7 @@ func (dh *diskHandler) symlinkCacheWrite(fh []byte, cachePath string, fullPath s
dh.handleType = handle.Type()
}
// Adjust the raw handle so it has a length prefix
return addLengthPrefix(handle.Bytes()), nil
return handle.Bytes(), nil
}
// Read the contents of (fh, cachePath)
@@ -156,12 +128,6 @@ func (dh *diskHandler) symlinkCacheWrite(fh []byte, cachePath string, fullPath s
func (dh *diskHandler) symlinkCacheRead(fh []byte, cachePath string) (fullPath []byte, err error) {
//defer log.Trace(nil, "fh=%x, cachePath=%q", fh, cachePath)("fullPath=%q, err=%v", &fullPath, &err)
// First check and remove the file handle prefix length
fh, err = removeLengthPrefix(fh)
if err != nil {
return nil, fmt.Errorf("symlink cache open by handle at: %w", err)
}
// Find the file with the handle passed in
handle := unix.NewFileHandle(dh.handleType, fh)
fd, err := unix.OpenByHandleAt(unix.AT_FDCWD, handle, unix.O_RDONLY|unix.O_PATH|unix.O_NOFOLLOW) // needs O_PATH for symlinks
@@ -209,15 +175,3 @@ func (dh *diskHandler) symlinkCacheRemove(fh []byte, cachePath string) error {
return os.Remove(cachePath)
}
// Return a suffix for the file handle or nil
func (dh *diskHandler) symlinkCacheSuffix(fh []byte) []byte {
if len(fh) < 4 {
return nil
}
length := int(binary.BigEndian.Uint32(fh[:4])) + 4
if len(fh) <= length {
return nil
}
return fh[length:]
}

View File

@@ -106,23 +106,14 @@ backend that rclone supports.
`, "|", "`")
// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{{
Name: "auth_proxy",
Default: "",
Help: "A program to use to create the backend from the auth",
}}
// Options is options for creating the proxy
type Options struct {
AuthProxy string `config:"auth_proxy"`
AuthProxy string
}
// Opt is the default options
var Opt Options
func init() {
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "proxy", Opt: &Opt, Options: OptionsInfo})
// DefaultOpt is the default values used for Opt
var DefaultOpt = Options{
AuthProxy: "",
}
// Proxy represents a proxy to turn auth requests into a VFS
@@ -131,7 +122,6 @@ type Proxy struct {
vfsCache *libcache.Cache
ctx context.Context // for global config
Opt Options
vfsOpt vfscommon.Options
}
// cacheEntry is what is stored in the vfsCache
@@ -141,15 +131,12 @@ type cacheEntry struct {
}
// New creates a new proxy with the Options passed in
//
// Any VFS are created with the vfsOpt passed in.
func New(ctx context.Context, opt *Options, vfsOpt *vfscommon.Options) *Proxy {
func New(ctx context.Context, opt *Options) *Proxy {
return &Proxy{
ctx: ctx,
Opt: *opt,
cmdLine: strings.Fields(opt.AuthProxy),
vfsCache: libcache.New(),
vfsOpt: *vfsOpt,
}
}
@@ -255,7 +242,7 @@ func (p *Proxy) call(user, auth string, isPublicKey bool) (value any, err error)
// need to in memory. An attacker would find it easier to go
// after the unencrypted password in memory most likely.
entry := cacheEntry{
vfs: vfs.New(f, &p.vfsOpt),
vfs: vfs.New(f, &vfscommon.Opt),
pwHash: sha256.Sum256([]byte(auth)),
}
return entry, true, nil

View File

@@ -13,17 +13,16 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/crypto/ssh"
)
func TestRun(t *testing.T) {
opt := Opt
opt := DefaultOpt
cmd := "go run proxy_code.go"
opt.AuthProxy = cmd
p := New(context.Background(), &opt, &vfscommon.Opt)
p := New(context.Background(), &opt)
t.Run("Normal", func(t *testing.T) {
config, err := p.run(map[string]string{

View File

@@ -7,7 +7,12 @@ import (
"github.com/spf13/pflag"
)
// Options set by command line flags
var (
Opt = proxy.DefaultOpt
)
// AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) {
flags.AddFlagsFromOptions(flagSet, "", proxy.OptionsInfo)
flags.StringVarP(flagSet, &Opt.AuthProxy, "auth-proxy", "", Opt.AuthProxy, "A program to use to create the backend from the auth", "")
}

View File

@@ -1,355 +0,0 @@
package serve
import (
"cmp"
"context"
"errors"
"fmt"
"math/rand/v2"
"net"
"slices"
"sort"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/lib/errcount"
)
// Handle describes what a server can do
type Handle interface {
// Addr returns the listening address of the server
Addr() net.Addr
// Shutdown stops the server
Shutdown() error
// Serve starts the server - doesn't return until Shutdown is called.
Serve() (err error)
}
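The smallest useful Handle blocks in Serve until Shutdown releases it; the dummyServer in the tests further down has exactly this shape. A minimal sketch (the minimalServer name is hypothetical):
// Sketch: a minimal Handle implementation.
type minimalServer struct {
addr net.Addr
done chan struct{}
}
func (m *minimalServer) Addr() net.Addr { return m.addr }
func (m *minimalServer) Shutdown() error { close(m.done); return nil }
func (m *minimalServer) Serve() error { <-m.done; return nil }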
// server describes a running server
type server struct {
ID string `json:"id"` // id of the server
Addr string `json:"addr"` // address of the server
Params rc.Params `json:"params"` // Parameters used to start the server
h Handle `json:"-"` // control the server
errChan chan error `json:"-"` // receive errors from the server process
}
// Fn starts an rclone serve command
type Fn func(ctx context.Context, f fs.Fs, in rc.Params) (Handle, error)
// Globals
var (
// mutex to protect all the variables in this block
serveMu sync.Mutex
// Serve functions available
serveFns = map[string]Fn{}
// Running servers
servers = map[string]*server{}
)
// AddRc adds the named serve function to the rc
func AddRc(name string, serveFunction Fn) {
serveMu.Lock()
defer serveMu.Unlock()
serveFns[name] = serveFunction
}
// q replaces | with ` so backticks can be written inside help strings
func q(s string) string {
return strings.ReplaceAll(s, "|", "`")
}
func init() {
rc.Add(rc.Call{
Path: "serve/start",
AuthRequired: true,
Fn: startRc,
Title: "Create a new server",
Help: q(`Create a new server with the specified parameters.
This takes the following parameters:
- |type| - type of server: |http|, |webdav|, |ftp|, |sftp|, |nfs|, etc.
- |fs| - remote storage path to serve
- |addr| - the ip:port to run the server on, eg ":1234" or "localhost:1234"
Other parameters are as described in the documentation for the
relevant [rclone serve](/commands/rclone_serve/) command line options.
To translate a command line option to an rc parameter, remove the
leading |--| and replace |-| with |_|, so |--vfs-cache-mode| becomes
|vfs_cache_mode|. Note that global parameters must be set with
|_config| and |_filter| as described above.
Examples:
rclone rc serve/start type=nfs fs=remote: addr=:4321 vfs_cache_mode=full
rclone rc serve/start --json '{"type":"nfs","fs":"remote:","addr":":1234","vfs_cache_mode":"full"}'
This will give the reply
|||json
{
"addr": "[::]:4321", // Address the server was started on
"id": "nfs-ecfc6852" // Unique identifier for the server instance
}
|||
Or an error if it failed to start.
Stop the server with |serve/stop| and list the running servers with |serve/list|.
`),
})
}
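A serve type becomes reachable through serve/start once its package registers a constructor matching Fn; the restic, s3, sftp and webdav packages below all follow this pattern. A condensed sketch (the "demo" type, its Opt and newServer are hypothetical):
// Sketch: registering a serve type with the rc.
func init() {
AddRc("demo", func(ctx context.Context, f fs.Fs, in rc.Params) (Handle, error) {
opt := Opt // copy package defaults
if err := configstruct.SetAny(in, &opt); err != nil { // map rc params onto the options struct
return nil, err
}
return newServer(ctx, f, &opt) // returned server must implement Addr, Serve and Shutdown
})
}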
// startRc allows the serve command to be run from rc
func startRc(ctx context.Context, in rc.Params) (out rc.Params, err error) {
serveType, err := in.GetString("type")
if err != nil {
return nil, err
}
serveMu.Lock()
defer serveMu.Unlock()
serveFn := serveFns[serveType]
if serveFn == nil {
return nil, fmt.Errorf("could not find serve type=%q", serveType)
}
// Get Fs.fs to be served from fs parameter in the params
f, err := rc.GetFs(ctx, in)
if err != nil {
return nil, err
}
// Make a background context and copy the config back.
newCtx := context.Background()
newCtx = fs.CopyConfig(newCtx, ctx)
newCtx = filter.CopyConfig(newCtx, ctx)
// Start the server
h, err := serveFn(newCtx, f, in)
if err != nil {
return nil, fmt.Errorf("could not start serve %q: %w", serveType, err)
}
// Start the server running in the background
errChan := make(chan error, 1)
go func() {
errChan <- h.Serve()
close(errChan)
}()
// Wait for a short length of time to see if an error occurred
select {
case err = <-errChan:
if err == nil {
err = errors.New("server stopped immediately")
}
case <-time.After(100 * time.Millisecond):
err = nil
}
if err != nil {
return nil, fmt.Errorf("error when starting serve %q: %w", serveType, err)
}
// Store it for later
runningServer := server{
ID: fmt.Sprintf("%s-%08x", serveType, rand.Uint32()),
Params: in,
Addr: h.Addr().String(),
h: h,
errChan: errChan,
}
servers[runningServer.ID] = &runningServer
out = rc.Params{
"id": runningServer.ID,
"addr": runningServer.Addr,
}
fs.Debugf(f, "Started serve %s on %s", serveType, runningServer.Addr)
return out, nil
}
func init() {
rc.Add(rc.Call{
Path: "serve/stop",
AuthRequired: true,
Fn: stopRc,
Title: "Unserve selected active serve",
Help: q(`Stops a running |serve| instance by ID.
This takes the following parameters:
- id: as returned by serve/start
This will give an empty response if successful or an error if not.
Example:
rclone rc serve/stop id=12345
`),
})
}
// stopRc stops the server process
func stopRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
id, err := in.GetString("id")
if err != nil {
return nil, err
}
serveMu.Lock()
defer serveMu.Unlock()
s := servers[id]
if s == nil {
return nil, fmt.Errorf("server with id=%q not found", id)
}
err = s.h.Shutdown()
<-s.errChan // ignore server return error - likely is "use of closed network connection"
delete(servers, id)
return nil, err
}
func init() {
rc.Add(rc.Call{
Path: "serve/types",
AuthRequired: true,
Fn: serveTypesRc,
Title: "Show all possible serve types",
Help: q(`This shows all possible serve types and returns them as a list.
This takes no parameters and returns
- types: list of serve types, eg "nfs", "sftp", etc
The serve types are strings like "http", "sftp", "nfs" and can
be passed to serve/start as the type parameter.
Eg
rclone rc serve/types
Returns
|||json
{
"types": [
"http",
"sftp",
"nfs"
]
}
|||
`),
})
}
// serveTypesRc returns a list of available serve types.
func serveTypesRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
var serveTypes = []string{}
serveMu.Lock()
defer serveMu.Unlock()
for serveType := range serveFns {
serveTypes = append(serveTypes, serveType)
}
sort.Strings(serveTypes)
return rc.Params{
"types": serveTypes,
}, nil
}
func init() {
rc.Add(rc.Call{
Path: "serve/list",
AuthRequired: true,
Fn: listRc,
Title: "Show running servers",
Help: q(`Show running servers with IDs.
This takes no parameters and returns
- list: list of running serve commands
Each list element will have
- id: ID of the server
- addr: address the server is running on
- params: parameters used to start the server
Eg
rclone rc serve/list
Returns
|||json
{
"list": [
{
"addr": "[::]:4321",
"id": "nfs-ffc2a4e5",
"params": {
"fs": "remote:",
"opt": {
"ListenAddr": ":4321"
},
"type": "nfs",
"vfsOpt": {
"CacheMode": "full"
}
}
}
]
}
|||
`),
})
}
// listRc returns a list of current serves sorted by serve path
func listRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
serveMu.Lock()
defer serveMu.Unlock()
list := []*server{}
for _, item := range servers {
list = append(list, item)
}
slices.SortFunc(list, func(a, b *server) int {
return cmp.Compare(a.ID, b.ID)
})
return rc.Params{
"list": list,
}, nil
}
func init() {
rc.Add(rc.Call{
Path: "serve/stopall",
AuthRequired: true,
Fn: stopAll,
Title: "Stop all active servers",
Help: q(`Stop all active servers.
This will stop all active servers.
rclone rc serve/stopall
`),
})
}
// stopAll shuts all the servers down
func stopAll(_ context.Context, in rc.Params) (out rc.Params, err error) {
serveMu.Lock()
defer serveMu.Unlock()
ec := errcount.New()
for id, s := range servers {
ec.Add(s.h.Shutdown())
<-s.errChan // ignore server return error - likely is "use of closed network connection"
delete(servers, id)
}
return nil, ec.Err("error when stopping server")
}

View File

@@ -1,180 +0,0 @@
package serve
import (
"context"
"errors"
"net"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest/mockfs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type dummyServer struct {
addr *net.TCPAddr
shutdownCh chan struct{}
shutdownCalled bool
}
func (d *dummyServer) Addr() net.Addr {
return d.addr
}
func (d *dummyServer) Shutdown() error {
d.shutdownCalled = true
close(d.shutdownCh)
return nil
}
func (d *dummyServer) Serve() error {
<-d.shutdownCh
return nil
}
func newServer(ctx context.Context, f fs.Fs, in rc.Params) (Handle, error) {
return &dummyServer{
addr: &net.TCPAddr{
IP: net.IPv4(127, 0, 0, 1),
Port: 8080,
},
shutdownCh: make(chan struct{}),
}, nil
}
func newServerError(ctx context.Context, f fs.Fs, in rc.Params) (Handle, error) {
return nil, errors.New("serve error")
}
func newServerImmediateStop(ctx context.Context, f fs.Fs, in rc.Params) (Handle, error) {
h, _ := newServer(ctx, f, in)
close(h.(*dummyServer).shutdownCh)
return h, nil
}
func resetGlobals() {
serveMu.Lock()
defer serveMu.Unlock()
serveFns = make(map[string]Fn)
servers = make(map[string]*server)
}
func newTest(t *testing.T) {
_, err := fs.Find("mockfs")
if err != nil {
mockfs.Register()
}
resetGlobals()
t.Cleanup(resetGlobals)
}
func TestRcStartServeType(t *testing.T) {
newTest(t)
serveStart := rc.Calls.Get("serve/start")
in := rc.Params{"fs": ":mockfs:", "type": "nonexistent"}
_, err := serveStart.Fn(context.Background(), in)
assert.ErrorContains(t, err, "could not find serve type")
}
func TestRcStartServeFnError(t *testing.T) {
newTest(t)
serveStart := rc.Calls.Get("serve/start")
AddRc("error", newServerError)
in := rc.Params{"fs": ":mockfs:", "type": "error"}
_, err := serveStart.Fn(context.Background(), in)
assert.ErrorContains(t, err, "could not start serve")
}
func TestRcStartImmediateStop(t *testing.T) {
newTest(t)
serveStart := rc.Calls.Get("serve/start")
AddRc("immediate", newServerImmediateStop)
in := rc.Params{"fs": ":mockfs:", "type": "immediate"}
_, err := serveStart.Fn(context.Background(), in)
assert.ErrorContains(t, err, "server stopped immediately")
}
func TestRcStartAndStop(t *testing.T) {
newTest(t)
serveStart := rc.Calls.Get("serve/start")
serveStop := rc.Calls.Get("serve/stop")
AddRc("dummy", newServer)
in := rc.Params{"fs": ":mockfs:", "type": "dummy"}
out, err := serveStart.Fn(context.Background(), in)
require.NoError(t, err)
id := out["id"].(string)
assert.Contains(t, id, "dummy")
assert.Equal(t, 1, len(servers))
_, err = serveStop.Fn(context.Background(), rc.Params{"id": id})
require.NoError(t, err)
assert.Equal(t, 0, len(servers))
}
func TestRcStopNonexistent(t *testing.T) {
newTest(t)
serveStop := rc.Calls.Get("serve/stop")
_, err := serveStop.Fn(context.Background(), rc.Params{"id": "nonexistent"})
assert.ErrorContains(t, err, "not found")
}
func TestRcServeTypes(t *testing.T) {
newTest(t)
serveTypes := rc.Calls.Get("serve/types")
AddRc("a", newServer)
AddRc("c", newServer)
AddRc("b", newServer)
out, err := serveTypes.Fn(context.Background(), nil)
require.NoError(t, err)
types := out["types"].([]string)
assert.Equal(t, types, []string{"a", "b", "c"})
}
func TestRcList(t *testing.T) {
newTest(t)
serveStart := rc.Calls.Get("serve/start")
serveList := rc.Calls.Get("serve/list")
AddRc("dummy", newServer)
// Start two servers.
_, err := serveStart.Fn(context.Background(), rc.Params{"fs": ":mockfs:", "type": "dummy"})
require.NoError(t, err)
_, err = serveStart.Fn(context.Background(), rc.Params{"fs": ":mockfs:", "type": "dummy"})
require.NoError(t, err)
// Check list
out, err := serveList.Fn(context.Background(), nil)
require.NoError(t, err)
list := out["list"].([]*server)
assert.Equal(t, 2, len(list))
}
func TestRcStopAll(t *testing.T) {
newTest(t)
serveStart := rc.Calls.Get("serve/start")
serveStopAll := rc.Calls.Get("serve/stopall")
AddRc("dummy", newServer)
_, err := serveStart.Fn(context.Background(), rc.Params{"fs": ":mockfs:", "type": "dummy"})
require.NoError(t, err)
_, err = serveStart.Fn(context.Background(), rc.Params{"fs": ":mockfs:", "type": "dummy"})
require.NoError(t, err)
assert.Equal(t, 2, len(servers))
_, err = serveStopAll.Fn(context.Background(), nil)
require.NoError(t, err)
assert.Equal(t, 0, len(servers))
}

View File

@@ -6,7 +6,6 @@ import (
"encoding/json"
"errors"
"fmt"
"net"
"net/http"
"os"
"path"
@@ -17,13 +16,10 @@ import (
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/rclone/rclone/cmd"
cmdserve "github.com/rclone/rclone/cmd/serve"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs/walk"
libhttp "github.com/rclone/rclone/lib/http"
"github.com/rclone/rclone/lib/http/serve"
@@ -33,63 +29,37 @@ import (
"golang.org/x/net/http2"
)
// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{{
Name: "stdio",
Default: false,
Help: "Run an HTTP2 server on stdin/stdout",
}, {
Name: "append_only",
Default: false,
Help: "Disallow deletion of repository data",
}, {
Name: "private_repos",
Default: false,
Help: "Users can only access their private repo",
}, {
Name: "cache_objects",
Default: true,
Help: "Cache listed objects",
}}.
Add(libhttp.ConfigInfo).
Add(libhttp.AuthConfigInfo)
// Options required for http server
type Options struct {
Auth libhttp.AuthConfig
HTTP libhttp.Config
Stdio bool `config:"stdio"`
AppendOnly bool `config:"append_only"`
PrivateRepos bool `config:"private_repos"`
CacheObjects bool `config:"cache_objects"`
Stdio bool
AppendOnly bool
PrivateRepos bool
CacheObjects bool
}
// DefaultOpt is the default values used for Options
var DefaultOpt = Options{
Auth: libhttp.DefaultAuthCfg(),
HTTP: libhttp.DefaultCfg(),
}
// Opt is options set by command line flags
var Opt Options
var Opt = DefaultOpt
// flagPrefix is the prefix used to uniquely identify command line flags.
// It is intentionally empty for this package.
const flagPrefix = ""
func init() {
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "restic", Opt: &Opt, Options: OptionsInfo})
flagSet := Command.Flags()
flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
cmdserve.Command.AddCommand(Command)
cmdserve.AddRc("restic", func(ctx context.Context, f fs.Fs, in rc.Params) (cmdserve.Handle, error) {
// Read opts
var opt = Opt // set default opts
err := configstruct.SetAny(in, &opt)
if err != nil {
return nil, err
}
if opt.Stdio {
return nil, errors.New("can't use --stdio via the rc")
}
// Create server
return newServer(ctx, f, &opt)
})
libhttp.AddAuthFlagsPrefix(flagSet, flagPrefix, &Opt.Auth)
libhttp.AddHTTPFlagsPrefix(flagSet, flagPrefix, &Opt.HTTP)
flags.BoolVarP(flagSet, &Opt.Stdio, "stdio", "", false, "Run an HTTP2 server on stdin/stdout", "")
flags.BoolVarP(flagSet, &Opt.AppendOnly, "append-only", "", false, "Disallow deletion of repository data", "")
flags.BoolVarP(flagSet, &Opt.PrivateRepos, "private-repos", "", false, "Users can only access their private repo", "")
flags.BoolVarP(flagSet, &Opt.CacheObjects, "cache-objects", "", true, "Cache listed objects", "")
}
// Command definition for cobra
@@ -203,15 +173,17 @@ with a path of ` + "`/<username>/`" + `.
httpSrv := &http2.Server{}
opts := &http2.ServeConnOpts{
Handler: s.server.Router(),
Handler: s.Server.Router(),
}
httpSrv.ServeConn(conn, opts)
return nil
}
fs.Logf(s.f, "Serving restic REST API on %s", s.server.URLs())
fs.Logf(s.f, "Serving restic REST API on %s", s.URLs())
defer systemd.Notify()()
return s.Serve()
s.Wait()
return nil
})
},
}
@@ -267,10 +239,10 @@ func checkPrivate(next http.Handler) http.Handler {
// server contains everything to run the server
type server struct {
server *libhttp.Server
f fs.Fs
cache *cache
opt Options
*libhttp.Server
f fs.Fs
cache *cache
opt Options
}
func newServer(ctx context.Context, f fs.Fs, opt *Options) (s *server, err error) {
@@ -283,35 +255,19 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options) (s *server, err error
if opt.Stdio {
opt.HTTP.ListenAddr = nil
}
s.server, err = libhttp.NewServer(ctx,
s.Server, err = libhttp.NewServer(ctx,
libhttp.WithConfig(opt.HTTP),
libhttp.WithAuth(opt.Auth),
)
if err != nil {
return nil, fmt.Errorf("failed to init server: %w", err)
}
router := s.server.Router()
router := s.Router()
s.Bind(router)
s.Server.Serve()
return s, nil
}
// Serve restic until the server is shutdown
func (s *server) Serve() error {
s.server.Serve()
s.server.Wait()
return nil
}
// Return the first address of the server
func (s *server) Addr() net.Addr {
return s.server.Addr()
}
// Shutdown the server
func (s *server) Shutdown() error {
return s.server.Shutdown()
}
// bind helper for main Bind method
func (s *server) bind(router chi.Router) {
router.MethodFunc("GET", "/*", func(w http.ResponseWriter, r *http.Request) {

View File

@@ -119,7 +119,7 @@ func TestResticHandler(t *testing.T) {
f := cmd.NewFsSrc([]string{tempdir})
s, err := newServer(ctx, f, &opt)
require.NoError(t, err)
router := s.server.Router()
router := s.Server.Router()
// create the repo
checkRequest(t, router.ServeHTTP,

View File

@@ -41,7 +41,7 @@ func TestResticPrivateRepositories(t *testing.T) {
f := cmd.NewFsSrc([]string{tempdir})
s, err := newServer(ctx, f, &opt)
require.NoError(t, err)
router := s.server.Router()
router := s.Server.Router()
// Requesting /test/ should allow access
reqs := []*http.Request{

View File

@@ -14,9 +14,7 @@ import (
_ "github.com/rclone/rclone/backend/all"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -28,7 +26,7 @@ const (
)
func newOpt() Options {
opt := Opt
opt := DefaultOpt
opt.HTTP.ListenAddr = []string{testBindAddress}
return opt
}
@@ -58,10 +56,7 @@ func TestResticIntegration(t *testing.T) {
// Start the server
s, err := newServer(ctx, fremote, &opt)
require.NoError(t, err)
go func() {
require.NoError(t, s.Serve())
}()
testURL := s.server.URLs()[0]
testURL := s.Server.URLs()[0]
defer func() {
_ = s.Shutdown()
}()
@@ -141,7 +136,7 @@ func TestListErrors(t *testing.T) {
f := &listErrorFs{Fs: cmd.NewFsSrc([]string{tempdir})}
s, err := newServer(ctx, f, &opt)
require.NoError(t, err)
router := s.server.Router()
router := s.Server.Router()
req := newRequest(t, "GET", "/test/snapshots/", nil)
checkRequest(t, router.ServeHTTP, req, []wantFunc{wantCode(http.StatusInternalServerError)})
@@ -166,7 +161,7 @@ func TestServeErrors(t *testing.T) {
f := &newObjectErrorFs{Fs: cmd.NewFsSrc([]string{tempdir})}
s, err := newServer(ctx, f, &opt)
require.NoError(t, err)
router := s.server.Router()
router := s.Server.Router()
f.err = errors.New("oops")
req := newRequest(t, "GET", "/test/config", nil)
@@ -175,9 +170,3 @@ func TestServeErrors(t *testing.T) {
f.err = fs.ErrorObjectNotFound
checkRequest(t, router.ServeHTTP, req, []wantFunc{wantCode(http.StatusNotFound)})
}
func TestRc(t *testing.T) {
servetest.TestRc(t, rc.Params{
"type": "restic",
})
}

View File

@@ -25,13 +25,15 @@ var (
// s3Backend implements the gofakes3.Backend interface to make an S3
// backend for gofakes3
type s3Backend struct {
opt *Options
s *Server
meta *sync.Map
}
// newBackend creates a new s3Backend.
func newBackend(s *Server) gofakes3.Backend {
func newBackend(s *Server, opt *Options) gofakes3.Backend {
return &s3Backend{
opt: opt,
s: s,
meta: new(sync.Map),
}
@@ -134,7 +136,7 @@ func (b *s3Backend) HeadObject(ctx context.Context, bucketName, objectName strin
fobj := entry.(fs.Object)
size := node.Size()
hash := getFileHashByte(fobj, b.s.etagHashType)
hash := getFileHashByte(fobj)
meta := map[string]string{
"Last-Modified": formatHeaderTime(node.ModTime()),
@@ -185,7 +187,7 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string
file := node.(*vfs.File)
size := node.Size()
hash := getFileHashByte(fobj, b.s.etagHashType)
hash := getFileHashByte(fobj)
in, err := file.Open(os.O_RDONLY)
if err != nil {

View File

@@ -28,8 +28,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
if entry.IsDir() {
if addPrefix {
prefixWithTrailingSlash := objectPath + "/"
response.AddPrefix(prefixWithTrailingSlash)
response.AddPrefix(objectPath)
continue
}
err := b.entryListR(_vfs, bucket, path.Join(fdPath, object), "", false, response)
@@ -40,7 +39,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
item := &gofakes3.Content{
Key: objectPath,
LastModified: gofakes3.NewContentTime(entry.ModTime()),
ETag: getFileHash(entry, b.s.etagHashType),
ETag: getFileHash(entry),
Size: entry.Size(),
StorageClass: gofakes3.StorageStandard,
}

View File

@@ -6,86 +6,41 @@ import (
"strings"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fs/hash"
httplib "github.com/rclone/rclone/lib/http"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/spf13/cobra"
)
// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{{
Name: "force_path_style",
Default: true,
Help: "If true use path style access if false use virtual hosted style",
}, {
Name: "etag_hash",
Default: "MD5",
Help: "Which hash to use for the ETag, or auto or blank for off",
}, {
Name: "auth_key",
Default: []string{},
Help: "Set key pair for v4 authorization: access_key_id,secret_access_key",
}, {
Name: "no_cleanup",
Default: false,
Help: "Not to cleanup empty folder after object is deleted",
}}.
Add(httplib.ConfigInfo).
Add(httplib.AuthConfigInfo)
// Options contains options for the s3 Server
type Options struct {
// TODO: add more options
ForcePathStyle bool `config:"force_path_style"`
EtagHash string `config:"etag_hash"`
AuthKey []string `config:"auth_key"`
NoCleanup bool `config:"no_cleanup"`
Auth httplib.AuthConfig
HTTP httplib.Config
// DefaultOpt is the default values used for Options
var DefaultOpt = Options{
pathBucketMode: true,
hashName: "MD5",
hashType: hash.MD5,
noCleanup: false,
Auth: httplib.DefaultAuthCfg(),
HTTP: httplib.DefaultCfg(),
}
// Opt is options set by command line flags
var Opt Options
var Opt = DefaultOpt
const flagPrefix = ""
func init() {
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "s3", Opt: &Opt, Options: OptionsInfo})
flagSet := Command.Flags()
flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
httplib.AddAuthFlagsPrefix(flagSet, flagPrefix, &Opt.Auth)
httplib.AddHTTPFlagsPrefix(flagSet, flagPrefix, &Opt.HTTP)
vfsflags.AddFlags(flagSet)
proxyflags.AddFlags(flagSet)
serve.Command.AddCommand(Command)
serve.AddRc("s3", func(ctx context.Context, f fs.Fs, in rc.Params) (serve.Handle, error) {
// Read VFS Opts
var vfsOpt = vfscommon.Opt // set default opts
err := configstruct.SetAny(in, &vfsOpt)
if err != nil {
return nil, err
}
// Read Proxy Opts
var proxyOpt = proxy.Opt // set default opts
err = configstruct.SetAny(in, &proxyOpt)
if err != nil {
return nil, err
}
// Read opts
var opt = Opt // set default opts
err = configstruct.SetAny(in, &opt)
if err != nil {
return nil, err
}
// Create server
return newServer(ctx, f, &opt, &vfsOpt, &proxyOpt)
})
flags.BoolVarP(flagSet, &Opt.pathBucketMode, "force-path-style", "", Opt.pathBucketMode, "If true use path style access if false use virtual hosted style (default true)", "")
flags.StringVarP(flagSet, &Opt.hashName, "etag-hash", "", Opt.hashName, "Which hash to use for the ETag, or auto or blank for off", "")
flags.StringArrayVarP(flagSet, &Opt.authPair, "auth-key", "", Opt.authPair, "Set key pair for v4 authorization: access_key_id,secret_access_key", "")
flags.BoolVarP(flagSet, &Opt.noCleanup, "no-cleanup", "", Opt.noCleanup, "Not to cleanup empty folder after object is deleted", "")
}
//go:embed serve_s3.md
@@ -108,19 +63,34 @@ var Command = &cobra.Command{
Long: help() + httplib.AuthHelp(flagPrefix) + httplib.Help(flagPrefix) + vfs.Help(),
RunE: func(command *cobra.Command, args []string) error {
var f fs.Fs
if proxy.Opt.AuthProxy == "" {
if proxyflags.Opt.AuthProxy == "" {
cmd.CheckArgs(1, 1, command, args)
f = cmd.NewFsSrc(args)
} else {
cmd.CheckArgs(0, 0, command, args)
}
cmd.Run(false, false, command, func() error {
s, err := newServer(context.Background(), f, &Opt, &vfscommon.Opt, &proxy.Opt)
if Opt.hashName == "auto" {
Opt.hashType = f.Hashes().GetOne()
} else if Opt.hashName != "" {
err := Opt.hashType.Set(Opt.hashName)
if err != nil {
return err
}
return s.Serve()
}
cmd.Run(false, false, command, func() error {
s, err := newServer(context.Background(), f, &Opt)
if err != nil {
return err
}
router := s.server.Router()
s.Bind(router)
err = s.Serve()
if err != nil {
return err
}
s.server.Wait()
return nil
})
return nil
},

View File

@@ -18,16 +18,15 @@ import (
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest"
httplib "github.com/rclone/rclone/lib/http"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -37,16 +36,23 @@ const (
)
// Configure and serve the server
func serveS3(t *testing.T, f fs.Fs) (testURL string, keyid string, keysec string, w *Server) {
func serveS3(f fs.Fs) (testURL string, keyid string, keysec string, w *Server) {
keyid = random.String(16)
keysec = random.String(16)
opt := Opt // copy default options
opt.AuthKey = []string{fmt.Sprintf("%s,%s", keyid, keysec)}
opt.HTTP.ListenAddr = []string{endpoint}
w, _ = newServer(context.Background(), f, &opt, &vfscommon.Opt, &proxy.Opt)
go func() {
require.NoError(t, w.Serve())
}()
serveropt := &Options{
HTTP: httplib.DefaultCfg(),
pathBucketMode: true,
hashName: "",
hashType: hash.None,
authPair: []string{fmt.Sprintf("%s,%s", keyid, keysec)},
}
serveropt.HTTP.ListenAddr = []string{endpoint}
w, _ = newServer(context.Background(), f, serveropt)
router := w.server.Router()
w.Bind(router)
_ = w.Serve()
testURL = w.server.URLs()[0]
return
@@ -56,7 +62,7 @@ func serveS3(t *testing.T, f fs.Fs) (testURL string, keyid string, keysec string
// s3 remote against it.
func TestS3(t *testing.T) {
start := func(f fs.Fs) (configmap.Simple, func()) {
testURL, keyid, keysec, _ := serveS3(t, f)
testURL, keyid, keysec, _ := serveS3(f)
// Config for the backend we'll use to connect to the server
config := configmap.Simple{
"type": "s3",
@@ -119,7 +125,7 @@ func TestEncodingWithMinioClient(t *testing.T) {
_, err = f.Put(context.Background(), in, obji)
assert.NoError(t, err)
endpoint, keyid, keysec, _ := serveS3(t, f)
endpoint, keyid, keysec, _ := serveS3(f)
testURL, _ := url.Parse(endpoint)
minioClient, err := minio.New(testURL.Host, &minio.Options{
Creds: credentials.NewStaticV4(keyid, keysec, ""),
@@ -167,9 +173,9 @@ func testListBuckets(t *testing.T, cases []TestCase, useProxy bool) {
cmd := "go run " + prog + " " + files
// FIXME: this is untidy setting a global variable!
proxy.Opt.AuthProxy = cmd
proxyflags.Opt.AuthProxy = cmd
defer func() {
proxy.Opt.AuthProxy = ""
proxyflags.Opt.AuthProxy = ""
}()
f = nil
@@ -182,7 +188,7 @@ func testListBuckets(t *testing.T, cases []TestCase, useProxy bool) {
for _, tt := range cases {
t.Run(tt.description, func(t *testing.T) {
endpoint, keyid, keysec, s := serveS3(t, f)
endpoint, keyid, keysec, s := serveS3(f)
defer func() {
assert.NoError(t, s.server.Shutdown())
}()
@@ -290,10 +296,3 @@ func TestListBucketsAuthProxy(t *testing.T) {
testListBuckets(t, cases, true)
}
func TestRc(t *testing.T) {
servetest.TestRc(t, rc.Params{
"type": "s3",
"vfs_cache_mode": "off",
})
}

View File

@@ -14,7 +14,7 @@ docs](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)).
access.
Please note that some clients may require HTTPS endpoints. See [the
SSL docs](#tls-ssl) for more information.
SSL docs](#ssl-tls) for more information.
This command uses the [VFS directory cache](#vfs-virtual-file-system).
All the functionality will work with `--vfs-cache-mode off`. Using

View File

@@ -8,7 +8,6 @@ import (
"errors"
"fmt"
"math/rand"
"net"
"net/http"
"strings"
@@ -16,6 +15,7 @@ import (
"github.com/rclone/gofakes3"
"github.com/rclone/gofakes3/signature"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
httplib "github.com/rclone/rclone/lib/http"
@@ -29,70 +29,67 @@ const (
ctxKeyID ctxKey = iota
)
// Options contains options for the http Server
type Options struct {
// TODO: add more options
pathBucketMode bool
hashName string
hashType hash.Type
authPair []string
noCleanup bool
Auth httplib.AuthConfig
HTTP httplib.Config
}
// Server is a s3.FileSystem interface
type Server struct {
server *httplib.Server
opt Options
f fs.Fs
_vfs *vfs.VFS // don't use directly, use getVFS
faker *gofakes3.GoFakeS3
handler http.Handler
proxy *proxy.Proxy
ctx context.Context // for global config
s3Secret string
etagHashType hash.Type
server *httplib.Server
f fs.Fs
_vfs *vfs.VFS // don't use directly, use getVFS
faker *gofakes3.GoFakeS3
handler http.Handler
proxy *proxy.Proxy
ctx context.Context // for global config
s3Secret string
}
// Make a new S3 Server to serve the remote
func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options, proxyOpt *proxy.Options) (s *Server, err error) {
func newServer(ctx context.Context, f fs.Fs, opt *Options) (s *Server, err error) {
w := &Server{
f: f,
ctx: ctx,
opt: *opt,
etagHashType: hash.None,
f: f,
ctx: ctx,
}
if w.opt.EtagHash == "auto" {
w.etagHashType = f.Hashes().GetOne()
} else if w.opt.EtagHash != "" {
err := w.etagHashType.Set(w.opt.EtagHash)
if err != nil {
return nil, err
}
}
if w.etagHashType != hash.None {
fs.Debugf(f, "Using hash %v for ETag", w.etagHashType)
}
if len(opt.AuthKey) == 0 {
if len(opt.authPair) == 0 {
fs.Logf("serve s3", "No auth provided so allowing anonymous access")
} else {
w.s3Secret = getAuthSecret(opt.AuthKey)
w.s3Secret = getAuthSecret(opt.authPair)
}
var newLogger logger
w.faker = gofakes3.New(
newBackend(w),
gofakes3.WithHostBucket(!opt.ForcePathStyle),
newBackend(w, opt),
gofakes3.WithHostBucket(!opt.pathBucketMode),
gofakes3.WithLogger(newLogger),
gofakes3.WithRequestID(rand.Uint64()),
gofakes3.WithoutVersioning(),
gofakes3.WithV4Auth(authlistResolver(opt.AuthKey)),
gofakes3.WithV4Auth(authlistResolver(opt.authPair)),
gofakes3.WithIntegrityCheck(true), // Check Content-MD5 if supplied
)
w.handler = http.NewServeMux()
w.handler = w.faker.Server()
if proxy.Opt.AuthProxy != "" {
w.proxy = proxy.New(ctx, proxyOpt, vfsOpt)
if proxyflags.Opt.AuthProxy != "" {
w.proxy = proxy.New(ctx, &proxyflags.Opt)
// proxy auth middleware
w.handler = proxyAuthMiddleware(w.handler, w)
w.handler = authPairMiddleware(w.handler, w)
} else {
w._vfs = vfs.New(f, vfsOpt)
w._vfs = vfs.New(f, &vfscommon.Opt)
if len(opt.AuthKey) > 0 {
w.faker.AddAuthKeys(authlistResolver(opt.AuthKey))
if len(opt.authPair) > 0 {
w.faker.AddAuthKeys(authlistResolver(opt.authPair))
}
}
@@ -104,9 +101,6 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
return nil, fmt.Errorf("failed to init server: %w", err)
}
router := w.server.Router()
w.Bind(router)
return w, nil
}
@@ -141,24 +135,13 @@ func (w *Server) Bind(router chi.Router) {
router.Handle("/*", w.handler)
}
// Serve serves the s3 server until the server is shutdown
// Serve serves the s3 server
func (w *Server) Serve() error {
w.server.Serve()
fs.Logf(w.f, "Starting s3 server on %s", w.server.URLs())
w.server.Wait()
return nil
}
// Addr returns the first address of the server
func (w *Server) Addr() net.Addr {
return w.server.Addr()
}
// Shutdown the server
func (w *Server) Shutdown() error {
return w.server.Shutdown()
}
func authPairMiddleware(next http.Handler, ws *Server) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
accessKey, _ := parseAccessKeyID(r)

View File

@@ -36,15 +36,15 @@ func getDirEntries(prefix string, VFS *vfs.VFS) (vfs.Nodes, error) {
return dirEntries, nil
}
func getFileHashByte(node any, hashType hash.Type) []byte {
b, err := hex.DecodeString(getFileHash(node, hashType))
func getFileHashByte(node any) []byte {
b, err := hex.DecodeString(getFileHash(node))
if err != nil {
return nil
}
return b
}
func getFileHash(node any, hashType hash.Type) string {
func getFileHash(node any) string {
var o fs.Object
switch b := node.(type) {
@@ -59,7 +59,7 @@ func getFileHash(node any, hashType hash.Type) string {
defer func() {
_ = in.Close()
}()
h, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
h, err := hash.NewMultiHasherTypes(hash.NewHashSet(Opt.hashType))
if err != nil {
return ""
}
@@ -67,14 +67,14 @@ func getFileHash(node any, hashType hash.Type) string {
if err != nil {
return ""
}
return h.Sums()[hashType]
return h.Sums()[Opt.hashType]
}
o = fsObj
case fs.Object:
o = b
}
hash, err := o.Hash(context.Background(), hashType)
hash, err := o.Hash(context.Background(), Opt.hashType)
if err != nil {
return ""
}

View File

@@ -5,10 +5,44 @@ import (
"errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve/dlna"
"github.com/rclone/rclone/cmd/serve/docker"
"github.com/rclone/rclone/cmd/serve/ftp"
"github.com/rclone/rclone/cmd/serve/http"
"github.com/rclone/rclone/cmd/serve/nfs"
"github.com/rclone/rclone/cmd/serve/restic"
"github.com/rclone/rclone/cmd/serve/s3"
"github.com/rclone/rclone/cmd/serve/sftp"
"github.com/rclone/rclone/cmd/serve/webdav"
"github.com/spf13/cobra"
)
func init() {
Command.AddCommand(http.Command)
if webdav.Command != nil {
Command.AddCommand(webdav.Command)
}
if restic.Command != nil {
Command.AddCommand(restic.Command)
}
if dlna.Command != nil {
Command.AddCommand(dlna.Command)
}
if ftp.Command != nil {
Command.AddCommand(ftp.Command)
}
if sftp.Command != nil {
Command.AddCommand(sftp.Command)
}
if docker.Command != nil {
Command.AddCommand(docker.Command)
}
if nfs.Command != nil {
Command.AddCommand(nfs.Command)
}
if s3.Command != nil {
Command.AddCommand(s3.Command)
}
cmd.Root.AddCommand(Command)
}

View File

@@ -1,77 +0,0 @@
package servetest
import (
"context"
"fmt"
"net"
"strings"
"testing"
"time"
"github.com/rclone/rclone/fs/rc"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// GetEphemeralPort opens a listening port on localhost:0, closes it,
// and returns the address as "localhost:port".
func GetEphemeralPort(t *testing.T) string {
listener, err := net.Listen("tcp", "localhost:0") // Listen on any available port
require.NoError(t, err)
defer func() {
require.NoError(t, listener.Close())
}()
return listener.Addr().String()
}
// checkTCP attempts to establish a TCP connection to the given address,
// and closes it if successful. Returns an error if the connection fails.
func checkTCP(address string) error {
conn, err := net.DialTimeout("tcp", address, 5*time.Second)
if err != nil {
return fmt.Errorf("failed to connect to %s: %w", address, err)
}
err = conn.Close()
if err != nil {
return fmt.Errorf("failed to close connection to %s: %w", address, err)
}
return nil
}
// TestRc tests the rc interface for the servers
//
// in should contain any options necessary however this code will add
// "fs", "addr".
func TestRc(t *testing.T, in rc.Params) {
ctx := context.Background()
dir := t.TempDir()
serveStart := rc.Calls.Get("serve/start")
serveStop := rc.Calls.Get("serve/stop")
name := in["type"].(string)
addr := GetEphemeralPort(t)
// Start the server
in["fs"] = dir
in["addr"] = addr
out, err := serveStart.Fn(ctx, in)
require.NoError(t, err)
id := out["id"].(string)
assert.True(t, strings.HasPrefix(id, name+"-"))
gotAddr := out["addr"].(string)
assert.Equal(t, addr, gotAddr)
// Check we can make a TCP connection to the server
t.Logf("Checking connection on %q", addr)
err = checkTCP(addr)
assert.NoError(t, err)
// Stop the server
_, err = serveStop.Fn(ctx, rc.Params{"id": id})
require.NoError(t, err)
// Check we can make no longer make connections to the server
err = checkTCP(addr)
assert.Error(t, err)
}

View File

@@ -13,7 +13,7 @@ import (
"strings"
"testing"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fstest"
@@ -50,9 +50,9 @@ func run(t *testing.T, name string, start StartFn, useProxy bool) {
cmd := "go run " + prog + " " + fremote.Root()
// FIXME this is untidy setting a global variable!
proxy.Opt.AuthProxy = cmd
proxyflags.Opt.AuthProxy = cmd
defer func() {
proxy.Opt.AuthProxy = ""
proxyflags.Opt.AuthProxy = ""
}()
}
config, cleanup := start(f)

View File

@@ -16,13 +16,13 @@ import (
"encoding/pem"
"errors"
"fmt"
"io"
"net"
"os"
"path/filepath"
"strings"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/lib/env"
@@ -41,27 +41,23 @@ type server struct {
ctx context.Context // for global config
config *ssh.ServerConfig
listener net.Listener
stopped chan struct{} // for waiting on the listener to stop
waitChan chan struct{} // for waiting on the listener to close
proxy *proxy.Proxy
}
func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options, proxyOpt *proxy.Options) (*server, error) {
func newServer(ctx context.Context, f fs.Fs, opt *Options) *server {
s := &server{
f: f,
ctx: ctx,
opt: *opt,
stopped: make(chan struct{}),
f: f,
ctx: ctx,
opt: *opt,
waitChan: make(chan struct{}),
}
if proxy.Opt.AuthProxy != "" {
s.proxy = proxy.New(ctx, proxyOpt, vfsOpt)
if proxyflags.Opt.AuthProxy != "" {
s.proxy = proxy.New(ctx, &proxyflags.Opt)
} else {
s.vfs = vfs.New(f, vfsOpt)
s.vfs = vfs.New(f, &vfscommon.Opt)
}
err := s.configure()
if err != nil {
return nil, fmt.Errorf("sftp configuration failed: %w", err)
}
return s, nil
return s
}
// getVFS gets the vfs from s or the proxy
@@ -133,19 +129,17 @@ func (s *server) acceptConnections() {
}
}
// configure the server
//
// Based on example server code from golang.org/x/crypto/ssh and server_standalone
func (s *server) configure() (err error) {
func (s *server) serve() (err error) {
var authorizedKeysMap map[string]struct{}
// ensure the user isn't trying to use conflicting flags
if proxy.Opt.AuthProxy != "" && s.opt.AuthorizedKeys != "" && s.opt.AuthorizedKeys != Opt.AuthorizedKeys {
if proxyflags.Opt.AuthProxy != "" && s.opt.AuthorizedKeys != "" && s.opt.AuthorizedKeys != Opt.AuthorizedKeys {
return errors.New("--auth-proxy and --authorized-keys cannot be used at the same time")
}
// Load the authorized keys
if s.opt.AuthorizedKeys != "" && proxy.Opt.AuthProxy == "" {
if s.opt.AuthorizedKeys != "" && proxyflags.Opt.AuthProxy == "" {
authKeysFile := env.ShellExpand(s.opt.AuthorizedKeys)
authorizedKeysMap, err = loadAuthorizedKeys(authKeysFile)
// If user set the flag away from the default then report an error
@@ -299,35 +293,42 @@ func (s *server) configure() (err error) {
}
}
s.listener = listener
return nil
}
// Serve SFTP until the server is Shutdown
func (s *server) Serve() (err error) {
fs.Logf(nil, "SFTP server listening on %v\n", s.listener.Addr())
s.acceptConnections()
close(s.stopped)
go s.acceptConnections()
return nil
}
// Addr returns the address the server is listening on
func (s *server) Addr() net.Addr {
return s.listener.Addr()
func (s *server) Addr() string {
return s.listener.Addr().String()
}
// Serve runs the sftp server in the background.
//
// Use s.Close() and s.Wait() to shut down the server
func (s *server) Serve() error {
err := s.serve()
if err != nil {
return err
}
return nil
}
// Wait blocks while the listener is open.
func (s *server) Wait() {
<-s.stopped
<-s.waitChan
}
// Shutdown shuts the running server down
func (s *server) Shutdown() error {
// Close shuts the running server down
func (s *server) Close() {
err := s.listener.Close()
if errors.Is(err, io.ErrUnexpectedEOF) {
err = nil
if err != nil {
fs.Errorf(nil, "Error on closing SFTP server: %v", err)
return
}
s.Wait()
return err
close(s.waitChan)
}
func loadPrivateKey(keyPath string) (ssh.Signer, error) {

View File

@@ -5,19 +5,14 @@ package sftp
import (
"context"
"fmt"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/lib/systemd"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
@@ -81,29 +76,6 @@ func init() {
vfsflags.AddFlags(Command.Flags())
proxyflags.AddFlags(Command.Flags())
AddFlags(Command.Flags(), &Opt)
serve.Command.AddCommand(Command)
serve.AddRc("sftp", func(ctx context.Context, f fs.Fs, in rc.Params) (serve.Handle, error) {
// Read VFS Opts
var vfsOpt = vfscommon.Opt // set default opts
err := configstruct.SetAny(in, &vfsOpt)
if err != nil {
return nil, err
}
// Read Proxy Opts
var proxyOpt = proxy.Opt // set default opts
err = configstruct.SetAny(in, &proxyOpt)
if err != nil {
return nil, err
}
// Read opts
var opt = Opt // set default opts
err = configstruct.SetAny(in, &opt)
if err != nil {
return nil, err
}
// Create server
return newServer(ctx, f, &opt, &vfsOpt, &proxyOpt)
})
}
// Command definition for cobra
@@ -180,7 +152,7 @@ provided by OpenSSH in this case.
},
Run: func(command *cobra.Command, args []string) {
var f fs.Fs
if proxy.Opt.AuthProxy == "" {
if proxyflags.Opt.AuthProxy == "" {
cmd.CheckArgs(1, 1, command, args)
f = cmd.NewFsSrc(args)
} else {
@@ -190,12 +162,14 @@ provided by OpenSSH in this case.
if Opt.Stdio {
return serveStdio(f)
}
s, err := newServer(context.Background(), f, &Opt, &vfscommon.Opt, &proxy.Opt)
s := newServer(context.Background(), f, &Opt)
err := s.Serve()
if err != nil {
fs.Fatal(nil, fmt.Sprint(err))
return err
}
defer systemd.Notify()()
return s.Serve()
s.Wait()
return nil
})
},
}

View File

@@ -14,14 +14,10 @@ import (
"github.com/pkg/sftp"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -49,14 +45,11 @@ func TestSftp(t *testing.T) {
opt.User = testUser
opt.Pass = testPass
w, err := newServer(context.Background(), f, &opt, &vfscommon.Opt, &proxy.Opt)
require.NoError(t, err)
go func() {
require.NoError(t, w.Serve())
}()
w := newServer(context.Background(), f, &opt)
require.NoError(t, w.serve())
// Read the host and port we started on
addr := w.Addr().String()
addr := w.Addr()
colon := strings.LastIndex(addr, ":")
// Config for the backend we'll use to connect to the server
@@ -70,18 +63,10 @@ func TestSftp(t *testing.T) {
// return a stop function
return config, func() {
assert.NoError(t, w.Shutdown())
w.Close()
w.Wait()
}
}
servetest.Run(t, "sftp", start)
}
func TestRc(t *testing.T) {
servetest.TestRc(t, rc.Params{
"type": "sftp",
"user": "test",
"pass": obscure.MustObscure("test"),
"vfs_cache_mode": "off",
})
}

View File

@@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"mime"
"net"
"net/http"
"os"
"path"
@@ -18,14 +17,11 @@ import (
chi "github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/rclone/rclone/cmd"
cmdserve "github.com/rclone/rclone/cmd/serve"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/rc"
libhttp "github.com/rclone/rclone/lib/http"
"github.com/rclone/rclone/lib/http/serve"
"github.com/rclone/rclone/lib/systemd"
@@ -36,65 +32,41 @@ import (
"golang.org/x/net/webdav"
)
// OptionsInfo describes the Options in use
var OptionsInfo = fs.Options{{
Name: "etag_hash",
Default: "",
Help: "Which hash to use for the ETag, or auto or blank for off",
}, {
Name: "disable_dir_list",
Default: false,
Help: "Disable HTML directory list on GET request for a directory",
}}.
Add(libhttp.ConfigInfo).
Add(libhttp.AuthConfigInfo).
Add(libhttp.TemplateConfigInfo)
// Options required for http server
type Options struct {
Auth libhttp.AuthConfig
HTTP libhttp.Config
Template libhttp.TemplateConfig
EtagHash string `config:"etag_hash"`
DisableDirList bool `config:"disable_dir_list"`
Auth libhttp.AuthConfig
HTTP libhttp.Config
Template libhttp.TemplateConfig
HashName string
HashType hash.Type
DisableGETDir bool
}
// DefaultOpt is the default values used for Options
var DefaultOpt = Options{
Auth: libhttp.DefaultAuthCfg(),
HTTP: libhttp.DefaultCfg(),
Template: libhttp.DefaultTemplateCfg(),
HashType: hash.None,
DisableGETDir: false,
}
// Opt is options set by command line flags
var Opt Options
var Opt = DefaultOpt
// flagPrefix is the prefix used to uniquely identify command line flags.
// It is intentionally empty for this package.
const flagPrefix = ""
func init() {
fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "webdav", Opt: &Opt, Options: OptionsInfo})
flagSet := Command.Flags()
flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
libhttp.AddAuthFlagsPrefix(flagSet, flagPrefix, &Opt.Auth)
libhttp.AddHTTPFlagsPrefix(flagSet, flagPrefix, &Opt.HTTP)
libhttp.AddTemplateFlagsPrefix(flagSet, "", &Opt.Template)
vfsflags.AddFlags(flagSet)
proxyflags.AddFlags(flagSet)
cmdserve.Command.AddCommand(Command)
cmdserve.AddRc("webdav", func(ctx context.Context, f fs.Fs, in rc.Params) (cmdserve.Handle, error) {
// Read VFS Opts
var vfsOpt = vfscommon.Opt // set default opts
err := configstruct.SetAny(in, &vfsOpt)
if err != nil {
return nil, err
}
// Read Proxy Opts
var proxyOpt = proxy.Opt // set default opts
err = configstruct.SetAny(in, &proxyOpt)
if err != nil {
return nil, err
}
// Read opts
var opt = Opt // set default opts
err = configstruct.SetAny(in, &opt)
if err != nil {
return nil, err
}
// Create server
return newWebDAV(ctx, f, &opt, &vfsOpt, &proxyOpt)
})
flags.StringVarP(flagSet, &Opt.HashName, "etag-hash", "", "", "Which hash to use for the ETag, or auto or blank for off", "")
flags.BoolVarP(flagSet, &Opt.DisableGETDir, "disable-dir-list", "", false, "Disable HTML directory list on GET request for a directory", "")
}
// Command definition for cobra
@@ -163,19 +135,36 @@ done by the permissions on the socket.
},
RunE: func(command *cobra.Command, args []string) error {
var f fs.Fs
if proxy.Opt.AuthProxy == "" {
if proxyflags.Opt.AuthProxy == "" {
cmd.CheckArgs(1, 1, command, args)
f = cmd.NewFsSrc(args)
} else {
cmd.CheckArgs(0, 0, command, args)
}
Opt.HashType = hash.None
if Opt.HashName == "auto" {
Opt.HashType = f.Hashes().GetOne()
} else if Opt.HashName != "" {
err := Opt.HashType.Set(Opt.HashName)
if err != nil {
return err
}
}
if Opt.HashType != hash.None {
fs.Debugf(f, "Using hash %v for ETag", Opt.HashType)
}
cmd.Run(false, false, command, func() error {
s, err := newWebDAV(context.Background(), f, &Opt, &vfscommon.Opt, &proxy.Opt)
s, err := newWebDAV(context.Background(), f, &Opt)
if err != nil {
return err
}
err = s.serve()
if err != nil {
return err
}
defer systemd.Notify()()
return s.Serve()
s.Wait()
return nil
})
return nil
},
@@ -194,47 +183,34 @@ done by the permissions on the socket.
// might apply". In particular, whether or not renaming a file or directory
// overwriting another existing file or directory is an error is OS-dependent.
type WebDAV struct {
server *libhttp.Server
*libhttp.Server
opt Options
f fs.Fs
_vfs *vfs.VFS // don't use directly, use getVFS
webdavhandler *webdav.Handler
proxy *proxy.Proxy
ctx context.Context // for global config
etagHashType hash.Type
}
// check interface
var _ webdav.FileSystem = (*WebDAV)(nil)
// Make a new WebDAV to serve the remote
func newWebDAV(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options, proxyOpt *proxy.Options) (w *WebDAV, err error) {
func newWebDAV(ctx context.Context, f fs.Fs, opt *Options) (w *WebDAV, err error) {
w = &WebDAV{
f: f,
ctx: ctx,
opt: *opt,
etagHashType: hash.None,
f: f,
ctx: ctx,
opt: *opt,
}
if opt.EtagHash == "auto" {
w.etagHashType = f.Hashes().GetOne()
} else if opt.EtagHash != "" {
err := w.etagHashType.Set(opt.EtagHash)
if err != nil {
return nil, err
}
}
if w.etagHashType != hash.None {
fs.Debugf(f, "Using hash %v for ETag", w.etagHashType)
}
if proxyOpt.AuthProxy != "" {
w.proxy = proxy.New(ctx, proxyOpt, vfsOpt)
if proxyflags.Opt.AuthProxy != "" {
w.proxy = proxy.New(ctx, &proxyflags.Opt)
// override auth
w.opt.Auth.CustomAuthFn = w.auth
} else {
w._vfs = vfs.New(f, vfsOpt)
w._vfs = vfs.New(f, &vfscommon.Opt)
}
w.server, err = libhttp.NewServer(ctx,
w.Server, err = libhttp.NewServer(ctx,
libhttp.WithConfig(w.opt.HTTP),
libhttp.WithAuth(w.opt.Auth),
libhttp.WithTemplate(w.opt.Template),
@@ -254,7 +230,7 @@ func newWebDAV(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
}
w.webdavhandler = webdavHandler
router := w.server.Router()
router := w.Server.Router()
router.Use(
middleware.SetHeader("Accept-Ranges", "bytes"),
middleware.SetHeader("Server", "rclone/"+fs.Version),
@@ -355,7 +331,7 @@ func (w *WebDAV) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
urlPath := r.URL.Path
isDir := strings.HasSuffix(urlPath, "/")
remote := strings.Trim(urlPath, "/")
if !w.opt.DisableDirList && (r.Method == "GET" || r.Method == "HEAD") && isDir {
if !w.opt.DisableGETDir && (r.Method == "GET" || r.Method == "HEAD") && isDir {
w.serveDir(rw, r, remote)
return
}
@@ -402,7 +378,7 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str
}
// Make the entries for display
directory := serve.NewDirectory(dirRemote, w.server.HTMLTemplate())
directory := serve.NewDirectory(dirRemote, w.Server.HTMLTemplate())
for _, node := range dirEntries {
if vfscommon.Opt.NoModTime {
directory.AddHTMLEntry(node.Path(), node.IsDir(), node.Size(), time.Time{})
@@ -418,26 +394,15 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str
directory.Serve(rw, r)
}
// Serve HTTP until the server is shutdown
// serve runs the http server in the background.
//
// Use s.Close() and s.Wait() to shut down the server
func (w *WebDAV) Serve() error {
w.server.Serve()
fs.Logf(w.f, "WebDav Server started on %s", w.server.URLs())
w.server.Wait()
func (w *WebDAV) serve() error {
w.Serve()
fs.Logf(w.f, "WebDav Server started on %s", w.URLs())
return nil
}
// Addr returns the first address of the server
func (w *WebDAV) Addr() net.Addr {
return w.server.Addr()
}
// Shutdown the server
func (w *WebDAV) Shutdown() error {
return w.server.Shutdown()
}
// logRequest is called by the webdav module on every request
func (w *WebDAV) logRequest(r *http.Request, err error) {
fs.Infof(r.URL.Path, "%s from %s", r.Method, r.RemoteAddr)
@@ -550,16 +515,16 @@ func (h Handle) DeadProps() (map[xml.Name]webdav.Property, error) {
property webdav.Property
properties = make(map[xml.Name]webdav.Property)
)
if h.w.etagHashType != hash.None {
if h.w.opt.HashType != hash.None {
entry := h.Handle.Node().DirEntry()
if o, ok := entry.(fs.Object); ok {
hash, err := o.Hash(h.ctx, h.w.etagHashType)
hash, err := o.Hash(h.ctx, h.w.opt.HashType)
if err == nil {
xmlName.Space = "http://owncloud.org/ns"
xmlName.Local = "checksums"
property.XMLName = xmlName
property.InnerXML = append(property.InnerXML, "<checksum xmlns=\"http://owncloud.org/ns\">"...)
property.InnerXML = append(property.InnerXML, strings.ToUpper(h.w.etagHashType.String())...)
property.InnerXML = append(property.InnerXML, strings.ToUpper(h.w.opt.HashType.String())...)
property.InnerXML = append(property.InnerXML, ':')
property.InnerXML = append(property.InnerXML, hash...)
property.InnerXML = append(property.InnerXML, "</checksum>"...)
@@ -612,7 +577,7 @@ type FileInfo struct {
// ETag returns an ETag for the FileInfo
func (fi FileInfo) ETag(ctx context.Context) (etag string, err error) {
// defer log.Trace(fi, "")("etag=%q, err=%v", &etag, &err)
if fi.w.etagHashType == hash.None {
if fi.w.opt.HashType == hash.None {
return "", webdav.ErrNotImplemented
}
node, ok := (fi.FileInfo).(vfs.Node)
@@ -625,7 +590,7 @@ func (fi FileInfo) ETag(ctx context.Context) (etag string, err error) {
if !ok {
return "", webdav.ErrNotImplemented
}
hash, err := o.Hash(ctx, fi.w.etagHashType)
hash, err := o.Hash(ctx, fi.w.opt.HashType)
if err != nil || hash == "" {
return "", webdav.ErrNotImplemented
}

View File

@@ -18,14 +18,12 @@ import (
"time"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/rclone/rclone/fs/hash"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/webdav"
@@ -50,32 +48,31 @@ var (
func TestWebDav(t *testing.T) {
// Configure and start the server
start := func(f fs.Fs) (configmap.Simple, func()) {
opt := Opt
opt := DefaultOpt
opt.HTTP.ListenAddr = []string{testBindAddress}
opt.HTTP.BaseURL = "/prefix"
opt.Auth.BasicUser = testUser
opt.Auth.BasicPass = testPass
opt.Template.Path = testTemplate
opt.EtagHash = "MD5"
opt.HashType = hash.MD5
// Start the server
w, err := newWebDAV(context.Background(), f, &opt, &vfscommon.Opt, &proxy.Opt)
w, err := newWebDAV(context.Background(), f, &opt)
require.NoError(t, err)
go func() {
require.NoError(t, w.Serve())
}()
require.NoError(t, w.serve())
// Config for the backend we'll use to connect to the server
config := configmap.Simple{
"type": "webdav",
"vendor": "rclone",
"url": w.server.URLs()[0],
"url": w.Server.URLs()[0],
"user": testUser,
"pass": obscure.MustObscure(testPass),
}
return config, func() {
assert.NoError(t, w.Shutdown())
w.Wait()
}
}
@@ -101,20 +98,19 @@ func TestHTTPFunction(t *testing.T) {
f, err := fs.NewFs(context.Background(), "../http/testdata/files")
assert.NoError(t, err)
opt := Opt
opt := DefaultOpt
opt.HTTP.ListenAddr = []string{testBindAddress}
opt.Template.Path = testTemplate
// Start the server
w, err := newWebDAV(context.Background(), f, &opt, &vfscommon.Opt, &proxy.Opt)
w, err := newWebDAV(context.Background(), f, &opt)
assert.NoError(t, err)
go func() {
require.NoError(t, w.Serve())
}()
require.NoError(t, w.serve())
defer func() {
assert.NoError(t, w.Shutdown())
w.Wait()
}()
testURL := w.server.URLs()[0]
testURL := w.Server.URLs()[0]
pause := time.Millisecond
i := 0
for ; i < 10; i++ {
@@ -264,10 +260,3 @@ func HelpTestGET(t *testing.T, testURL string) {
checkGolden(t, test.Golden, body)
}
}
func TestRc(t *testing.T) {
servetest.TestRc(t, rc.Params{
"type": "webdav",
"vfs_cache_mode": "off",
})
}

View File

@@ -48,7 +48,7 @@ func TestEnvironmentVariables(t *testing.T) {
env = "RCLONE_LOG_LEVEL=DEBUG"
out, err = rcloneEnv(env, "version", "--quiet")
if assert.Error(t, err) {
assert.Contains(t, out, " DEBUG ")
assert.Contains(t, out, " DEBUG : ")
assert.Contains(t, out, "Can't set -q and --log-level")
assert.Contains(t, "exit status 1", err.Error())
}
@@ -329,7 +329,7 @@ func TestEnvironmentVariables(t *testing.T) {
jsonLogOK := func() {
t.Helper()
if assert.NoError(t, err) {
assert.Contains(t, out, `"level":"debug"`)
assert.Contains(t, out, `{"level":"debug",`)
assert.Contains(t, out, `"msg":"Version `)
assert.Contains(t, out, `"}`)
}

View File

@@ -947,29 +947,3 @@ put them back in again.` >}}
* Lorenz Brun <lorenz@brun.one>
* Dave Vasilevsky <djvasi@gmail.com> <dave@vasilevsky.ca>
* luzpaz <luzpaz@users.noreply.github.com>
* jack <9480542+jackusm@users.noreply.github.com>
* Jörn Friedrich Dreyer <jfd@butonic.de>
* alingse <alingse@foxmail.com>
* Fernando Fernández <ferferga@hotmail.com>
* eccoisle <167755281+eccoisle@users.noreply.github.com>
* Klaas Freitag <kraft@freisturz.de>
* Danny Garside <dannygarside@outlook.com>
* Samantha Bowen <sam@bbowen.net>
* simonmcnair <101189766+simonmcnair@users.noreply.github.com>
* huanghaojun <jasen.huang@ugreen.com>
* Enduriel <endur1el@protonmail.com>
* Markus Gerstel <markus.gerstel@osirium.com>
* simwai <16225108+simwai@users.noreply.github.com>
* Ben Alex <ben.alex@acegi.com.au>
* Klaas Freitag <opensource@freisturz.de> <klaas.freitag@kiteworks.com>
* Andrew Kreimer <algonell@gmail.com>
* Ed Craig-Wood <138211970+edc-w@users.noreply.github.com>
* Christian Richter <crichter@owncloud.com> <1058116+dragonchaser@users.noreply.github.com>
* Ralf Haferkamp <r.haferkamp@opencloud.eu>
* Jugal Kishore <me@devjugal.com>
* Tho Neyugn <nguyentruongtho@users.noreply.github.com>
* Ben Boeckel <mathstuf@users.noreply.github.com>
* Clément Wehrung <cwehrung@nurves.com>
* Jeff Geerling <geerlingguy@mac.com>
* Germán Casares <german.casares.march+github@gmail.com>
* fhuber <florian.huber@noris.de>

Some files were not shown because too many files have changed in this diff.