Mirror of https://github.com/rclone/rclone.git (synced 2025-12-25 20:53:28 +00:00)

Compare commits (2 commits): v1.70.2 ... fix-convmv

| Author | SHA1 | Date |
|---|---|---|
|  | e14109e1b4 |  |
|  | d08543070a |  |

6 .github/workflows/build.yml (vendored)
@@ -226,8 +226,6 @@ jobs:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Install Go
        id: setup-go
@@ -291,10 +289,6 @@ jobs:
      - name: Scan for vulnerabilities
        run: govulncheck ./...

      - name: Scan edits of autogenerated files
        run: bin/check_autogenerated_edits.py
        if: github.event_name == 'pull_request'

  android:
    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
    timeout-minutes: 30
@@ -572,19 +572,3 @@ Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.

[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)

[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)
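For orientation, a plugin is just a Go main package whose init-time side effects register the backend. A minimal sketch along the lines of the gist above (hedged: the `example.com/myorg/myremote` import path is a placeholder, and the backend is assumed to call `fs.Register` from its own `init`):

```go
// main.go: build with `go build -buildmode=plugin -o myremote.so .`
package main

// The blank import runs the backend's init(), which registers it with
// rclone. The module path is a hypothetical placeholder, not a real one.
import (
    _ "example.com/myorg/myremote"
)

// A Go plugin must be package main; main itself is never executed.
func main() {}
```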
## Keeping a backend or command out of tree

Rclone was designed to be modular, so it is very easy to keep a backend
or a command out of the main rclone source tree.

So, for example, if you had a backend which accessed your proprietary
systems, or a command which was specialised for your needs, you could
add them out of tree.

This may be easier than using a plugin and is supported on all
platforms, not just macOS and Linux.

This is explained further in https://github.com/rclone/rclone_out_of_tree_example
which has an example of an out of tree backend `ram` (which is a
renamed version of the `memory` backend).
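Concretely, an out-of-tree build is a small custom `main` that links rclone together with the private package. A sketch modelled on the linked example repository (hedged: the `example.com/...` import is a hypothetical placeholder; `cmd.Main()` is rclone's regular entry point):

```go
// rclone.go: a custom rclone binary with one extra backend linked in.
package main

import (
    _ "github.com/rclone/rclone/backend/all" // all standard backends
    "github.com/rclone/rclone/cmd"
    _ "github.com/rclone/rclone/cmd/all" // all standard commands

    // The private backend registers itself in init(); placeholder path.
    _ "example.com/myorg/backend/ram"
)

func main() {
    cmd.Main()
}
```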
3982 MANUAL.html (generated)
File diff suppressed because it is too large

3339 MANUAL.txt (generated)
File diff suppressed because it is too large

23 README.md
@@ -1,4 +1,20 @@

<div align="center">
    <sup>Special thanks to our sponsor:</sup>
    <br>
    <br>
    <a href="https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103">
        <div>
            <img src="https://rclone.org/img/logos/warp-github.svg" width="300" alt="Warp">
        </div>
        <b>Warp is a modern, Rust-based terminal with AI built in so you and your team can build great software, faster.</b>
        <div>
            <sup>Visit warp.dev to learn more.</sup>
        </div>
    </a>
    <br>
    <hr>
</div>
<br>

[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
@@ -40,7 +56,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
* Fastmail Files [:page_facing_up:](https://rclone.org/webdav/#fastmail-files)
* Files.com [:page_facing_up:](https://rclone.org/filescom/)
* FlashBlade [:page_facing_up:](https://rclone.org/s3/#pure-storage-flashblade)
* FTP [:page_facing_up:](https://rclone.org/ftp/)
* GoFile [:page_facing_up:](https://rclone.org/gofile/)
* Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
@@ -65,8 +80,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Magalu Object Storage [:page_facing_up:](https://rclone.org/s3/#magalu)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* MEGA [:page_facing_up:](https://rclone.org/mega/)
* MEGA S4 Object Storage [:page_facing_up:](https://rclone.org/s3/#mega)
* Mega [:page_facing_up:](https://rclone.org/mega/)
* Memory [:page_facing_up:](https://rclone.org/memory/)
* Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
* Microsoft Azure Files Storage [:page_facing_up:](https://rclone.org/azurefiles/)
@@ -96,7 +110,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
@@ -14,12 +14,10 @@ import (
    _ "github.com/rclone/rclone/backend/combine"
    _ "github.com/rclone/rclone/backend/compress"
    _ "github.com/rclone/rclone/backend/crypt"
    _ "github.com/rclone/rclone/backend/doi"
    _ "github.com/rclone/rclone/backend/drive"
    _ "github.com/rclone/rclone/backend/dropbox"
    _ "github.com/rclone/rclone/backend/fichier"
    _ "github.com/rclone/rclone/backend/filefabric"
    _ "github.com/rclone/rclone/backend/filelu"
    _ "github.com/rclone/rclone/backend/filescom"
    _ "github.com/rclone/rclone/backend/ftp"
    _ "github.com/rclone/rclone/backend/gofile"
@@ -44,7 +44,7 @@ import (
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/list"
    "github.com/rclone/rclone/fs/walk"
    "github.com/rclone/rclone/lib/atexit"
    "github.com/rclone/rclone/lib/bucket"
    "github.com/rclone/rclone/lib/encoder"
@@ -612,9 +612,6 @@ func parsePath(path string) (root string) {
// relative to f.root
func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) {
    containerName, containerPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
    if f.opt.DirectoryMarkers && strings.HasSuffix(containerPath, "//") {
        containerPath = containerPath[:len(containerPath)-1]
    }
    return f.opt.Enc.FromStandardName(containerName), f.opt.Enc.FromStandardPath(containerPath)
}
@@ -931,7 +928,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    }
    case opt.ClientID != "" && opt.Tenant != "" && opt.Username != "" && opt.Password != "":
        // User with username and password
        //nolint:staticcheck // this is deprecated due to Azure policy
        options := azidentity.UsernamePasswordCredentialOptions{
            ClientOptions: policyClientOptions,
        }
@@ -1217,7 +1213,7 @@ func (f *Fs) list(ctx context.Context, containerName, directory, prefix string,
            continue
        }
        // process directory markers as directories
        remote, _ = strings.CutSuffix(remote, "/")
        remote = strings.TrimRight(remote, "/")
    }
    remote = remote[len(prefix):]
    if addContainer {
@@ -1382,7 +1378,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
    containerName, directory := f.split(dir)
    list := list.NewHelper(callback)
    list := walk.NewListRHelper(callback)
    listR := func(containerName, directory, prefix string, addContainer bool) error {
        return f.list(ctx, containerName, directory, prefix, addContainer, true, int32(f.opt.ListChunkSize), func(remote string, object *container.BlobItem, isDirectory bool) error {
            entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
@@ -1538,7 +1534,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {

// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
    remote, _ = strings.CutSuffix(remote, "/")
    remote = strings.TrimRight(remote, "/")
    dir := path.Dir(remote)
    if dir == "/" || dir == "." {
        dir = ""
@@ -1772,7 +1768,7 @@ func (f *Fs) copyMultipart(ctx context.Context, remote, dstContainer, dstPath st
    var (
        srcSize  = src.size
        partSize = int64(chunksize.Calculator(o, src.size, blockblob.MaxBlocks, f.opt.ChunkSize))
        numParts = (srcSize + partSize - 1) / partSize
        numParts = (srcSize-1)/partSize + 1
        blockIDs = make([]string, numParts) // list of blocks for finalize
        g, gCtx  = errgroup.WithContext(ctx)
        checker  = newCheckForInvalidBlockOrBlob("copy", o)
@@ -2180,6 +2176,11 @@ func (o *Object) getTags() (tags map[string]string) {
// getBlobSVC creates a blob client
func (o *Object) getBlobSVC() *blob.Client {
    container, directory := o.split()
    // If we are trying to remove an all / directory marker then
    // this will have one / too many now.
    if bucket.IsAllSlashes(o.remote) {
        directory = strings.TrimSuffix(directory, "/")
    }
    return o.fs.getBlobSVC(container, directory)
}
@@ -2862,9 +2863,6 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
            return ui, err
        }
    }
    // if ui.isDirMarker && strings.HasSuffix(containerPath, "//") {
    // 	containerPath = containerPath[:len(containerPath)-1]
    // }

    // Update Mod time
    o.updateMetadataWithModTime(src.ModTime(ctx))
@@ -516,7 +516,6 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
    }
    case opt.ClientID != "" && opt.Tenant != "" && opt.Username != "" && opt.Password != "":
        // User with username and password
        //nolint:staticcheck // this is deprecated due to Azure policy
        options := azidentity.UsernamePasswordCredentialOptions{
            ClientOptions: policyClientOptions,
        }
@@ -922,7 +921,7 @@ func (o *Object) setMetadata(resp *file.GetPropertiesResponse) {
    }
}

// getMetadata gets the metadata if it hasn't already been fetched
// readMetaData gets the metadata if it hasn't already been fetched
func (o *Object) getMetadata(ctx context.Context) error {
    resp, err := o.fileClient().GetProperties(ctx, nil)
    if err != nil {
@@ -31,8 +31,8 @@ import (
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/list"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fs/walk"
    "github.com/rclone/rclone/lib/bucket"
    "github.com/rclone/rclone/lib/encoder"
    "github.com/rclone/rclone/lib/multipart"
@@ -918,7 +918,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
    bucket, directory := f.split(dir)
    list := list.NewHelper(callback)
    list := walk.NewListRHelper(callback)
    listR := func(bucket, directory, prefix string, addBucket bool) error {
        last := ""
        return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
@@ -1883,14 +1883,9 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
    // --b2-download-url cloudflare strips the Content-Length
    // headers (presumably so it can inject stuff) so use the old
    // length read from the listing.
    // Additionally, the official examples return S3 headers
    // instead of native, i.e. no file ID, use ones from listing.
    if info.Size < 0 {
        info.Size = o.size
    }
    if info.ID == "" {
        info.ID = o.id
    }
    return resp, info, nil
}
5 backend/cache/cache.go (vendored)
@@ -29,7 +29,6 @@ import (
    "github.com/rclone/rclone/fs/config/obscure"
    "github.com/rclone/rclone/fs/fspath"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/list"
    "github.com/rclone/rclone/fs/rc"
    "github.com/rclone/rclone/fs/walk"
    "github.com/rclone/rclone/lib/atexit"
@@ -1087,7 +1086,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
    return cachedEntries, nil
}

func (f *Fs) recurse(ctx context.Context, dir string, list *list.Helper) error {
func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
    entries, err := f.List(ctx, dir)
    if err != nil {
        return err
@@ -1139,7 +1138,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
    }

    // if we're here, we're gonna do a standard recursive traversal and cache everything
    list := list.NewHelper(callback)
    list := walk.NewListRHelper(callback)
    err = f.recurse(ctx, dir, list)
    if err != nil {
        return err
2 backend/cache/cache_test.go (vendored)
@@ -17,7 +17,7 @@ func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestCache:",
        NilObject:  (*cache.Object)(nil),
        UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata", "ListP"},
        UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata"},
        UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata", "SetMetadata"},
        UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
        SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
@@ -356,8 +356,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
        DirModTimeUpdatesOnWrite: true,
    }).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs)

    f.features.ListR = nil // Recursive listing may cause chunker to skip files
    f.features.ListP = nil // ListP not supported yet
    f.features.Disable("ListR") // Recursive listing may cause chunker to skip files

    return f, err
}
@@ -1861,8 +1860,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,

// baseMove chains to the wrapped Move or simulates it by Copy+Delete
func (f *Fs) baseMove(ctx context.Context, src fs.Object, remote string, delMode int) (fs.Object, error) {
    ctx, ci := fs.AddConfig(ctx)
    ci.NameTransform = nil // ensure operations.Move does not double-transform here
    var (
        dest fs.Object
        err  error
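The `fs.AddConfig`/`NameTransform` lines in this hunk carry the convmv fix: the inner move must not re-apply `--name-transform`. A minimal sketch of the mechanism, on the assumption (per rclone's `fs.AddConfig`) that the returned `*fs.ConfigInfo` is a copy bound to the derived context, so the override stays local:

```go
package example

import (
    "context"

    "github.com/rclone/rclone/fs"
)

// moveWithoutTransform is a hedged sketch (not the rclone source): it
// scopes a config override to one call tree by copying the config into
// a derived context via fs.AddConfig.
func moveWithoutTransform(ctx context.Context, do func(ctx context.Context)) {
    ctx, ci := fs.AddConfig(ctx) // ci is a copy attached to the new ctx
    ci.NameTransform = nil       // cleared only for calls made with this ctx
    do(ctx)                      // callers holding the parent ctx are unaffected
}
```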
@@ -46,7 +46,6 @@ func TestIntegration(t *testing.T) {
            "DirCacheFlush",
            "UserInfo",
            "Disconnect",
            "ListP",
        },
    }
    if *fstest.RemoteName == "" {
@@ -18,7 +18,7 @@ type CloudinaryEncoder interface {
    ToStandardPath(string) string
    // ToStandardName takes name in this encoding and converts
    // it to Standard encoding.
    ToStandardName(string, string) string
    ToStandardName(string) string
    // Encoded root of the remote (as passed into NewFs)
    FromStandardFullPath(string) string
}
@@ -8,9 +8,7 @@ import (
    "fmt"
    "io"
    "net/http"
    "net/url"
    "path"
    "slices"
    "strconv"
    "strings"
    "time"
@@ -105,39 +103,19 @@ func init() {
            Advanced: true,
            Help:     "Wait N seconds for eventual consistency of the databases that support the backend operation",
        },
        {
            Name:     "adjust_media_files_extensions",
            Default:  true,
            Advanced: true,
            Help:     "Cloudinary handles media formats as a file attribute and strips it from the name, which is unlike most other file systems",
        },
        {
            Name: "media_extensions",
            Default: []string{
                "3ds", "3g2", "3gp", "ai", "arw", "avi", "avif", "bmp", "bw",
                "cr2", "cr3", "djvu", "dng", "eps3", "fbx", "flif", "flv", "gif",
                "glb", "gltf", "hdp", "heic", "heif", "ico", "indd", "jp2", "jpe",
                "jpeg", "jpg", "jxl", "jxr", "m2ts", "mov", "mp4", "mpeg", "mts",
                "mxf", "obj", "ogv", "pdf", "ply", "png", "psd", "svg", "tga",
                "tif", "tiff", "ts", "u3ma", "usdz", "wdp", "webm", "webp", "wmv"},
            Advanced: true,
            Help:     "Cloudinary supported media extensions",
        },
    },
})
}

// Options defines the configuration for this backend
type Options struct {
    CloudName                  string               `config:"cloud_name"`
    APIKey                     string               `config:"api_key"`
    APISecret                  string               `config:"api_secret"`
    UploadPrefix               string               `config:"upload_prefix"`
    UploadPreset               string               `config:"upload_preset"`
    Enc                        encoder.MultiEncoder `config:"encoding"`
    EventuallyConsistentDelay  fs.Duration          `config:"eventually_consistent_delay"`
    MediaExtensions            []string             `config:"media_extensions"`
    AdjustMediaFilesExtensions bool                 `config:"adjust_media_files_extensions"`
    CloudName                 string               `config:"cloud_name"`
    APIKey                    string               `config:"api_key"`
    APISecret                 string               `config:"api_secret"`
    UploadPrefix              string               `config:"upload_prefix"`
    UploadPreset              string               `config:"upload_preset"`
    Enc                       encoder.MultiEncoder `config:"encoding"`
    EventuallyConsistentDelay fs.Duration          `config:"eventually_consistent_delay"`
}

// Fs represents a remote cloudinary server
@@ -225,18 +203,6 @@ func (f *Fs) FromStandardPath(s string) string {

// FromStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardName(s string) string {
    if f.opt.AdjustMediaFilesExtensions {
        parsedURL, err := url.Parse(s)
        ext := ""
        if err != nil {
            fs.Logf(nil, "Error parsing URL: %v", err)
        } else {
            ext = path.Ext(parsedURL.Path)
            if slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
                s = strings.TrimSuffix(parsedURL.Path, ext)
            }
        }
    }
    return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06")
}
@@ -246,20 +212,8 @@ func (f *Fs) ToStandardPath(s string) string {
}

// ToStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardName(s string, assetURL string) string {
    ext := ""
    if f.opt.AdjustMediaFilesExtensions {
        parsedURL, err := url.Parse(assetURL)
        if err != nil {
            fs.Logf(nil, "Error parsing URL: %v", err)
        } else {
            ext = path.Ext(parsedURL.Path)
            if !slices.Contains(f.opt.MediaExtensions, strings.ToLower(strings.TrimPrefix(ext, "."))) {
                ext = ""
            }
        }
    }
    return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&") + ext
func (f *Fs) ToStandardName(s string) string {
    return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&")
}

// FromStandardFullPath encodes a full path to Cloudinary standard
@@ -377,7 +331,10 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
    }

    for _, asset := range results.Assets {
        remote := path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName, asset.SecureURL))
        remote := api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName)
        if dir != "" {
            remote = path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName))
        }
        o := &Object{
            fs:     f,
            remote: remote,
@@ -20,7 +20,6 @@ import (
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/list"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fs/walk"
    "golang.org/x/sync/errgroup"
@@ -266,9 +265,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
        }
    }

    // Enable ListP always
    features.ListP = f.ListP

    // Enable Purge when any upstreams support it
    if features.Purge == nil {
        for _, u := range f.upstreams {
@@ -813,52 +809,24 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    return list.WithListP(ctx, dir, f)
}

// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
    // defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
    if f.root == "" && dir == "" {
        entries := make(fs.DirEntries, 0, len(f.upstreams))
        entries = make(fs.DirEntries, 0, len(f.upstreams))
        for combineDir := range f.upstreams {
            d := fs.NewLimitedDirWrapper(combineDir, fs.NewDir(combineDir, f.when))
            entries = append(entries, d)
        }
        return callback(entries)
        return entries, nil
    }
    u, uRemote, err := f.findUpstream(dir)
    if err != nil {
        return err
        return nil, err
    }
    wrappedCallback := func(entries fs.DirEntries) error {
        entries, err := u.wrapEntries(ctx, entries)
        if err != nil {
            return err
        }
        return callback(entries)
    entries, err = u.f.List(ctx, uRemote)
    if err != nil {
        return nil, err
    }
    listP := u.f.Features().ListP
    if listP == nil {
        entries, err := u.f.List(ctx, uRemote)
        if err != nil {
            return err
        }
        return wrappedCallback(entries)
    }
    return listP(ctx, uRemote, wrappedCallback)
    return u.wrapEntries(ctx, entries)
}

// ListR lists the objects and directories of the Fs starting
@@ -29,7 +29,6 @@ import (
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/fspath"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/list"
    "github.com/rclone/rclone/fs/log"
    "github.com/rclone/rclone/fs/object"
    "github.com/rclone/rclone/fs/operations"
@@ -209,8 +208,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
    if !operations.CanServerSideMove(wrappedFs) {
        f.features.Disable("PutStream")
    }
    // Enable ListP always
    f.features.ListP = f.ListP

    return f, err
}
@@ -355,39 +352,11 @@ func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
// found.
// List entries and process them
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    return list.WithListP(ctx, dir, f)
}

// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
    wrappedCallback := func(entries fs.DirEntries) error {
        entries, err := f.processEntries(entries)
        if err != nil {
            return err
        }
        return callback(entries)
    entries, err = f.Fs.List(ctx, dir)
    if err != nil {
        return nil, err
    }
    listP := f.Fs.Features().ListP
    if listP == nil {
        entries, err := f.Fs.List(ctx, dir)
        if err != nil {
            return err
        }
        return wrappedCallback(entries)
    }
    return listP(ctx, dir, wrappedCallback)
    return f.processEntries(entries)
}

// ListR lists the objects and directories of the Fs starting
@@ -18,7 +18,6 @@ import (
    "github.com/rclone/rclone/fs/config/obscure"
    "github.com/rclone/rclone/fs/fspath"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/list"
)

// Globals
@@ -294,9 +293,6 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
        PartialUploads: true,
    }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)

    // Enable ListP always
    f.features.ListP = f.ListP

    return f, err
}
@@ -420,40 +416,11 @@ func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntr
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    return list.WithListP(ctx, dir, f)
}

// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
    wrappedCallback := func(entries fs.DirEntries) error {
        entries, err := f.encryptEntries(ctx, entries)
        if err != nil {
            return err
        }
        return callback(entries)
    entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
    if err != nil {
        return nil, err
    }
    listP := f.Fs.Features().ListP
    encryptedDir := f.cipher.EncryptDirName(dir)
    if listP == nil {
        entries, err := f.Fs.List(ctx, encryptedDir)
        if err != nil {
            return err
        }
        return wrappedCallback(entries)
    }
    return listP(ctx, encryptedDir, wrappedCallback)
    return f.encryptEntries(ctx, entries)
}

// ListR lists the objects and directories of the Fs starting
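The List/ListP adapter shuffled around in the wrapping backends above follows one pattern, sketched here in isolation. This is a hedged illustration, not the rclone source: `Fs` and `transformEntries` are hypothetical stand-ins for the per-backend wrapper and its `wrapEntries`/`processEntries`/`encryptEntries` hook, assuming `list.WithListP` and the `ListP` feature behave as the hunks show.

```go
package example

import (
    "context"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/list"
)

// Fs is a hypothetical wrapping backend around another fs.Fs.
type Fs struct {
    fs.Fs
}

// transformEntries stands in for the per-backend hook that rewrites
// each tranche of entries (decrypt names, rename, and so on).
func (f *Fs) transformEntries(entries fs.DirEntries) (fs.DirEntries, error) {
    return entries, nil
}

// List is implemented in terms of ListP via the helper.
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
    return list.WithListP(ctx, dir, f)
}

// ListP streams tranches of entries to callback, transforming each
// tranche, and prefers the wrapped backend's ListP when it has one.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
    wrappedCallback := func(entries fs.DirEntries) error {
        entries, err := f.transformEntries(entries)
        if err != nil {
            return err
        }
        return callback(entries)
    }
    if listP := f.Fs.Features().ListP; listP != nil {
        return listP(ctx, dir, wrappedCallback)
    }
    entries, err := f.Fs.List(ctx, dir)
    if err != nil {
        return err
    }
    return wrappedCallback(entries)
}
```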
@@ -1,38 +0,0 @@
// Type definitions specific to Dataverse

package api

// DataverseDatasetResponse is returned by the Dataverse dataset API
type DataverseDatasetResponse struct {
    Status string           `json:"status"`
    Data   DataverseDataset `json:"data"`
}

// DataverseDataset is the representation of a dataset
type DataverseDataset struct {
    LatestVersion DataverseDatasetVersion `json:"latestVersion"`
}

// DataverseDatasetVersion is the representation of a dataset version
type DataverseDatasetVersion struct {
    LastUpdateTime string          `json:"lastUpdateTime"`
    Files          []DataverseFile `json:"files"`
}

// DataverseFile is the representation of a file found in a dataset
type DataverseFile struct {
    DirectoryLabel string            `json:"directoryLabel"`
    DataFile       DataverseDataFile `json:"dataFile"`
}

// DataverseDataFile represents file metadata details
type DataverseDataFile struct {
    ID                 int64  `json:"id"`
    Filename           string `json:"filename"`
    ContentType        string `json:"contentType"`
    FileSize           int64  `json:"filesize"`
    OriginalFileFormat string `json:"originalFileFormat"`
    OriginalFileSize   int64  `json:"originalFileSize"`
    OriginalFileName   string `json:"originalFileName"`
    MD5                string `json:"md5"`
}
@@ -1,33 +0,0 @@
// Type definitions specific to InvenioRDM

package api

// InvenioRecordResponse is the representation of a record stored in InvenioRDM
type InvenioRecordResponse struct {
    Links InvenioRecordResponseLinks `json:"links"`
}

// InvenioRecordResponseLinks represents a record's links
type InvenioRecordResponseLinks struct {
    Self string `json:"self"`
}

// InvenioFilesResponse is the representation of a record's files
type InvenioFilesResponse struct {
    Entries []InvenioFilesResponseEntry `json:"entries"`
}

// InvenioFilesResponseEntry is the representation of a file entry
type InvenioFilesResponseEntry struct {
    Key      string                         `json:"key"`
    Checksum string                         `json:"checksum"`
    Size     int64                          `json:"size"`
    Updated  string                         `json:"updated"`
    MimeType string                         `json:"mimetype"`
    Links    InvenioFilesResponseEntryLinks `json:"links"`
}

// InvenioFilesResponseEntryLinks represents file links details
type InvenioFilesResponseEntryLinks struct {
    Content string `json:"content"`
}
@@ -1,26 +0,0 @@
// Package api has general type definitions for doi
package api

// DoiResolverResponse is returned by the DOI resolver API
//
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
type DoiResolverResponse struct {
    ResponseCode int                        `json:"responseCode"`
    Handle       string                     `json:"handle"`
    Values       []DoiResolverResponseValue `json:"values"`
}

// DoiResolverResponseValue is a single handle record value
type DoiResolverResponseValue struct {
    Index     int                          `json:"index"`
    Type      string                       `json:"type"`
    Data      DoiResolverResponseValueData `json:"data"`
    TTL       int                          `json:"ttl"`
    Timestamp string                       `json:"timestamp"`
}

// DoiResolverResponseValueData is the data held in a handle value
type DoiResolverResponseValueData struct {
    Format string `json:"format"`
    Value  any    `json:"value"`
}
@@ -1,112 +0,0 @@
// Implementation for Dataverse

package doi

import (
    "context"
    "fmt"
    "net/http"
    "net/url"
    "path"
    "strings"
    "time"

    "github.com/rclone/rclone/backend/doi/api"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/lib/rest"
)

// Returns true if resolvedURL is likely a DOI hosted on a Dataverse installation
func activateDataverse(resolvedURL *url.URL) (isActive bool) {
    queryValues := resolvedURL.Query()
    persistentID := queryValues.Get("persistentId")
    return persistentID != ""
}

// Resolve the main API endpoint for a DOI hosted on a Dataverse installation
func resolveDataverseEndpoint(resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
    queryValues := resolvedURL.Query()
    persistentID := queryValues.Get("persistentId")

    query := url.Values{}
    query.Add("persistentId", persistentID)
    endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/datasets/:persistentId/", RawQuery: query.Encode()})

    return Dataverse, endpointURL, nil
}

// dataverseProvider implements the doiProvider interface for Dataverse installations
type dataverseProvider struct {
    f *Fs
}

// ListEntries returns the full list of entries found at the remote, regardless of root
func (dp *dataverseProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
    // Use the cache if populated
    cachedEntries, found := dp.f.cache.GetMaybe("files")
    if found {
        parsedEntries, ok := cachedEntries.([]Object)
        if ok {
            for _, entry := range parsedEntries {
                newEntry := entry
                entries = append(entries, &newEntry)
            }
            return entries, nil
        }
    }

    filesURL := dp.f.endpoint
    var res *http.Response
    var result api.DataverseDatasetResponse
    opts := rest.Opts{
        Method:     "GET",
        Path:       strings.TrimLeft(filesURL.EscapedPath(), "/"),
        Parameters: filesURL.Query(),
    }
    err = dp.f.pacer.Call(func() (bool, error) {
        res, err = dp.f.srv.CallJSON(ctx, &opts, nil, &result)
        return shouldRetry(ctx, res, err)
    })
    if err != nil {
        return nil, fmt.Errorf("readDir failed: %w", err)
    }
    modTime, modTimeErr := time.Parse(time.RFC3339, result.Data.LatestVersion.LastUpdateTime)
    if modTimeErr != nil {
        fs.Logf(dp.f, "error: could not parse last update time %v", modTimeErr)
        modTime = timeUnset
    }
    for _, file := range result.Data.LatestVersion.Files {
        contentURLPath := fmt.Sprintf("/api/access/datafile/%d", file.DataFile.ID)
        query := url.Values{}
        query.Add("format", "original")
        contentURL := dp.f.endpoint.ResolveReference(&url.URL{Path: contentURLPath, RawQuery: query.Encode()})
        entry := &Object{
            fs:          dp.f,
            remote:      path.Join(file.DirectoryLabel, file.DataFile.Filename),
            contentURL:  contentURL.String(),
            size:        file.DataFile.FileSize,
            modTime:     modTime,
            md5:         file.DataFile.MD5,
            contentType: file.DataFile.ContentType,
        }
        if file.DataFile.OriginalFileName != "" {
            entry.remote = path.Join(file.DirectoryLabel, file.DataFile.OriginalFileName)
            entry.size = file.DataFile.OriginalFileSize
            entry.contentType = file.DataFile.OriginalFileFormat
        }
        entries = append(entries, entry)
    }
    // Populate the cache
    cacheEntries := []Object{}
    for _, entry := range entries {
        cacheEntries = append(cacheEntries, *entry)
    }
    dp.f.cache.Put("files", cacheEntries)
    return entries, nil
}

func newDataverseProvider(f *Fs) doiProvider {
    return &dataverseProvider{
        f: f,
    }
}
@@ -1,649 +0,0 @@
// Package doi provides a filesystem interface for digital objects identified by DOIs.
//
// See: https://www.doi.org/the-identifier/what-is-a-doi/
package doi

import (
    "context"
    "errors"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "path"
    "strings"
    "time"

    "github.com/rclone/rclone/backend/doi/api"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/lib/cache"
    "github.com/rclone/rclone/lib/pacer"
    "github.com/rclone/rclone/lib/rest"
)

const (
    // the URL of the DOI resolver
    //
    // Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
    doiResolverAPIURL = "https://doi.org/api"
    minSleep          = 10 * time.Millisecond
    maxSleep          = 2 * time.Second
    decayConstant     = 2 // bigger for slower decay, exponential
)

var (
    errorReadOnly = errors.New("doi remotes are read only")
    timeUnset     = time.Unix(0, 0)
)
func init() {
    fsi := &fs.RegInfo{
        Name:        "doi",
        Description: "DOI datasets",
        NewFs:       NewFs,
        CommandHelp: commandHelp,
        Options: []fs.Option{{
            Name:     "doi",
            Help:     "The DOI or the doi.org URL.",
            Required: true,
        }, {
            Name: fs.ConfigProvider,
            Help: `DOI provider.

The DOI provider can be set when rclone does not automatically recognize a supported DOI provider.`,
            Examples: []fs.OptionExample{
                {
                    Value: "auto",
                    Help:  "Auto-detect provider",
                },
                {
                    Value: string(Zenodo),
                    Help:  "Zenodo",
                }, {
                    Value: string(Dataverse),
                    Help:  "Dataverse",
                }, {
                    Value: string(Invenio),
                    Help:  "Invenio",
                }},
            Required: false,
            Advanced: true,
        }, {
            Name: "doi_resolver_api_url",
            Help: `The URL of the DOI resolver API to use.

The DOI resolver can be set for testing or for cases when the canonical DOI resolver API cannot be used.

Defaults to "https://doi.org/api".`,
            Required: false,
            Advanced: true,
        }},
    }
    fs.Register(fsi)
}
// Provider defines the type of provider hosting the DOI
type Provider string

const (
    // Zenodo provider, see https://zenodo.org
    Zenodo Provider = "zenodo"
    // Dataverse provider, see https://dataverse.harvard.edu
    Dataverse Provider = "dataverse"
    // Invenio provider, see https://inveniordm.docs.cern.ch
    Invenio Provider = "invenio"
)

// Options defines the configuration for this backend
type Options struct {
    Doi               string `config:"doi"`                  // The DOI, a digital identifier of an object, usually a dataset
    Provider          string `config:"provider"`             // The DOI provider
    DoiResolverAPIURL string `config:"doi_resolver_api_url"` // The URL of the DOI resolver API to use.
}

// Fs stores the interface to the remote HTTP files
type Fs struct {
    name        string       // name of this remote
    root        string       // the path we are working on
    provider    Provider     // the DOI provider
    doiProvider doiProvider  // the interface used to interact with the DOI provider
    features    *fs.Features // optional features
    opt         Options      // options for this backend
    ci          *fs.ConfigInfo // global config
    endpoint    *url.URL     // the main API endpoint for this remote
    endpointURL string       // endpoint as a string
    srv         *rest.Client // the connection to the server
    pacer       *fs.Pacer    // pacer for API calls
    cache       *cache.Cache // a cache for the remote metadata
}

// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
type Object struct {
    fs          *Fs       // what this object is part of
    remote      string    // the remote path
    contentURL  string    // the URL where the contents of the file can be downloaded
    size        int64     // size of the object
    modTime     time.Time // modification time of the object
    contentType string    // content type of the object
    md5         string    // MD5 hash of the object content
}

// doiProvider is the interface used to list objects in a DOI
type doiProvider interface {
    // ListEntries returns the full list of entries found at the remote, regardless of root
    ListEntries(ctx context.Context) (entries []*Object, err error)
}
// Parse the input string as a DOI
// Examples:
// 10.1000/182 -> 10.1000/182
// https://doi.org/10.1000/182 -> 10.1000/182
// doi:10.1000/182 -> 10.1000/182
func parseDoi(doi string) string {
    doiURL, err := url.Parse(doi)
    if err != nil {
        return doi
    }
    if doiURL.Scheme == "doi" {
        return strings.TrimLeft(strings.TrimPrefix(doi, "doi:"), "/")
    }
    if strings.HasSuffix(doiURL.Hostname(), "doi.org") {
        return strings.TrimLeft(doiURL.Path, "/")
    }
    return doi
}

// Resolve a DOI to a URL
// Reference: https://www.doi.org/the-identifier/resources/factsheets/doi-resolution-documentation
func resolveDoiURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (doiURL *url.URL, err error) {
    resolverURL := opt.DoiResolverAPIURL
    if resolverURL == "" {
        resolverURL = doiResolverAPIURL
    }

    var result api.DoiResolverResponse
    params := url.Values{}
    params.Add("index", "1")
    opts := rest.Opts{
        Method:     "GET",
        RootURL:    resolverURL,
        Path:       "/handles/" + opt.Doi,
        Parameters: params,
    }
    err = pacer.Call(func() (bool, error) {
        res, err := srv.CallJSON(ctx, &opts, nil, &result)
        return shouldRetry(ctx, res, err)
    })
    if err != nil {
        return nil, err
    }

    if result.ResponseCode != 1 {
        return nil, fmt.Errorf("could not resolve DOI (error code %d)", result.ResponseCode)
    }
    resolvedURLStr := ""
    for _, value := range result.Values {
        if value.Type == "URL" && value.Data.Format == "string" {
            valueStr, ok := value.Data.Value.(string)
            if !ok {
                return nil, fmt.Errorf("could not resolve DOI (incorrect response format)")
            }
            resolvedURLStr = valueStr
        }
    }
    resolvedURL, err := url.Parse(resolvedURLStr)
    if err != nil {
        return nil, err
    }
    return resolvedURL, nil
}
// Resolve the passed configuration into a provider and endpoint
func resolveEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, opt *Options) (provider Provider, endpoint *url.URL, err error) {
    resolvedURL, err := resolveDoiURL(ctx, srv, pacer, opt)
    if err != nil {
        return "", nil, err
    }

    switch opt.Provider {
    case string(Dataverse):
        return resolveDataverseEndpoint(resolvedURL)
    case string(Invenio):
        return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
    case string(Zenodo):
        return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
    }

    hostname := strings.ToLower(resolvedURL.Hostname())
    if hostname == "dataverse.harvard.edu" || activateDataverse(resolvedURL) {
        return resolveDataverseEndpoint(resolvedURL)
    }
    if hostname == "zenodo.org" || strings.HasSuffix(hostname, ".zenodo.org") {
        return resolveZenodoEndpoint(ctx, srv, pacer, resolvedURL, opt.Doi)
    }
    if activateInvenio(ctx, srv, pacer, resolvedURL) {
        return resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
    }

    return "", nil, fmt.Errorf("provider '%s' is not supported", resolvedURL.Hostname())
}

// Make the http connection from the passed options
func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err error) {
    provider, endpoint, err := resolveEndpoint(ctx, f.srv, f.pacer, opt)
    if err != nil {
        return false, err
    }

    // Update f with the new parameters
    f.srv.SetRoot(endpoint.ResolveReference(&url.URL{Path: "/"}).String())
    f.endpoint = endpoint
    f.endpointURL = endpoint.String()
    f.provider = provider
    f.opt.Provider = string(provider)

    switch f.provider {
    case Dataverse:
        f.doiProvider = newDataverseProvider(f)
    case Invenio, Zenodo:
        f.doiProvider = newInvenioProvider(f)
    default:
        return false, fmt.Errorf("provider type '%s' not supported", f.provider)
    }

    // Determine if the root is a file
    entries, err := f.doiProvider.ListEntries(ctx)
    if err != nil {
        return false, err
    }
    for _, entry := range entries {
        if entry.remote == f.root {
            isFile = true
            break
        }
    }
    return isFile, nil
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
    429, // Too Many Requests.
    500, // Internal Server Error
    502, // Bad Gateway
    503, // Service Unavailable
    504, // Gateway Timeout
    509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this res and err
// deserve to be retried. It returns the err as a convenience.
func shouldRetry(ctx context.Context, res *http.Response, err error) (bool, error) {
    if fserrors.ContextError(ctx, &err) {
        return false, err
    }
    return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
}
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
    root = strings.Trim(root, "/")

    // Parse config into Options struct
    opt := new(Options)
    err := configstruct.Set(m, opt)
    if err != nil {
        return nil, err
    }
    opt.Doi = parseDoi(opt.Doi)

    client := fshttp.NewClient(ctx)
    ci := fs.GetConfig(ctx)
    f := &Fs{
        name:  name,
        root:  root,
        opt:   *opt,
        ci:    ci,
        srv:   rest.NewClient(client),
        pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
        cache: cache.New(),
    }
    f.features = (&fs.Features{
        CanHaveEmptyDirectories: true,
    }).Fill(ctx, f)

    isFile, err := f.httpConnection(ctx, opt)
    if err != nil {
        return nil, err
    }

    if isFile {
        // return an error with an fs which points to the parent
        newRoot := path.Dir(f.root)
        if newRoot == "." {
            newRoot = ""
        }
        f.root = newRoot
        return f, fs.ErrorIsFile
    }

    return f, nil
}
// Name returns the configured name of the file system
func (f *Fs) Name() string {
    return f.name
}

// Root returns the root for the filesystem
func (f *Fs) Root() string {
    return f.root
}

// String returns the URL for the filesystem
func (f *Fs) String() string {
    return fmt.Sprintf("DOI %s", f.opt.Doi)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
    return f.features
}

// Precision is the remote http file system's modtime precision, which we have no way of knowing. We estimate at 1s
func (f *Fs) Precision() time.Duration {
    return time.Second
}

// Hashes returns hash.MD5 to indicate that the remote supports MD5 hashes
func (f *Fs) Hashes() hash.Set {
    return hash.Set(hash.MD5)
    // return hash.Set(hash.None)
}

// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    return errorReadOnly
}

// Remove a remote http file object
func (o *Object) Remove(ctx context.Context) error {
    return errorReadOnly
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    return errorReadOnly
}

// NewObject creates a new remote http file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
    entries, err := f.doiProvider.ListEntries(ctx)
    if err != nil {
        return nil, err
    }

    remoteFullPath := remote
    if f.root != "" {
        remoteFullPath = path.Join(f.root, remote)
    }

    for _, entry := range entries {
        if entry.Remote() == remoteFullPath {
            return entry, nil
        }
    }

    return nil, fs.ErrorObjectNotFound
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    fileEntries, err := f.doiProvider.ListEntries(ctx)
    if err != nil {
        return nil, fmt.Errorf("error listing %q: %w", dir, err)
    }

    fullDir := path.Join(f.root, dir)
    if fullDir != "" {
        fullDir += "/"
    }

    dirPaths := map[string]bool{}
    for _, entry := range fileEntries {
        // First, filter out files not in `fullDir`
        if !strings.HasPrefix(entry.remote, fullDir) {
            continue
        }
        // Then, find entries in subfolders
        remotePath := entry.remote
        if fullDir != "" {
            remotePath = strings.TrimLeft(strings.TrimPrefix(remotePath, fullDir), "/")
        }
        parts := strings.SplitN(remotePath, "/", 2)
        if len(parts) == 1 {
            newEntry := *entry
            newEntry.remote = path.Join(dir, remotePath)
            entries = append(entries, &newEntry)
        } else {
            dirPaths[path.Join(dir, parts[0])] = true
        }
    }

    for dirPath := range dirPaths {
        entry := fs.NewDir(dirPath, time.Time{})
        entries = append(entries, entry)
    }

    return entries, nil
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    return nil, errorReadOnly
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    return nil, errorReadOnly
}

// Fs is the filesystem this remote http file object is located within
func (o *Object) Fs() fs.Info {
    return o.fs
}

// String returns the URL to the remote HTTP file
func (o *Object) String() string {
    if o == nil {
        return "<nil>"
    }
    return o.remote
}
// Remote returns the name of the remote HTTP file, relative to the fs root
func (o *Object) Remote() string {
    return o.remote
}

// Hash returns the MD5 of the object; other hash types are unsupported
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
    if t != hash.MD5 {
        return "", hash.ErrUnsupported
    }
    return o.md5, nil
}

// Size returns the size in bytes of the remote http file
func (o *Object) Size() int64 {
    return o.size
}

// ModTime returns the modification time of the remote http file
func (o *Object) ModTime(ctx context.Context) time.Time {
    return o.modTime
}

// SetModTime sets the modification and access time to the specified time
//
// it also updates the info field
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
    return errorReadOnly
}

// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc.)
func (o *Object) Storable() bool {
    return true
}
// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
    fs.FixRangeOption(options, o.size)
    opts := rest.Opts{
        Method:  "GET",
        RootURL: o.contentURL,
        Options: options,
    }
    var res *http.Response
    err = o.fs.pacer.Call(func() (bool, error) {
        res, err = o.fs.srv.Call(ctx, &opts)
        return shouldRetry(ctx, res, err)
    })
    if err != nil {
        return nil, fmt.Errorf("Open failed: %w", err)
    }

    // Handle non-compliant redirects
    if res.Header.Get("Location") != "" {
        newURL, err := res.Location()
        if err == nil {
            opts.RootURL = newURL.String()
            err = o.fs.pacer.Call(func() (bool, error) {
                res, err = o.fs.srv.Call(ctx, &opts)
                return shouldRetry(ctx, res, err)
            })
            if err != nil {
                return nil, fmt.Errorf("Open failed: %w", err)
            }
        }
    }

    return res.Body, nil
}

// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
    return errorReadOnly
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
    return o.contentType
}
var commandHelp = []fs.CommandHelp{{
    Name:  "metadata",
    Short: "Show metadata about the DOI.",
    Long: `This command returns a JSON object with some information about the DOI.

    rclone backend metadata doi:

It returns a JSON object representing metadata about the DOI.
`,
}, {
    Name:  "set",
    Short: "Set command for updating the config parameters.",
    Long: `This set command can be used to update the config parameters
for a running doi backend.

Usage Examples:

    rclone backend set doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
    rclone rc backend/command command=set fs=doi: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
    rclone rc backend/command command=set fs=doi: -o doi=NEW_DOI

The option keys are named as they are in the config file.

This rebuilds the connection to the doi backend when it is called with
the new parameters. Only new parameters need be passed as the values
will default to those currently in use.

It doesn't return anything.
`,
}}
// Command the backend to run a named command
|
||||
//
|
||||
// The command run is name
|
||||
// args may be used to read arguments from
|
||||
// opts may be used to read optional arguments from
|
||||
//
|
||||
// The result should be capable of being JSON encoded
|
||||
// If it is a string or a []string it will be shown to the user
|
||||
// otherwise it will be JSON encoded and shown to the user like that
|
||||
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
|
||||
switch name {
|
||||
case "metadata":
|
||||
return f.ShowMetadata(ctx)
|
||||
case "set":
|
||||
newOpt := f.opt
|
||||
err := configstruct.Set(configmap.Simple(opt), &newOpt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading config: %w", err)
|
||||
}
|
||||
_, err = f.httpConnection(ctx, &newOpt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("updating session: %w", err)
|
||||
}
|
||||
f.opt = newOpt
|
||||
keys := []string{}
|
||||
for k := range opt {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
fs.Logf(f, "Updated config values: %s", strings.Join(keys, ", "))
|
||||
return nil, nil
|
||||
default:
|
||||
return nil, fs.ErrorCommandNotFound
|
||||
}
|
||||
}
|
||||
|
||||
// ShowMetadata returns some metadata about the corresponding DOI
|
||||
func (f *Fs) ShowMetadata(ctx context.Context) (metadata interface{}, err error) {
|
||||
doiURL, err := url.Parse("https://doi.org/" + f.opt.Doi)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := map[string]any{}
|
||||
info["DOI"] = f.opt.Doi
|
||||
info["URL"] = doiURL.String()
|
||||
info["metadataURL"] = f.endpointURL
|
||||
info["provider"] = f.provider
|
||||
return info, nil
|
||||
}
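
// Illustrative sketch (values assumed, not from the original source): for the
// Zenodo-hosted DOI used in the tests below, `rclone backend metadata doi:`
// would print something like
//
//    {
//        "DOI": "10.5281/zenodo.2600782",
//        "URL": "https://doi.org/10.5281/zenodo.2600782",
//        "metadataURL": "https://zenodo.org/api/records/2600782",
//        "provider": "zenodo"
//    }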

// Check the interfaces are satisfied
var (
    _ fs.Fs          = (*Fs)(nil)
    _ fs.PutStreamer = (*Fs)(nil)
    _ fs.Commander   = (*Fs)(nil)
    _ fs.Object      = (*Object)(nil)
    _ fs.MimeTyper   = (*Object)(nil)
)
@@ -1,260 +0,0 @@
package doi

import (
    "context"
    "crypto/md5"
    "encoding/hex"
    "encoding/json"
    "io"
    "net/http"
    "net/http/httptest"
    "net/url"
    "sort"
    "strings"
    "testing"
    "time"

    "github.com/rclone/rclone/backend/doi/api"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/hash"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

var remoteName = "TestDoi"

func TestParseDoi(t *testing.T) {
    // 10.1000/182 -> 10.1000/182
    doi := "10.1000/182"
    parsed := parseDoi(doi)
    assert.Equal(t, "10.1000/182", parsed)

    // https://doi.org/10.1000/182 -> 10.1000/182
    doi = "https://doi.org/10.1000/182"
    parsed = parseDoi(doi)
    assert.Equal(t, "10.1000/182", parsed)

    // https://dx.doi.org/10.1000/182 -> 10.1000/182
    doi = "https://dx.doi.org/10.1000/182"
    parsed = parseDoi(doi)
    assert.Equal(t, "10.1000/182", parsed)

    // doi:10.1000/182 -> 10.1000/182
    doi = "doi:10.1000/182"
    parsed = parseDoi(doi)
    assert.Equal(t, "10.1000/182", parsed)

    // doi://10.1000/182 -> 10.1000/182
    doi = "doi://10.1000/182"
    parsed = parseDoi(doi)
    assert.Equal(t, "10.1000/182", parsed)
}

// prepareMockDoiResolverServer prepares a test server to resolve DOIs
func prepareMockDoiResolverServer(t *testing.T, resolvedURL string) (doiResolverAPIURL string) {
    mux := http.NewServeMux()

    // Handle requests for resolving DOIs
    mux.HandleFunc("GET /api/handles/{handle...}", func(w http.ResponseWriter, r *http.Request) {
        // Check that we are resolving a DOI
        handle := strings.TrimPrefix(r.URL.Path, "/api/handles/")
        assert.NotEmpty(t, handle)
        index := r.URL.Query().Get("index")
        assert.Equal(t, "1", index)

        // Return the most basic response
        result := api.DoiResolverResponse{
            ResponseCode: 1,
            Handle:       handle,
            Values: []api.DoiResolverResponseValue{
                {
                    Index: 1,
                    Type:  "URL",
                    Data: api.DoiResolverResponseValueData{
                        Format: "string",
                        Value:  resolvedURL,
                    },
                },
            },
        }
        resultBytes, err := json.Marshal(result)
        require.NoError(t, err)
        w.Header().Add("Content-Type", "application/json")
        _, err = w.Write(resultBytes)
        require.NoError(t, err)
    })

    // Make the test server
    ts := httptest.NewServer(mux)

    // Close the server at the end of the test
    t.Cleanup(ts.Close)

    return ts.URL + "/api"
}

func md5Sum(text string) string {
    hash := md5.Sum([]byte(text))
    return hex.EncodeToString(hash[:])
}

// prepareMockZenodoServer prepares a test server that mocks Zenodo.org
func prepareMockZenodoServer(t *testing.T, files map[string]string) *httptest.Server {
    mux := http.NewServeMux()

    // Handle requests for a single record
    mux.HandleFunc("GET /api/records/{recordID...}", func(w http.ResponseWriter, r *http.Request) {
        // Check that we are returning data about a single record
        recordID := strings.TrimPrefix(r.URL.Path, "/api/records/")
        assert.NotEmpty(t, recordID)

        // Return the most basic response
        selfURL, err := url.Parse("http://" + r.Host)
        require.NoError(t, err)
        selfURL = selfURL.JoinPath(r.URL.String())
        result := api.InvenioRecordResponse{
            Links: api.InvenioRecordResponseLinks{
                Self: selfURL.String(),
            },
        }
        resultBytes, err := json.Marshal(result)
        require.NoError(t, err)
        w.Header().Add("Content-Type", "application/json")
        _, err = w.Write(resultBytes)
        require.NoError(t, err)
    })
    // Handle requests for listing files in a record
    mux.HandleFunc("GET /api/records/{record}/files", func(w http.ResponseWriter, r *http.Request) {
        // Return the most basic response
        filesBaseURL, err := url.Parse("http://" + r.Host)
        require.NoError(t, err)
        filesBaseURL = filesBaseURL.JoinPath("/api/files/")

        entries := []api.InvenioFilesResponseEntry{}
        for filename, contents := range files {
            entries = append(entries,
                api.InvenioFilesResponseEntry{
                    Key:      filename,
                    Checksum: md5Sum(contents),
                    Size:     int64(len(contents)),
                    Updated:  time.Now().UTC().Format(time.RFC3339),
                    MimeType: "text/plain; charset=utf-8",
                    Links: api.InvenioFilesResponseEntryLinks{
                        Content: filesBaseURL.JoinPath(filename).String(),
                    },
                },
            )
        }

        result := api.InvenioFilesResponse{
            Entries: entries,
        }
        resultBytes, err := json.Marshal(result)
        require.NoError(t, err)
        w.Header().Add("Content-Type", "application/json")
        _, err = w.Write(resultBytes)
        require.NoError(t, err)
    })
    // Handle requests for file contents
    mux.HandleFunc("/api/files/{file}", func(w http.ResponseWriter, r *http.Request) {
        // Check that we are returning the contents of a file
        filename := strings.TrimPrefix(r.URL.Path, "/api/files/")
        assert.NotEmpty(t, filename)
        contents, found := files[filename]
        if !found {
            w.WriteHeader(404)
            return
        }

        // Return the most basic response
        _, err := w.Write([]byte(contents))
        require.NoError(t, err)
    })

    // Make the test server
    ts := httptest.NewServer(mux)

    // Close the server at the end of the test
    t.Cleanup(ts.Close)

    return ts
}

func TestZenodoRemote(t *testing.T) {
    recordID := "2600782"
    doi := "10.5281/zenodo.2600782"

    // The files in the dataset
    files := map[string]string{
        "README.md": "This is a dataset.",
        "data.txt":  "Some data",
    }

    ts := prepareMockZenodoServer(t, files)
    resolvedURL := ts.URL + "/record/" + recordID

    doiResolverAPIURL := prepareMockDoiResolverServer(t, resolvedURL)

    testConfig := configmap.Simple{
        "type":                 "doi",
        "doi":                  doi,
        "provider":             "zenodo",
        "doi_resolver_api_url": doiResolverAPIURL,
    }
    f, err := NewFs(context.Background(), remoteName, "", testConfig)
    require.NoError(t, err)

    // Test listing the DOI files
    entries, err := f.List(context.Background(), "")
    require.NoError(t, err)

    sort.Sort(entries)

    require.Equal(t, len(files), len(entries))

    e := entries[0]
    assert.Equal(t, "README.md", e.Remote())
    assert.Equal(t, int64(18), e.Size())
    _, ok := e.(*Object)
    assert.True(t, ok)

    e = entries[1]
    assert.Equal(t, "data.txt", e.Remote())
    assert.Equal(t, int64(9), e.Size())
    _, ok = e.(*Object)
    assert.True(t, ok)

    // Test reading the DOI files
    o, err := f.NewObject(context.Background(), "README.md")
    require.NoError(t, err)
    assert.Equal(t, int64(18), o.Size())
    md5Hash, err := o.Hash(context.Background(), hash.MD5)
    require.NoError(t, err)
    assert.Equal(t, "464352b1cab5240e44528a56fda33d9d", md5Hash)
    fd, err := o.Open(context.Background())
    require.NoError(t, err)
    data, err := io.ReadAll(fd)
    require.NoError(t, err)
    require.NoError(t, fd.Close())
    assert.Equal(t, []byte(files["README.md"]), data)
    do, ok := o.(fs.MimeTyper)
    require.True(t, ok)
    assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))

    o, err = f.NewObject(context.Background(), "data.txt")
    require.NoError(t, err)
    assert.Equal(t, int64(9), o.Size())
    md5Hash, err = o.Hash(context.Background(), hash.MD5)
    require.NoError(t, err)
    assert.Equal(t, "5b82f8bf4df2bfb0e66ccaa7306fd024", md5Hash)
    fd, err = o.Open(context.Background())
    require.NoError(t, err)
    data, err = io.ReadAll(fd)
    require.NoError(t, err)
    require.NoError(t, fd.Close())
    assert.Equal(t, []byte(files["data.txt"]), data)
    do, ok = o.(fs.MimeTyper)
    require.True(t, ok)
    assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
}
@@ -1,16 +0,0 @@
// Test DOI filesystem interface
package doi

import (
    "testing"

    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestDoi:",
        NilObject:  (*Object)(nil),
    })
}
@@ -1,164 +0,0 @@
// Implementation for InvenioRDM

package doi

import (
    "context"
    "fmt"
    "net/http"
    "net/url"
    "regexp"
    "strings"
    "time"

    "github.com/rclone/rclone/backend/doi/api"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/lib/rest"
)

var invenioRecordRegex = regexp.MustCompile(`\/records?\/(.+)`)

// activateInvenio returns true if resolvedURL is likely a DOI hosted on an InvenioRDM installation
func activateInvenio(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (isActive bool) {
    _, _, err := resolveInvenioEndpoint(ctx, srv, pacer, resolvedURL)
    return err == nil
}

// resolveInvenioEndpoint resolves the main API endpoint for a DOI hosted on an InvenioRDM installation
func resolveInvenioEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (provider Provider, endpoint *url.URL, err error) {
    var res *http.Response
    opts := rest.Opts{
        Method:  "GET",
        RootURL: resolvedURL.String(),
    }
    err = pacer.Call(func() (bool, error) {
        res, err = srv.Call(ctx, &opts)
        return shouldRetry(ctx, res, err)
    })
    if err != nil {
        return "", nil, err
    }

    // First, attempt to grab the API URL from the headers
    var linksetURL *url.URL
    links := parseLinkHeader(res.Header.Get("Link"))
    for _, link := range links {
        if link.Rel == "linkset" && link.Type == "application/linkset+json" {
            parsed, err := url.Parse(link.Href)
            if err == nil {
                linksetURL = parsed
                break
            }
        }
    }

    if linksetURL != nil {
        endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, linksetURL)
        if err == nil {
            return Invenio, endpoint, nil
        }
        fs.Logf(nil, "using linkset URL failed: %s", err.Error())
    }

    // If there is no linkset header, try to grab the record ID from the URL
    recordID := ""
    resURL := res.Request.URL
    match := invenioRecordRegex.FindStringSubmatch(resURL.EscapedPath())
    if match != nil {
        recordID = match[1]
        guessedURL := res.Request.URL.ResolveReference(&url.URL{
            Path: "/api/records/" + recordID,
        })
        endpoint, err = checkInvenioAPIURL(ctx, srv, pacer, guessedURL)
        if err == nil {
            return Invenio, endpoint, nil
        }
        fs.Logf(nil, "guessing the URL failed: %s", err.Error())
    }

    return "", nil, fmt.Errorf("could not resolve the Invenio API endpoint for '%s'", resolvedURL.String())
}

func checkInvenioAPIURL(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL) (endpoint *url.URL, err error) {
    var result api.InvenioRecordResponse
    opts := rest.Opts{
        Method:  "GET",
        RootURL: resolvedURL.String(),
    }
    err = pacer.Call(func() (bool, error) {
        res, err := srv.CallJSON(ctx, &opts, nil, &result)
        return shouldRetry(ctx, res, err)
    })
    if err != nil {
        return nil, err
    }
    if result.Links.Self == "" {
        return nil, fmt.Errorf("could not parse API response from '%s'", resolvedURL.String())
    }
    return url.Parse(result.Links.Self)
}

// invenioProvider implements the doiProvider interface for InvenioRDM installations
type invenioProvider struct {
    f *Fs
}

// ListEntries returns the full list of entries found at the remote, regardless of root
func (ip *invenioProvider) ListEntries(ctx context.Context) (entries []*Object, err error) {
    // Use the cache if populated
    cachedEntries, found := ip.f.cache.GetMaybe("files")
    if found {
        parsedEntries, ok := cachedEntries.([]Object)
        if ok {
            for _, entry := range parsedEntries {
                newEntry := entry
                entries = append(entries, &newEntry)
            }
            return entries, nil
        }
    }

    filesURL := ip.f.endpoint.JoinPath("files")
    var result api.InvenioFilesResponse
    opts := rest.Opts{
        Method: "GET",
        Path:   strings.TrimLeft(filesURL.EscapedPath(), "/"),
    }
    err = ip.f.pacer.Call(func() (bool, error) {
        res, err := ip.f.srv.CallJSON(ctx, &opts, nil, &result)
        return shouldRetry(ctx, res, err)
    })
    if err != nil {
        return nil, fmt.Errorf("readDir failed: %w", err)
    }
    for _, file := range result.Entries {
        modTime, modTimeErr := time.Parse(time.RFC3339, file.Updated)
        if modTimeErr != nil {
            fs.Logf(ip.f, "error: could not parse last update time %v", modTimeErr)
            modTime = timeUnset
        }
        entry := &Object{
            fs:          ip.f,
            remote:      file.Key,
            contentURL:  file.Links.Content,
            size:        file.Size,
            modTime:     modTime,
            contentType: file.MimeType,
            md5:         strings.TrimPrefix(file.Checksum, "md5:"),
        }
        entries = append(entries, entry)
    }
    // Populate the cache
    cacheEntries := []Object{}
    for _, entry := range entries {
        cacheEntries = append(cacheEntries, *entry)
    }
    ip.f.cache.Put("files", cacheEntries)
    return entries, nil
}

func newInvenioProvider(f *Fs) doiProvider {
    return &invenioProvider{
        f: f,
    }
}
@@ -1,75 +0,0 @@
package doi

import (
    "regexp"
    "strings"
)

var linkRegex = regexp.MustCompile(`^<(.+)>$`)
var valueRegex = regexp.MustCompile(`^"(.+)"$`)

// headerLink represents a link as presented in HTTP headers
// MDN Reference: https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Link
type headerLink struct {
    Href   string
    Rel    string
    Type   string
    Extras map[string]string
}

func parseLinkHeader(header string) (links []headerLink) {
    for _, link := range strings.Split(header, ",") {
        link = strings.TrimSpace(link)
        parsed := parseLink(link)
        if parsed != nil {
            links = append(links, *parsed)
        }
    }
    return links
}
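
// For example, parseLinkHeader(`<https://api.example.com/issues?page=4>; rel="next"`)
// returns []headerLink{{Href: "https://api.example.com/issues?page=4", Rel: "next",
// Extras: map[string]string{}}}, as exercised by TestParseLinkHeader below.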

// parseLink parses a single link value from a Link header
func parseLink(link string) (parsedLink *headerLink) {
    var parts []string
    for _, part := range strings.Split(link, ";") {
        parts = append(parts, strings.TrimSpace(part))
    }

    match := linkRegex.FindStringSubmatch(parts[0])
    if match == nil {
        return nil
    }

    result := &headerLink{
        Href:   match[1],
        Extras: map[string]string{},
    }

    for _, keyValue := range parts[1:] {
        parsed := parseKeyValue(keyValue)
        if parsed != nil {
            key, value := parsed[0], parsed[1]
            switch strings.ToLower(key) {
            case "rel":
                result.Rel = value
            case "type":
                result.Type = value
            default:
                result.Extras[key] = value
            }
        }
    }
    return result
}

// parseKeyValue splits a key=value parameter, unquoting the value if needed
func parseKeyValue(keyValue string) []string {
    parts := strings.SplitN(keyValue, "=", 2)
    if parts[0] == "" || len(parts) < 2 {
        return nil
    }
    match := valueRegex.FindStringSubmatch(parts[1])
    if match != nil {
        parts[1] = match[1]
        return parts
    }
    return parts
}
@@ -1,44 +0,0 @@
package doi

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestParseLinkHeader(t *testing.T) {
    header := "<https://zenodo.org/api/records/15063252> ; rel=\"linkset\" ; type=\"application/linkset+json\""
    links := parseLinkHeader(header)
    expected := headerLink{
        Href:   "https://zenodo.org/api/records/15063252",
        Rel:    "linkset",
        Type:   "application/linkset+json",
        Extras: map[string]string{},
    }
    assert.Contains(t, links, expected)

    header = "<https://api.example.com/issues?page=2>; rel=\"prev\", <https://api.example.com/issues?page=4>; rel=\"next\", <https://api.example.com/issues?page=10>; rel=\"last\", <https://api.example.com/issues?page=1>; rel=\"first\""
    links = parseLinkHeader(header)
    expectedList := []headerLink{{
        Href:   "https://api.example.com/issues?page=2",
        Rel:    "prev",
        Type:   "",
        Extras: map[string]string{},
    }, {
        Href:   "https://api.example.com/issues?page=4",
        Rel:    "next",
        Type:   "",
        Extras: map[string]string{},
    }, {
        Href:   "https://api.example.com/issues?page=10",
        Rel:    "last",
        Type:   "",
        Extras: map[string]string{},
    }, {
        Href:   "https://api.example.com/issues?page=1",
        Rel:    "first",
        Type:   "",
        Extras: map[string]string{},
    }}
    assert.Equal(t, links, expectedList)
}
@@ -1,47 +0,0 @@
// Implementation for Zenodo

package doi

import (
    "context"
    "fmt"
    "net/url"
    "regexp"

    "github.com/rclone/rclone/backend/doi/api"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/lib/rest"
)

var zenodoRecordRegex = regexp.MustCompile(`zenodo[.](.+)`)

// Resolve the main API endpoint for a DOI hosted on Zenodo
func resolveZenodoEndpoint(ctx context.Context, srv *rest.Client, pacer *fs.Pacer, resolvedURL *url.URL, doi string) (provider Provider, endpoint *url.URL, err error) {
    match := zenodoRecordRegex.FindStringSubmatch(doi)
    if match == nil {
        return "", nil, fmt.Errorf("could not derive API endpoint URL from '%s'", resolvedURL.String())
    }

    recordID := match[1]
    endpointURL := resolvedURL.ResolveReference(&url.URL{Path: "/api/records/" + recordID})

    var result api.InvenioRecordResponse
    opts := rest.Opts{
        Method:  "GET",
        RootURL: endpointURL.String(),
    }
    err = pacer.Call(func() (bool, error) {
        res, err := srv.CallJSON(ctx, &opts, nil, &result)
        return shouldRetry(ctx, res, err)
    })
    if err != nil {
        return "", nil, err
    }

    endpointURL, err = url.Parse(result.Links.Self)
    if err != nil {
        return "", nil, err
    }

    return Zenodo, endpointURL, nil
}
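
// For example, the DOI "10.5281/zenodo.2600782" matches the regex with record
// ID "2600782", so the endpoint is first guessed as "/api/records/2600782"
// relative to the resolved URL and then replaced by the record's own
// links.self URL once the guess is confirmed.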
@@ -38,8 +38,8 @@ import (
    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/fs/fspath"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/list"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fs/walk"
    "github.com/rclone/rclone/lib/dircache"
    "github.com/rclone/rclone/lib/encoder"
    "github.com/rclone/rclone/lib/env"
@@ -1745,7 +1745,7 @@ func (f *Fs) createDir(ctx context.Context, pathID, leaf string, metadata fs.Met
    }
    var updateMetadata updateMetadataFn
    if len(metadata) > 0 {
        updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true, true)
        updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true)
        if err != nil {
            return nil, fmt.Errorf("create dir: failed to update metadata: %w", err)
        }
@@ -1776,7 +1776,7 @@ func (f *Fs) updateDir(ctx context.Context, dirID string, metadata fs.Metadata)
    }
    dirID = actualID(dirID)
    updateInfo := &drive.File{}
    updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true, true)
    updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true)
    if err != nil {
        return nil, fmt.Errorf("update dir: failed to update metadata from source object: %w", err)
    }
@@ -2189,7 +2189,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
    wg := sync.WaitGroup{}
    in := make(chan listREntry, listRInputBuffer)
    out := make(chan error, f.ci.Checkers)
    list := list.NewHelper(callback)
    list := walk.NewListRHelper(callback)
    overflow := []listREntry{}
    listed := 0

@@ -507,7 +507,7 @@ type updateMetadataFn func(context.Context, *drive.File) error
//
// It returns a callback which should be called to finish the updates
// after the data is uploaded.
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update, isFolder bool) (callback updateMetadataFn, err error) {
func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs.Metadata, update bool) (callback updateMetadataFn, err error) {
    callbackFns := []updateMetadataFn{}
    callback = func(ctx context.Context, info *drive.File) error {
        for _, fn := range callbackFns {
@@ -532,9 +532,7 @@ func (f *Fs) updateMetadata(ctx context.Context, updateInfo *drive.File, meta fs
        }
        switch k {
        case "copy-requires-writer-permission":
            if isFolder {
                fs.Debugf(f, "Ignoring %s=%s as can't set on folders", k, v)
            } else if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
            if err := parseBool(&updateInfo.CopyRequiresWriterPermission); err != nil {
                return nil, err
            }
        case "writers-can-share":
@@ -631,7 +629,7 @@ func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, opti
    if err != nil {
        return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
    }
    callback, err = f.updateMetadata(ctx, updateInfo, meta, update, false)
    callback, err = f.updateMetadata(ctx, updateInfo, meta, update)
    if err != nil {
        return nil, fmt.Errorf("failed to update metadata from source object: %w", err)
    }
@@ -1,81 +0,0 @@
// Package api defines types for interacting with the FileLu API.
package api

import "encoding/json"

// CreateFolderResponse represents the response for creating a folder.
type CreateFolderResponse struct {
    Status int    `json:"status"`
    Msg    string `json:"msg"`
    Result struct {
        FldID interface{} `json:"fld_id"`
    } `json:"result"`
}

// DeleteFolderResponse represents the response for deleting a folder.
type DeleteFolderResponse struct {
    Status int    `json:"status"`
    Msg    string `json:"msg"`
}

// FolderListResponse represents the response for listing folders.
type FolderListResponse struct {
    Status int    `json:"status"`
    Msg    string `json:"msg"`
    Result struct {
        Files []struct {
            Name     string      `json:"name"`
            FldID    json.Number `json:"fld_id"`
            Path     string      `json:"path"`
            FileCode string      `json:"file_code"`
            Size     int64       `json:"size"`
        } `json:"files"`
        Folders []struct {
            Name  string      `json:"name"`
            FldID json.Number `json:"fld_id"`
            Path  string      `json:"path"`
        } `json:"folders"`
    } `json:"result"`
}

// FileDirectLinkResponse represents the response for a direct link to a file.
type FileDirectLinkResponse struct {
    Status int    `json:"status"`
    Msg    string `json:"msg"`
    Result struct {
        URL  string `json:"url"`
        Size int64  `json:"size"`
    } `json:"result"`
}

// FileInfoResponse represents the response for file information.
type FileInfoResponse struct {
    Status int    `json:"status"`
    Msg    string `json:"msg"`
    Result []struct {
        Size     string `json:"size"`
        Name     string `json:"name"`
        FileCode string `json:"filecode"`
        Hash     string `json:"hash"`
        Status   int    `json:"status"`
    } `json:"result"`
}

// DeleteFileResponse represents the response for deleting a file.
type DeleteFileResponse struct {
    Status int    `json:"status"`
    Msg    string `json:"msg"`
}

// AccountInfoResponse represents the response for account information.
type AccountInfoResponse struct {
    Status int    `json:"status"` // HTTP status code of the response.
    Msg    string `json:"msg"`    // Message describing the response.
    Result struct {
        PremiumExpire string `json:"premium_expire"` // Expiration date of premium access.
        Email         string `json:"email"`          // User's email address.
        UType         string `json:"utype"`          // User type (e.g., premium or free).
        Storage       string `json:"storage"`        // Total storage available to the user.
        StorageUsed   string `json:"storage_used"`   // Amount of storage used.
    } `json:"result"` // Nested result structure containing account details.
}
@@ -1,366 +0,0 @@
// Package filelu provides an interface to the FileLu storage system.
package filelu

import (
    "context"
    "fmt"
    "io"
    "net/http"
    "os"
    "path"
    "strings"
    "time"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/lib/encoder"
    "github.com/rclone/rclone/lib/pacer"
    "github.com/rclone/rclone/lib/rest"
)

// Register the backend with Rclone
func init() {
    fs.Register(&fs.RegInfo{
        Name:        "filelu",
        Description: "FileLu Cloud Storage",
        NewFs:       NewFs,
        Options: []fs.Option{{
            Name:      "key",
            Help:      "Your FileLu Rclone key from My Account",
            Required:  true,
            Sensitive: true,
        },
        {
            Name:     config.ConfigEncoding,
            Help:     config.ConfigEncodingHelp,
            Advanced: true,
            Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
                encoder.EncodeSlash |
                encoder.EncodeLtGt |
                encoder.EncodeExclamation |
                encoder.EncodeDoubleQuote |
                encoder.EncodeSingleQuote |
                encoder.EncodeBackQuote |
                encoder.EncodeQuestion |
                encoder.EncodeDollar |
                encoder.EncodeColon |
                encoder.EncodeAsterisk |
                encoder.EncodePipe |
                encoder.EncodeHash |
                encoder.EncodePercent |
                encoder.EncodeBackSlash |
                encoder.EncodeCrLf |
                encoder.EncodeDel |
                encoder.EncodeCtl |
                encoder.EncodeLeftSpace |
                encoder.EncodeLeftPeriod |
                encoder.EncodeLeftTilde |
                encoder.EncodeLeftCrLfHtVt |
                encoder.EncodeRightPeriod |
                encoder.EncodeRightCrLfHtVt |
                encoder.EncodeSquareBracket |
                encoder.EncodeSemicolon |
                encoder.EncodeRightSpace |
                encoder.EncodeInvalidUtf8 |
                encoder.EncodeDot),
        },
    }})
}

// Options defines the configuration for the FileLu backend
type Options struct {
    Key string               `config:"key"`
    Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents the FileLu file system
type Fs struct {
    name       string
    root       string
    opt        Options
    features   *fs.Features
    endpoint   string
    pacer      *pacer.Pacer
    srv        *rest.Client
    client     *http.Client
    targetFile string
}

// NewFs creates a new Fs object for FileLu
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
    opt := new(Options)
    err := configstruct.Set(m, opt)
    if err != nil {
        return nil, fmt.Errorf("failed to parse config: %w", err)
    }

    if opt.Key == "" {
        return nil, fmt.Errorf("FileLu Rclone Key is required")
    }

    client := fshttp.NewClient(ctx)

    if strings.TrimSpace(root) == "" {
        root = ""
    }
    root = strings.Trim(root, "/")

    filename := ""

    f := &Fs{
        name:       name,
        opt:        *opt,
        endpoint:   "https://filelu.com/rclone",
        client:     client,
        srv:        rest.NewClient(client).SetRoot("https://filelu.com/rclone"),
        pacer:      pacer.New(),
        targetFile: filename,
        root:       root,
    }

    f.features = (&fs.Features{
        CanHaveEmptyDirectories: true,
        WriteMetadata:           false,
        SlowHash:                true,
    }).Fill(ctx, f)

    rootContainer, rootDirectory := rootSplit(f.root)
    if rootContainer != "" && rootDirectory != "" {
        // Check to see if the (container,directory) is actually an existing file
        oldRoot := f.root
        newRoot, leaf := path.Split(oldRoot)
        f.root = strings.Trim(newRoot, "/")
        _, err := f.NewObject(ctx, leaf)
        if err != nil {
            if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
                // File doesn't exist or is a directory so return old f
                f.root = strings.Trim(oldRoot, "/")
                return f, nil
            }
            return nil, err
        }
        // return an error with an fs which points to the parent
        return f, fs.ErrorIsFile
    }

    return f, nil
}

// Mkdir creates a directory on the remote server.
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    fullPath := path.Clean(f.root + "/" + dir)
    _, err := f.createFolder(ctx, fullPath)
    return err
}

// About provides usage statistics for the remote
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
    accountInfo, err := f.getAccountInfo(ctx)
    if err != nil {
        return nil, err
    }

    totalStorage, err := parseStorageToBytes(accountInfo.Result.Storage)
    if err != nil {
        return nil, fmt.Errorf("failed to parse total storage: %w", err)
    }

    usedStorage, err := parseStorageToBytes(accountInfo.Result.StorageUsed)
    if err != nil {
        return nil, fmt.Errorf("failed to parse used storage: %w", err)
    }

    return &fs.Usage{
        Total: fs.NewUsageValue(totalStorage), // Total bytes available
        Used:  fs.NewUsageValue(usedStorage),  // Total bytes used
        Free:  fs.NewUsageValue(totalStorage - usedStorage),
    }, nil
}

// Purge deletes the directory and all its contents
func (f *Fs) Purge(ctx context.Context, dir string) error {
    fullPath := path.Join(f.root, dir)
    if fullPath != "" {
        fullPath = "/" + strings.Trim(fullPath, "/")
    }
    return f.deleteFolder(ctx, fullPath)
}

// List returns a list of files and folders for the given directory
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
    // Compose full path for API call
    fullPath := path.Join(f.root, dir)
    fullPath = "/" + strings.Trim(fullPath, "/")
    if fullPath == "/" {
        fullPath = ""
    }

    var entries fs.DirEntries
    result, err := f.getFolderList(ctx, fullPath)
    if err != nil {
        return nil, err
    }

    fldMap := map[string]bool{}
    for _, folder := range result.Result.Folders {
        fldMap[folder.FldID.String()] = true
        if f.root == "" && dir == "" && strings.Contains(folder.Path, "/") {
            continue
        }

        paths := strings.Split(folder.Path, fullPath+"/")
        remote := paths[0]
        if len(paths) > 1 {
            remote = paths[1]
        }

        if strings.Contains(remote, "/") {
            continue
        }

        pathsWithoutRoot := strings.Split(folder.Path, "/"+f.root+"/")
        remotePathWithoutRoot := pathsWithoutRoot[0]
        if len(pathsWithoutRoot) > 1 {
            remotePathWithoutRoot = pathsWithoutRoot[1]
        }
        remotePathWithoutRoot = strings.TrimPrefix(remotePathWithoutRoot, "/")
        entries = append(entries, fs.NewDir(remotePathWithoutRoot, time.Now()))
    }
    for _, file := range result.Result.Files {
        if _, ok := fldMap[file.FldID.String()]; ok {
            continue
        }
        remote := path.Join(dir, file.Name)
        // trim leading slashes
        remote = strings.TrimPrefix(remote, "/")
        obj := &Object{
            fs:      f,
            remote:  remote,
            size:    file.Size,
            modTime: time.Now(),
        }
        entries = append(entries, obj)
    }
    return entries, nil
}

// Put uploads a file directly to the destination folder in the FileLu storage system.
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    if src.Size() == 0 {
        return nil, fs.ErrorCantUploadEmptyFiles
    }

    err := f.uploadFile(ctx, in, src.Remote())
    if err != nil {
        return nil, err
    }

    newObject := &Object{
        fs:      f,
        remote:  src.Remote(),
        size:    src.Size(),
        modTime: src.ModTime(ctx),
    }
    fs.Infof(f, "Put: Successfully uploaded new file %q", src.Remote())
    return newObject, nil
}

// Move moves the file to the specified location
func (f *Fs) Move(ctx context.Context, src fs.Object, destinationPath string) (fs.Object, error) {

    if strings.HasPrefix(destinationPath, "/") || strings.Contains(destinationPath, ":\\") {
        dir := path.Dir(destinationPath)
        if err := os.MkdirAll(dir, 0755); err != nil {
            return nil, fmt.Errorf("failed to create destination directory: %w", err)
        }

        reader, err := src.Open(ctx)
        if err != nil {
            return nil, fmt.Errorf("failed to open source file: %w", err)
        }
        defer func() {
            if err := reader.Close(); err != nil {
                fs.Logf(nil, "Failed to close file body: %v", err)
            }
        }()

        dest, err := os.Create(destinationPath)
        if err != nil {
            return nil, fmt.Errorf("failed to create destination file: %w", err)
        }
        defer func() {
            if err := dest.Close(); err != nil {
                fs.Logf(nil, "Failed to close file body: %v", err)
            }
        }()

        if _, err := io.Copy(dest, reader); err != nil {
            return nil, fmt.Errorf("failed to copy file content: %w", err)
        }

        if err := src.Remove(ctx); err != nil {
            return nil, fmt.Errorf("failed to remove source file: %w", err)
        }

        return nil, nil
    }

    reader, err := src.Open(ctx)
    if err != nil {
        return nil, fmt.Errorf("failed to open source object: %w", err)
    }
    defer func() {
        if err := reader.Close(); err != nil {
            fs.Logf(nil, "Failed to close file body: %v", err)
        }
    }()

    err = f.uploadFile(ctx, reader, destinationPath)
    if err != nil {
        return nil, fmt.Errorf("failed to upload file to destination: %w", err)
    }

    if err := src.Remove(ctx); err != nil {
        return nil, fmt.Errorf("failed to delete source file: %w", err)
    }

    return &Object{
        fs:      f,
        remote:  destinationPath,
        size:    src.Size(),
        modTime: src.ModTime(ctx),
    }, nil
}

// Rmdir removes a directory
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    fullPath := path.Join(f.root, dir)
    if fullPath != "" {
        fullPath = "/" + strings.Trim(fullPath, "/")
    }

    // Step 1: Check if folder is empty
    listResp, err := f.getFolderList(ctx, fullPath)
    if err != nil {
        return err
    }
    if len(listResp.Result.Files) > 0 || len(listResp.Result.Folders) > 0 {
        return fmt.Errorf("Rmdir: directory %q is not empty", fullPath)
    }

    // Step 2: Delete the folder
    return f.deleteFolder(ctx, fullPath)
}

// Check the interfaces are satisfied
var (
    _ fs.Fs      = (*Fs)(nil)
    _ fs.Purger  = (*Fs)(nil)
    _ fs.Abouter = (*Fs)(nil)
    _ fs.Mover   = (*Fs)(nil)
    _ fs.Object  = (*Object)(nil)
)
@@ -1,324 +0,0 @@
package filelu

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "strings"

    "github.com/rclone/rclone/backend/filelu/api"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/lib/rest"
)

// createFolder creates a folder at the specified path.
func (f *Fs) createFolder(ctx context.Context, dirPath string) (*api.CreateFolderResponse, error) {
    encodedDir := f.fromStandardPath(dirPath)
    apiURL := fmt.Sprintf("%s/folder/create?folder_path=%s&key=%s",
        f.endpoint,
        url.QueryEscape(encodedDir),
        url.QueryEscape(f.opt.Key),
    )

    req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
    if err != nil {
        return nil, fmt.Errorf("failed to create request: %w", err)
    }

    var resp *http.Response
    result := api.CreateFolderResponse{}
    err = f.pacer.Call(func() (bool, error) {
        var innerErr error
        resp, innerErr = f.client.Do(req)
        return fserrors.ShouldRetry(innerErr), innerErr
    })
    if err != nil {
        return nil, fmt.Errorf("request failed: %w", err)
    }
    defer func() {
        if err := resp.Body.Close(); err != nil {
            fs.Logf(nil, "Failed to close response body: %v", err)
        }
    }()

    err = json.NewDecoder(resp.Body).Decode(&result)
    if err != nil {
        return nil, fmt.Errorf("error decoding response: %w", err)
    }
    if result.Status != 200 {
        return nil, fmt.Errorf("error: %s", result.Msg)
    }

    fs.Infof(f, "Successfully created folder %q with ID %v", dirPath, result.Result.FldID)
    return &result, nil
}

// getFolderList lists both files and folders in a directory.
func (f *Fs) getFolderList(ctx context.Context, path string) (*api.FolderListResponse, error) {
    encodedDir := f.fromStandardPath(path)
    apiURL := fmt.Sprintf("%s/folder/list?folder_path=%s&key=%s",
        f.endpoint,
        url.QueryEscape(encodedDir),
        url.QueryEscape(f.opt.Key),
    )

    var body []byte
    err := f.pacer.Call(func() (bool, error) {
        req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
        if err != nil {
            return false, fmt.Errorf("failed to create request: %w", err)
        }

        resp, err := f.client.Do(req)
        if err != nil {
            return shouldRetry(err), fmt.Errorf("failed to list directory: %w", err)
        }
        defer func() {
            if err := resp.Body.Close(); err != nil {
                fs.Logf(nil, "Failed to close response body: %v", err)
            }
        }()

        body, err = io.ReadAll(resp.Body)
        if err != nil {
            return false, fmt.Errorf("error reading response body: %w", err)
        }

        return shouldRetryHTTP(resp.StatusCode), nil
    })
    if err != nil {
        return nil, err
    }

    var response api.FolderListResponse
    if err := json.NewDecoder(bytes.NewReader(body)).Decode(&response); err != nil {
        return nil, fmt.Errorf("error decoding response: %w", err)
    }
    if response.Status != 200 {
        if strings.Contains(response.Msg, "Folder not found") {
            return nil, fs.ErrorDirNotFound
        }
        return nil, fmt.Errorf("API error: %s", response.Msg)
    }

    for index := range response.Result.Folders {
        response.Result.Folders[index].Path = f.toStandardPath(response.Result.Folders[index].Path)
    }

    for index := range response.Result.Files {
        response.Result.Files[index].Name = f.toStandardPath(response.Result.Files[index].Name)
    }

    return &response, nil
}

// deleteFolder deletes a folder at the specified path.
func (f *Fs) deleteFolder(ctx context.Context, fullPath string) error {
    fullPath = f.fromStandardPath(fullPath)
    deleteURL := fmt.Sprintf("%s/folder/delete?folder_path=%s&key=%s",
        f.endpoint,
        url.QueryEscape(fullPath),
        url.QueryEscape(f.opt.Key),
    )

    delResp := api.DeleteFolderResponse{}
    err := f.pacer.Call(func() (bool, error) {
        req, err := http.NewRequestWithContext(ctx, "GET", deleteURL, nil)
        if err != nil {
            return false, err
        }
        resp, err := f.client.Do(req)
        if err != nil {
            return fserrors.ShouldRetry(err), err
        }
        defer func() {
            if err := resp.Body.Close(); err != nil {
                fs.Logf(nil, "Failed to close response body: %v", err)
            }
        }()

        body, err := io.ReadAll(resp.Body)
        if err != nil {
            return false, err
        }

        if err := json.Unmarshal(body, &delResp); err != nil {
            return false, fmt.Errorf("error decoding delete response: %w", err)
        }
        if delResp.Status != 200 {
            return false, fmt.Errorf("delete error: %s", delResp.Msg)
        }
        return false, nil
    })
    if err != nil {
        return err
    }

    fs.Infof(f, "Rmdir: successfully deleted %q", fullPath)
    return nil
}

// getDirectLink fetches a direct download link for a file from FileLu.
func (f *Fs) getDirectLink(ctx context.Context, filePath string) (string, int64, error) {
    filePath = f.fromStandardPath(filePath)
    apiURL := fmt.Sprintf("%s/file/direct_link?file_path=%s&key=%s",
        f.endpoint,
        url.QueryEscape(filePath),
        url.QueryEscape(f.opt.Key),
    )

    result := api.FileDirectLinkResponse{}
    err := f.pacer.Call(func() (bool, error) {
        req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
        if err != nil {
            return false, fmt.Errorf("failed to create request: %w", err)
        }

        resp, err := f.client.Do(req)
        if err != nil {
            return shouldRetry(err), fmt.Errorf("failed to fetch direct link: %w", err)
        }
        defer func() {
            if err := resp.Body.Close(); err != nil {
                fs.Logf(nil, "Failed to close response body: %v", err)
            }
        }()

        if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
            return false, fmt.Errorf("error decoding response: %w", err)
        }

        if result.Status != 200 {
            return false, fmt.Errorf("API error: %s", result.Msg)
        }

        return shouldRetryHTTP(resp.StatusCode), nil
    })
    if err != nil {
        return "", 0, err
    }

    return result.Result.URL, result.Result.Size, nil
}

// deleteFile deletes a file based on filePath
func (f *Fs) deleteFile(ctx context.Context, filePath string) error {
    filePath = f.fromStandardPath(filePath)
    apiURL := fmt.Sprintf("%s/file/remove?file_path=%s&key=%s",
        f.endpoint,
        url.QueryEscape(filePath),
        url.QueryEscape(f.opt.Key),
    )

    result := api.DeleteFileResponse{}
    err := f.pacer.Call(func() (bool, error) {
        req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
        if err != nil {
            return false, fmt.Errorf("failed to create request: %w", err)
        }

        resp, err := f.client.Do(req)
        if err != nil {
            return shouldRetry(err), fmt.Errorf("failed to delete file: %w", err)
        }
        defer func() {
            if err := resp.Body.Close(); err != nil {
                fs.Logf(nil, "Failed to close response body: %v", err)
            }
        }()

        if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
            return false, fmt.Errorf("error decoding response: %w", err)
        }

        if result.Status != 200 {
            return false, fmt.Errorf("API error: %s", result.Msg)
        }

        return shouldRetryHTTP(resp.StatusCode), nil
    })
    return err
}

// getAccountInfo retrieves account information
func (f *Fs) getAccountInfo(ctx context.Context) (*api.AccountInfoResponse, error) {
    opts := rest.Opts{
        Method: "GET",
        Path:   "/account/info",
        Parameters: url.Values{
            "key": {f.opt.Key},
        },
    }

    var result api.AccountInfoResponse
    err := f.pacer.Call(func() (bool, error) {
        _, callErr := f.srv.CallJSON(ctx, &opts, nil, &result)
        return fserrors.ShouldRetry(callErr), callErr
    })

    if err != nil {
        return nil, err
    }

    if result.Status != 200 {
        return nil, fmt.Errorf("error: %s", result.Msg)
    }

    return &result, nil
}

// getFileInfo retrieves file information based on file code
func (f *Fs) getFileInfo(ctx context.Context, fileCode string) (*api.FileInfoResponse, error) {
    u, _ := url.Parse(f.endpoint + "/file/info2")
    q := u.Query()
    q.Set("file_code", fileCode) // query encoding is handled by url.Values
    q.Set("key", f.opt.Key)
    u.RawQuery = q.Encode()

    apiURL := f.endpoint + "/file/info2?" + u.RawQuery

    var body []byte
    err := f.pacer.Call(func() (bool, error) {
        req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
        if err != nil {
            return false, fmt.Errorf("failed to create request: %w", err)
        }

        resp, err := f.client.Do(req)
        if err != nil {
            return shouldRetry(err), fmt.Errorf("failed to fetch file info: %w", err)
        }
        defer func() {
            if err := resp.Body.Close(); err != nil {
                fs.Logf(nil, "Failed to close response body: %v", err)
            }
        }()

        body, err = io.ReadAll(resp.Body)
        if err != nil {
            return false, fmt.Errorf("error reading response body: %w", err)
        }

        return shouldRetryHTTP(resp.StatusCode), nil
    })
    if err != nil {
        return nil, err
    }
    result := api.FileInfoResponse{}

    if err := json.NewDecoder(bytes.NewReader(body)).Decode(&result); err != nil {
        return nil, fmt.Errorf("error decoding response: %w", err)
    }

    if result.Status != 200 || len(result.Result) == 0 {
        return nil, fs.ErrorObjectNotFound
    }

    return &result, nil
}
@@ -1,193 +0,0 @@
package filelu

import (
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "mime/multipart"
    "net/http"
    "net/url"
    "path"
    "strings"

    "github.com/rclone/rclone/fs"
)

// uploadFile uploads a file to FileLu
func (f *Fs) uploadFile(ctx context.Context, fileContent io.Reader, fileFullPath string) error {
    directory := path.Dir(fileFullPath)
    fileName := path.Base(fileFullPath)
    if directory == "." {
        directory = ""
    }
    destinationFolderPath := path.Join(f.root, directory)
    if destinationFolderPath != "" {
        destinationFolderPath = "/" + strings.Trim(destinationFolderPath, "/")
    }

    existingEntries, err := f.List(ctx, path.Dir(fileFullPath))
    if err != nil {
        if errors.Is(err, fs.ErrorDirNotFound) {
            err = f.Mkdir(ctx, path.Dir(fileFullPath))
            if err != nil {
                return fmt.Errorf("failed to create directory: %w", err)
            }
        } else {
            return fmt.Errorf("failed to list existing files: %w", err)
        }
    }

    for _, entry := range existingEntries {
        if entry.Remote() == fileFullPath {
            _, ok := entry.(fs.Object)
            if !ok {
                continue
            }

            // If the file already exists, remove it before re-uploading
            filePath := "/" + strings.Trim(destinationFolderPath+"/"+fileName, "/")
            err = f.deleteFile(ctx, filePath)
            if err != nil {
                return fmt.Errorf("failed to delete existing file: %w", err)
            }
        }
    }

    uploadURL, sessID, err := f.getUploadServer(ctx)
    if err != nil {
        return fmt.Errorf("failed to retrieve upload server: %w", err)
    }

    // Since the fileCode isn't used, just handle the error
    if _, err := f.uploadFileWithDestination(ctx, uploadURL, sessID, fileName, fileContent, destinationFolderPath); err != nil {
        return fmt.Errorf("failed to upload file: %w", err)
    }

    return nil
}

// getUploadServer gets the upload server URL with proper key authentication
func (f *Fs) getUploadServer(ctx context.Context) (string, string, error) {
    apiURL := fmt.Sprintf("%s/upload/server?key=%s", f.endpoint, url.QueryEscape(f.opt.Key))

    var result struct {
        Status int    `json:"status"`
        SessID string `json:"sess_id"`
        Result string `json:"result"`
        Msg    string `json:"msg"`
    }

    err := f.pacer.Call(func() (bool, error) {
        req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
        if err != nil {
            return false, fmt.Errorf("failed to create request: %w", err)
        }

        resp, err := f.client.Do(req)
        if err != nil {
            return shouldRetry(err), fmt.Errorf("failed to get upload server: %w", err)
        }
        defer func() {
            if err := resp.Body.Close(); err != nil {
                fs.Logf(nil, "Failed to close response body: %v", err)
            }
        }()

        if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
            return false, fmt.Errorf("error decoding response: %w", err)
        }

        if result.Status != 200 {
            return false, fmt.Errorf("API error: %s", result.Msg)
        }

        return shouldRetryHTTP(resp.StatusCode), nil
    })

    if err != nil {
        return "", "", err
    }

    return result.Result, result.SessID, nil
}
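
// Uploading is therefore a two-step flow: first fetch an upload server URL and
// session ID from /upload/server, then POST the file to that URL as multipart
// form data, which is what uploadFileWithDestination below does.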

// uploadFileWithDestination uploads a file directly to a specified folder using file content reader.
func (f *Fs) uploadFileWithDestination(ctx context.Context, uploadURL, sessID, fileName string, fileContent io.Reader, dirPath string) (string, error) {
    destinationPath := f.fromStandardPath(dirPath)
    encodedFileName := f.fromStandardPath(fileName)
    pr, pw := io.Pipe()
    writer := multipart.NewWriter(pw)
    isDeletionRequired := false
    go func() {
        defer func() {
            if err := pw.Close(); err != nil {
                fs.Logf(nil, "Failed to close: %v", err)
            }
        }()
        _ = writer.WriteField("sess_id", sessID)
        _ = writer.WriteField("utype", "prem")
        _ = writer.WriteField("fld_path", destinationPath)

        part, err := writer.CreateFormFile("file_0", encodedFileName)
        if err != nil {
            pw.CloseWithError(fmt.Errorf("failed to create form file: %w", err))
            return
        }

        if _, err := io.Copy(part, fileContent); err != nil {
            isDeletionRequired = true
            pw.CloseWithError(fmt.Errorf("failed to copy file content: %w", err))
            return
        }

        if err := writer.Close(); err != nil {
            pw.CloseWithError(fmt.Errorf("failed to close writer: %w", err))
        }
    }()

    var fileCode string
    err := f.pacer.Call(func() (bool, error) {
        req, err := http.NewRequestWithContext(ctx, "POST", uploadURL, pr)
        if err != nil {
            return false, fmt.Errorf("failed to create upload request: %w", err)
        }
        req.Header.Set("Content-Type", writer.FormDataContentType())

        resp, err := f.client.Do(req)
        if err != nil {
            return shouldRetry(err), fmt.Errorf("failed to send upload request: %w", err)
        }
        defer respBodyClose(resp.Body)

        var result []struct {
            FileCode   string `json:"file_code"`
            FileStatus string `json:"file_status"`
        }
        if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
            return false, fmt.Errorf("failed to parse upload response: %w", err)
        }

        if len(result) == 0 {
            return false, fmt.Errorf("upload failed: empty response")
        }
        if result[0].FileStatus != "OK" {
            return false, fmt.Errorf("upload failed with status: %s", result[0].FileStatus)
        }

        fileCode = result[0].FileCode
        return shouldRetryHTTP(resp.StatusCode), nil
    })

    if err != nil && isDeletionRequired {
        // Attempt to delete the file if upload fails
        _ = f.deleteFile(ctx, destinationPath+"/"+fileName)
    }

    return fileCode, err
}

// respBodyClose closes a response body, reporting any error on close.
func respBodyClose(responseBody io.Closer) {
    if cerr := responseBody.Close(); cerr != nil {
        fmt.Printf("Error closing response body: %v\n", cerr)
    }
}
@@ -1,112 +0,0 @@
package filelu

import (
    "context"
    "errors"
    "fmt"
    "path"
    "strings"
    "time"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/hash"
)

// errFileNotFound represents a file not found error
var errFileNotFound = errors.New("file not found")

// getFileCode retrieves the file code for a given file path
func (f *Fs) getFileCode(ctx context.Context, filePath string) (string, error) {
    // Prepare parent directory
    parentDir := path.Dir(filePath)

    // Call List to get all the files
    result, err := f.getFolderList(ctx, parentDir)
    if err != nil {
        return "", err
    }

    for _, file := range result.Result.Files {
        filePathFromServer := parentDir + "/" + file.Name
        if parentDir == "/" {
            filePathFromServer = "/" + file.Name
        }
        if filePath == filePathFromServer {
            return file.FileCode, nil
        }
    }

    return "", errFileNotFound
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
    return f.features
}

// fromStandardPath converts a standard rclone path to the FileLu encoding
func (f *Fs) fromStandardPath(remote string) string {
    return f.opt.Enc.FromStandardPath(remote)
}

// toStandardPath converts a FileLu-encoded path back to a standard rclone path
func (f *Fs) toStandardPath(remote string) string {
    return f.opt.Enc.ToStandardPath(remote)
}

// Hashes returns an empty hash set, indicating no hash support
func (f *Fs) Hashes() hash.Set {
    return hash.NewHashSet() // Properly creates an empty hash set
}

// Name returns the remote name
func (f *Fs) Name() string {
    return f.name
}

// Root returns the root path
func (f *Fs) Root() string {
    return f.root
}

// Precision returns the precision of the remote
func (f *Fs) Precision() time.Duration {
    return fs.ModTimeNotSupported
}

// String returns a description of the Fs
func (f *Fs) String() string {
    return fmt.Sprintf("FileLu root '%s'", f.root)
}

// isFileCode checks if a string looks like a file code
func isFileCode(s string) bool {
    if len(s) != 12 {
        return false
    }
    for _, c := range s {
        if !((c >= 'a' && c <= 'z') || (c >= '0' && c <= '9')) {
            return false
        }
    }
    return true
}
|
||||
|
||||
func shouldRetry(err error) bool {
|
||||
return fserrors.ShouldRetry(err)
|
||||
}
|
||||
|
||||
func shouldRetryHTTP(code int) bool {
|
||||
return code == 429 || code >= 500
|
||||
}
|
||||
|
||||
func rootSplit(absPath string) (bucket, bucketPath string) {
|
||||
// No bucket
|
||||
if absPath == "" {
|
||||
return "", ""
|
||||
}
|
||||
slash := strings.IndexRune(absPath, '/')
|
||||
// Bucket but no path
|
||||
if slash < 0 {
|
||||
return absPath, ""
|
||||
}
|
||||
return absPath[:slash], absPath[slash+1:]
|
||||
}
|
||||
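The deleted helper file above ends with rootSplit, which splits an absolute path at the first slash only: rootSplit("bucket/a/b") gives ("bucket", "a/b") and rootSplit("bucket") gives ("bucket", ""). A quick table-driven check of that behaviour (a hypothetical test, not part of this diff):

func TestRootSplit(t *testing.T) {
	for _, tc := range []struct{ in, bucket, path string }{
		{"", "", ""},             // no bucket
		{"bucket", "bucket", ""}, // bucket but no path
		{"bucket/a/b", "bucket", "a/b"},
	} {
		bucket, bucketPath := rootSplit(tc.in)
		if bucket != tc.bucket || bucketPath != tc.path {
			t.Errorf("rootSplit(%q) = (%q, %q), want (%q, %q)", tc.in, bucket, bucketPath, tc.bucket, tc.path)
		}
	}
}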
@@ -1,259 +0,0 @@
package filelu

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
)

// Object describes a FileLu object
type Object struct {
	fs      *Fs
	remote  string
	size    int64
	modTime time.Time
}

// NewObject creates a new Object for the given remote path
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	filePath := path.Join(f.root, remote)
	filePath = "/" + strings.Trim(filePath, "/")

	// Get file code
	fileCode, err := f.getFileCode(ctx, filePath)
	if err != nil {
		return nil, fs.ErrorObjectNotFound
	}

	// Get file info
	fileInfos, err := f.getFileInfo(ctx, fileCode)
	if err != nil {
		return nil, fmt.Errorf("failed to get file info: %w", err)
	}

	fileInfo := fileInfos.Result[0]
	size, _ := strconv.ParseInt(fileInfo.Size, 10, 64)

	return &Object{
		fs:      f,
		remote:  remote,
		size:    size,
		modTime: time.Now(),
	}, nil
}

// Open opens the object for reading
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	filePath := path.Join(o.fs.root, o.remote)
	// Get direct link
	directLink, size, err := o.fs.getDirectLink(ctx, filePath)
	if err != nil {
		return nil, fmt.Errorf("failed to get direct link: %w", err)
	}

	o.size = size

	// Offset and Count for range download
	var offset int64
	var count int64
	fs.FixRangeOption(options, o.size)
	for _, option := range options {
		switch x := option.(type) {
		case *fs.RangeOption:
			offset, count = x.Decode(o.size)
			if count < 0 {
				count = o.size - offset
			}
		case *fs.SeekOption:
			offset = x.Offset
			count = o.size
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}

	var reader io.ReadCloser
	err = o.fs.pacer.Call(func() (bool, error) {
		req, err := http.NewRequestWithContext(ctx, "GET", directLink, nil)
		if err != nil {
			return false, fmt.Errorf("failed to create download request: %w", err)
		}

		resp, err := o.fs.client.Do(req)
		if err != nil {
			return shouldRetry(err), fmt.Errorf("failed to download file: %w", err)
		}

		if resp.StatusCode != http.StatusOK {
			defer func() {
				if err := resp.Body.Close(); err != nil {
					fs.Logf(nil, "Failed to close response body: %v", err)
				}
			}()
			return false, fmt.Errorf("failed to download file: HTTP %d", resp.StatusCode)
		}

		// Read the whole body, then apply offset and count by slicing
		currentContents, err := io.ReadAll(resp.Body)
		if err != nil {
			return false, fmt.Errorf("failed to read response body: %w", err)
		}

		if offset > 0 {
			if offset > int64(len(currentContents)) {
				return false, fmt.Errorf("offset %d exceeds file size %d", offset, len(currentContents))
			}
			currentContents = currentContents[offset:]
		}
		if count > 0 && count < int64(len(currentContents)) {
			currentContents = currentContents[:count]
		}
		reader = io.NopCloser(bytes.NewReader(currentContents))

		return false, nil
	})
	if err != nil {
		return nil, err
	}

	return reader, nil
}

// Update updates the object with new data
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	if src.Size() <= 0 {
		return fs.ErrorCantUploadEmptyFiles
	}

	err := o.fs.uploadFile(ctx, in, o.remote)
	if err != nil {
		return fmt.Errorf("failed to upload file: %w", err)
	}
	o.size = src.Size()
	return nil
}

// Remove deletes the object from FileLu
func (o *Object) Remove(ctx context.Context) error {
	fullPath := "/" + strings.Trim(path.Join(o.fs.root, o.remote), "/")

	err := o.fs.deleteFile(ctx, fullPath)
	if err != nil {
		return err
	}
	fs.Infof(o.fs, "Successfully deleted file: %s", fullPath)
	return nil
}

// Hash returns the MD5 hash of an object
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}

	var fileCode string
	if isFileCode(o.fs.root) {
		fileCode = o.fs.root
	} else {
		matches := regexp.MustCompile(`\((.*?)\)`).FindAllStringSubmatch(o.remote, -1)
		for _, match := range matches {
			if len(match) > 1 && len(match[1]) == 12 {
				fileCode = match[1]
				break
			}
		}
	}
	if fileCode == "" {
		return "", fmt.Errorf("no valid file code found in the remote path")
	}

	apiURL := fmt.Sprintf("%s/file/info?file_code=%s&key=%s",
		o.fs.endpoint, url.QueryEscape(fileCode), url.QueryEscape(o.fs.opt.Key))

	var result struct {
		Status int    `json:"status"`
		Msg    string `json:"msg"`
		Result []struct {
			Hash string `json:"hash"`
		} `json:"result"`
	}
	err := o.fs.pacer.Call(func() (bool, error) {
		req, err := http.NewRequestWithContext(ctx, "GET", apiURL, nil)
		if err != nil {
			return false, err
		}
		resp, err := o.fs.client.Do(req)
		if err != nil {
			return shouldRetry(err), err
		}
		defer func() {
			if err := resp.Body.Close(); err != nil {
				fs.Logf(nil, "Failed to close response body: %v", err)
			}
		}()

		if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
			return false, err
		}
		return shouldRetryHTTP(resp.StatusCode), nil
	})
	if err != nil {
		return "", err
	}
	if result.Status != 200 || len(result.Result) == 0 {
		return "", fmt.Errorf("error: unable to fetch hash: %s", result.Msg)
	}

	return result.Result[0].Hash, nil
}

// String returns a string representation of the object
func (o *Object) String() string {
	return o.remote
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Size returns the size of the object
func (o *Object) Size() int64 {
	return o.size
}

// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}

// SetModTime sets the modification time of the object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return fs.ErrorCantSetModTime
}

// Storable indicates whether the object is storable
func (o *Object) Storable() bool {
	return true
}
@@ -1,16 +0,0 @@
package filelu_test

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests for the FileLu backend
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:      "TestFileLu:",
		NilObject:       nil,
		SkipInvalidUTF8: true,
	})
}
@@ -1,15 +0,0 @@
package filelu

import (
	"fmt"
)

// parseStorageToBytes converts a storage string in GiB (e.g., "10") to bytes
func parseStorageToBytes(storage string) (int64, error) {
	var gb float64
	_, err := fmt.Sscanf(storage, "%f", &gb)
	if err != nil {
		return 0, fmt.Errorf("failed to parse storage: %w", err)
	}
	return int64(gb * 1024 * 1024 * 1024), nil
}
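parseStorageToBytes treats its input as a GiB count, so "10" becomes 10 × 1024³ = 10,737,418,240 bytes. A quick sanity check (a hypothetical test, assuming the usual testing import):

func TestParseStorageToBytes(t *testing.T) {
	got, err := parseStorageToBytes("10")
	if err != nil {
		t.Fatal(err)
	}
	if want := int64(10) * 1024 * 1024 * 1024; got != want {
		t.Errorf("got %d, want %d", got, want)
	}
}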
@@ -9,7 +9,6 @@ import (
	"io"
	"net"
	"net/textproto"
	"net/url"
	"path"
	"runtime"
	"strings"
@@ -186,14 +185,6 @@ Supports the format user:pass@host:port, user@host:port, host:port.

Example:

    myUser:myPass@localhost:9005
`,
	Advanced: true,
}, {
	Name:    "http_proxy",
	Default: "",
	Help: `URL for HTTP CONNECT proxy

Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.
`,
	Advanced: true,
}, {
@@ -257,7 +248,6 @@ type Options struct {
	AskPassword   bool                 `config:"ask_password"`
	Enc           encoder.MultiEncoder `config:"encoding"`
	SocksProxy    string               `config:"socks_proxy"`
	HTTPProxy     string               `config:"http_proxy"`
	NoCheckUpload bool                 `config:"no_check_upload"`
}

@@ -276,7 +266,6 @@ type Fs struct {
	pool     []*ftp.ServerConn
	drain    *time.Timer // used to drain the pool when we stop using the connections
	tokens   *pacer.TokenDispenser
	proxyURL *url.URL  // address of HTTP proxy read from environment
	pacer    *fs.Pacer // pacer for FTP connections
	fGetTime bool      // true if the ftp library accepts GetTime
	fSetTime bool      // true if the ftp library accepts SetTime
@@ -424,26 +413,11 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
	dial := func(network, address string) (conn net.Conn, err error) {
		fs.Debugf(f, "dial(%q,%q)", network, address)
		defer func() {
			if err != nil {
				fs.Debugf(f, "> dial: conn=%v, err=%v", conn, err)
			} else {
				fs.Debugf(f, "> dial: conn=%s->%s, err=%v", conn.LocalAddr(), conn.RemoteAddr(), err)
			}
			fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
		}()
		baseDialer := fshttp.NewDialer(ctx)
		if f.opt.SocksProxy != "" {
			conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
		} else if f.proxyURL != nil {
			// We need to make the onward connection to f.opt.Host. However the FTP
			// library sets the host to the proxy IP after using EPSV or PASV so we need
			// to correct that here.
			var dialPort string
			_, dialPort, err = net.SplitHostPort(address)
			if err != nil {
				return nil, err
			}
			dialAddress := net.JoinHostPort(f.opt.Host, dialPort)
			conn, err = proxy.HTTPConnectDial(network, dialAddress, f.proxyURL, baseDialer)
		} else {
			conn, err = baseDialer.Dial(network, address)
		}
@@ -657,14 +631,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
		CanHaveEmptyDirectories: true,
		PartialUploads:          true,
	}).Fill(ctx, f)
	// get proxy URL if set
	if opt.HTTPProxy != "" {
		proxyURL, err := url.Parse(opt.HTTPProxy)
		if err != nil {
			return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err)
		}
		f.proxyURL = proxyURL
	}
	// set the pool drainer timer going
	if f.opt.IdleTimeout > 0 {
		f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
@@ -194,9 +194,33 @@ type DeleteResponse struct {
	Data map[string]Error
}

// DirectUploadURL returns the direct upload URL for Gofile
func DirectUploadURL() string {
	return "https://upload.gofile.io/uploadfile"
}

// Server is an upload server
type Server struct {
	Name string `json:"name"`
	Zone string `json:"zone"`
}

// String returns a string representation of the Server
func (s *Server) String() string {
	return fmt.Sprintf("%s (%s)", s.Name, s.Zone)
}

// Root returns the root URL for the server
func (s *Server) Root() string {
	return fmt.Sprintf("https://%s.gofile.io/", s.Name)
}

// URL returns the upload URL for the server
func (s *Server) URL() string {
	return fmt.Sprintf("https://%s.gofile.io/contents/uploadfile", s.Name)
}

// ServersResponse is the output from /servers
type ServersResponse struct {
	Error
	Data struct {
		Servers []Server `json:"servers"`
	} `json:"data"`
}

// UploadResponse is returned by POST /contents/uploadfile
@@ -8,11 +8,13 @@ import (
	"errors"
	"fmt"
	"io"
	"math/rand"
	"net/http"
	"net/url"
	"path"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/rclone/rclone/backend/gofile/api"
@@ -23,7 +25,7 @@ import (
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/list"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/dircache"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/pacer"
@@ -35,8 +37,10 @@ const (
	maxSleep       = 20 * time.Second
	decayConstant  = 1 // bigger for slower decay, exponential
	rootURL        = "https://api.gofile.io"
	rateLimitSleep = 5 * time.Second // penalise a goroutine by this long for making a rate limit error
	maxDepth       = 4               // in ListR recursive list this deep (maximum is 16)
	serversExpiry  = 60 * time.Second // check for new upload servers this often
	serversActive  = 2                // choose this many closest upload servers to use
	rateLimitSleep = 5 * time.Second  // penalise a goroutine by this long for making a rate limit error
	maxDepth       = 4                // in ListR recursive list this deep (maximum is 16)
)

/*
@@ -124,13 +128,16 @@ type Options struct {

// Fs represents a remote gofile
type Fs struct {
	name     string             // name of this remote
	root     string             // the path we are working on
	opt      Options            // parsed options
	features *fs.Features       // optional features
	srv      *rest.Client       // the connection to the server
	dirCache *dircache.DirCache // Map of directory path to directory id
	pacer    *fs.Pacer          // pacer for API calls
	name           string             // name of this remote
	root           string             // the path we are working on
	opt            Options            // parsed options
	features       *fs.Features       // optional features
	srv            *rest.Client       // the connection to the server
	dirCache       *dircache.DirCache // Map of directory path to directory id
	pacer          *fs.Pacer          // pacer for API calls
	serversMu      *sync.Mutex        // protect the servers info below
	servers        []api.Server       // upload servers we can use
	serversChecked time.Time          // time the servers were refreshed
}

// Object describes a gofile object
@@ -304,11 +311,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	client := fshttp.NewClient(ctx)

	f := &Fs{
		name:  name,
		root:  root,
		opt:   *opt,
		srv:   rest.NewClient(client).SetRoot(rootURL),
		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		name:      name,
		root:      root,
		opt:       *opt,
		srv:       rest.NewClient(client).SetRoot(rootURL),
		pacer:     fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		serversMu: new(sync.Mutex),
	}
	f.features = (&fs.Features{
		CaseInsensitive: false,
@@ -427,6 +435,98 @@ func (f *Fs) readRootFolderID(ctx context.Context, m configmap.Mapper) (err erro
	return nil
}

// Find the top n servers measured by response time
func (f *Fs) bestServers(ctx context.Context, servers []api.Server, n int) (newServers []api.Server) {
	ctx, cancel := context.WithDeadline(ctx, time.Now().Add(10*time.Second))
	defer cancel()

	if n > len(servers) {
		n = len(servers)
	}
	results := make(chan int, len(servers))

	// Test how long the servers take to respond
	for i := range servers {
		i := i // for closure
		go func() {
			opts := rest.Opts{
				Method:  "GET",
				RootURL: servers[i].Root(),
			}
			var result api.UploadServerStatus
			start := time.Now()
			_, err := f.srv.CallJSON(ctx, &opts, nil, &result)
			ping := time.Since(start)
			err = result.Err(err)
			if err != nil {
				results <- -1 // send a -ve number on error
				return
			}
			fs.Debugf(nil, "Upload server %v responded in %v", &servers[i], ping)
			results <- i
		}()
	}

	// Wait for n servers to respond
	newServers = make([]api.Server, 0, n)
	for range servers {
		i := <-results
		if i >= 0 {
			newServers = append(newServers, servers[i])
		}
		if len(newServers) >= n {
			break
		}
	}
	return newServers
}

// Clear all the upload servers - call on an error
func (f *Fs) clearServers() {
	f.serversMu.Lock()
	defer f.serversMu.Unlock()

	fs.Debugf(f, "Clearing upload servers")
	f.servers = nil
}

// Gets an upload server
func (f *Fs) getServer(ctx context.Context) (server *api.Server, err error) {
	f.serversMu.Lock()
	defer f.serversMu.Unlock()

	if len(f.servers) == 0 || time.Since(f.serversChecked) >= serversExpiry {
		opts := rest.Opts{
			Method: "GET",
			Path:   "/servers",
		}
		var result api.ServersResponse
		var resp *http.Response
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
			return shouldRetry(ctx, resp, err)
		})
		if err = result.Err(err); err != nil {
			if len(f.servers) == 0 {
				return nil, fmt.Errorf("failed to read upload servers: %w", err)
			}
			fs.Errorf(f, "failed to read new upload servers: %v", err)
		} else {
			// Find the top servers measured by response time
			f.servers = f.bestServers(ctx, result.Data.Servers, serversActive)
			f.serversChecked = time.Now()
		}
	}

	if len(f.servers) == 0 {
		return nil, errors.New("no upload servers found")
	}

	// Pick a server at random since we've already found the top ones
	i := rand.Intn(len(f.servers))
	return &f.servers[i], nil
}

// rootSlash returns root with a slash on if it is empty, otherwise empty string
func (f *Fs) rootSlash() string {
	if f.root == "" {
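bestServers above fans out one goroutine per server and then collects only the first n indices that come back, so the slowest servers are simply never read (the buffered channel lets the losers finish without blocking). A stripped-down standalone sketch of that fan-out/fan-in shape, assuming context and net/http imports (plain net/http and placeholder URLs — not the gofile API):

// fastestN pings each URL concurrently and returns the indices of the
// first n that answer successfully.
func fastestN(ctx context.Context, urls []string, n int) []int {
	if n > len(urls) {
		n = len(urls)
	}
	results := make(chan int, len(urls)) // buffered so losers never block
	for i, u := range urls {
		go func(i int, u string) {
			req, err := http.NewRequestWithContext(ctx, "GET", u, nil)
			if err != nil {
				results <- -1
				return
			}
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				results <- -1 // send a -ve number on error
				return
			}
			resp.Body.Close()
			results <- i
		}(i, u)
	}
	fastest := make([]int, 0, n)
	for range urls {
		if i := <-results; i >= 0 {
			fastest = append(fastest, i)
			if len(fastest) >= n {
				break
			}
		}
	}
	return fastest
}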
@@ -634,7 +734,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}

// implementation of ListR
func (f *Fs) listR(ctx context.Context, dir string, list *list.Helper) (err error) {
func (f *Fs) listR(ctx context.Context, dir string, list *walk.ListRHelper) (err error) {
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return err
@@ -720,7 +820,7 @@ func (f *Fs) listR(ctx context.Context, dir string, list *list.Helper) (err erro
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	list := list.NewHelper(callback)
	list := walk.NewListRHelper(callback)
	err = f.listR(ctx, dir, list)
	if err != nil {
		return err
@@ -1426,6 +1526,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
		return err
	}

	// Find an upload server
	server, err := o.fs.getServer(ctx)
	if err != nil {
		return err
	}
	fs.Debugf(o, "Using upload server %v", server)

	// If the file exists, delete it after a successful upload
	if o.id != "" {
		id := o.id
@@ -1454,7 +1561,7 @@
		},
		MultipartContentName: "file",
		MultipartFileName:    o.fs.opt.Enc.FromStandardName(leaf),
		RootURL:              api.DirectUploadURL(),
		RootURL:              server.URL(),
		Options:              options,
	}
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
@@ -1462,6 +1569,10 @@
		return shouldRetry(ctx, resp, err)
	})
	if err = result.Err(err); err != nil {
		if isAPIErr(err, "error-freespace") {
			fs.Errorf(o, "Upload server out of space - need to retry upload")
		}
		o.fs.clearServers()
		return fmt.Errorf("failed to upload file: %w", err)
	}
	return o.setMetaData(&result.Data)
@@ -35,7 +35,7 @@ import (
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/list"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/bucket"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/env"
@@ -483,9 +483,6 @@ func parsePath(path string) (root string) {
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
	bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
	if f.opt.DirectoryMarkers && strings.HasSuffix(bucketPath, "//") {
		bucketPath = bucketPath[:len(bucketPath)-1]
	}
	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}

@@ -715,7 +712,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
			continue
		}
		// process directory markers as directories
		remote, _ = strings.CutSuffix(remote, "/")
		remote = strings.TrimRight(remote, "/")
	}
	remote = remote[len(prefix):]
	if addBucket {
@@ -848,7 +845,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	bucket, directory := f.split(dir)
	list := list.NewHelper(callback)
	list := walk.NewListRHelper(callback)
	listR := func(bucket, directory, prefix string, addBucket bool) error {
		return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
@@ -962,7 +959,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {

// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
	remote, _ = strings.CutSuffix(remote, "/")
	remote = strings.TrimRight(remote, "/")
	dir := path.Dir(remote)
	if dir == "/" || dir == "." {
		dir = ""
@@ -43,7 +43,6 @@ var (
	errAlbumDelete = errors.New("google photos API does not implement deleting albums")
	errRemove      = errors.New("google photos API only implements removing files from albums")
	errOwnAlbums   = errors.New("google photos API only allows uploading to albums rclone created")
	errReadOnly    = errors.New("can't upload files in read only mode")
)

const (
@@ -53,31 +52,19 @@ const (
	listChunks      = 100 // chunk size to read directory listings
	albumChunks     = 50  // chunk size to read album listings
	minSleep        = 10 * time.Millisecond
	scopeAppendOnly = "https://www.googleapis.com/auth/photoslibrary.appendonly"
	scopeReadOnly   = "https://www.googleapis.com/auth/photoslibrary.readonly.appcreateddata"
	scopeReadWrite  = "https://www.googleapis.com/auth/photoslibrary.edit.appcreateddata"
	scopeReadOnly   = "https://www.googleapis.com/auth/photoslibrary.readonly"
	scopeReadWrite  = "https://www.googleapis.com/auth/photoslibrary"
	scopeAccess     = 2 // position of access scope in list
)

var (
	// scopes needed for read write access
	scopesReadWrite = []string{
		"openid",
		"profile",
		scopeAppendOnly,
		scopeReadOnly,
		scopeReadWrite,
	}

	// scopes needed for read only access
	scopesReadOnly = []string{
		"openid",
		"profile",
		scopeReadOnly,
	}

	// Description of how to auth for this app
	oauthConfig = &oauthutil.Config{
		Scopes: scopesReadWrite,
		Scopes: []string{
			"openid",
			"profile",
			scopeReadWrite, // this must be at position scopeAccess
		},
		AuthURL:  google.Endpoint.AuthURL,
		TokenURL: google.Endpoint.TokenURL,
		ClientID: rcloneClientID,
@@ -113,9 +100,9 @@ func init() {
		case "":
			// Fill in the scopes
			if opt.ReadOnly {
				oauthConfig.Scopes = scopesReadOnly
				oauthConfig.Scopes[scopeAccess] = scopeReadOnly
			} else {
				oauthConfig.Scopes = scopesReadWrite
				oauthConfig.Scopes[scopeAccess] = scopeReadWrite
			}
			return oauthutil.ConfigOut("warning", &oauthutil.Options{
				OAuth2Config: oauthConfig,
@@ -180,7 +167,7 @@ listings and won't be transferred.`,
The Google API will deliver images and video which aren't full
resolution, and/or have EXIF data missing.

However if you use the gphotosdl proxy then you can download original,
However if you ue the gphotosdl proxy tnen you can download original,
unchanged images.

This runs a headless browser in the background.
@@ -346,7 +333,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	baseClient := fshttp.NewClient(ctx)
	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
	if err != nil {
		return nil, fmt.Errorf("failed to configure google photos: %w", err)
		return nil, fmt.Errorf("failed to configure Box: %w", err)
	}

	root = strings.Trim(path.Clean(root), "/")
@@ -1133,9 +1120,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	}

	if !album.IsWriteable {
		if o.fs.opt.ReadOnly {
			return errReadOnly
		}
		return errOwnAlbums
	}
@@ -18,7 +18,6 @@ import (
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/list"
	"github.com/rclone/rclone/lib/kv"
)

@@ -183,9 +182,6 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
	}
	f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)

	// Enable ListP always
	f.features.ListP = f.ListP

	cache.PinUntilFinalized(f.Fs, f)
	return f, err
}
@@ -241,39 +237,10 @@ func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries,

// List the objects and directories in dir into entries.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	return list.WithListP(ctx, dir, f)
}

// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
	wrappedCallback := func(entries fs.DirEntries) error {
		entries, err := f.wrapEntries(entries)
		if err != nil {
			return err
		}
		return callback(entries)
	if entries, err = f.Fs.List(ctx, dir); err != nil {
		return nil, err
	}
	listP := f.Fs.Features().ListP
	if listP == nil {
		entries, err := f.Fs.List(ctx, dir)
		if err != nil {
			return err
		}
		return wrappedCallback(entries)
	}
	return listP(ctx, dir, wrappedCallback)
	return f.wrapEntries(entries)
}

// ListR lists the objects and directories recursively into out.
@@ -180,6 +180,7 @@ func getFsEndpoint(ctx context.Context, client *http.Client, url string, opt *Op
	}
	addHeaders(req, opt)
	res, err := noRedir.Do(req)

	if err != nil {
		fs.Debugf(nil, "Assuming path is a file as HEAD request could not be sent: %v", err)
		return createFileResult()
@@ -248,14 +249,6 @@ func (f *Fs) httpConnection(ctx context.Context, opt *Options) (isFile bool, err
	f.httpClient = client
	f.endpoint = u
	f.endpointURL = u.String()

	if isFile {
		// Correct root if definitely pointing to a file
		f.root = path.Dir(f.root)
		if f.root == "." || f.root == "/" {
			f.root = ""
		}
	}
	return isFile, nil
}
@@ -252,14 +252,18 @@ func (d *DriveService) DownloadFile(ctx context.Context, url string, opt []fs.Op
	}

	resp, err := d.icloud.srv.Call(ctx, opts)
	// icloud has some weird http codes
	if err != nil && resp != nil && resp.StatusCode == 330 {
		loc, err := resp.Location()
		if err == nil {
			return d.DownloadFile(ctx, loc.String(), opt)
	if err != nil {
		// icloud has some weird http codes
		if resp.StatusCode == 330 {
			loc, err := resp.Location()
			if err == nil {
				return d.DownloadFile(ctx, loc.String(), opt)
			}
		}

		return resp, err
	}
	return resp, err
	return d.icloud.srv.Call(ctx, opts)
}

// MoveItemToTrashByItemID moves an item to the trash based on the item ID.
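The DownloadFile change above hand-rolls the redirect because Go's http.Client only auto-follows the standard 3xx family, and iCloud answers with a non-standard 330. A rough standalone sketch of the same idea using a plain http.Client (the rclone version goes through its rest client and pacer instead):

// follow330 issues a GET and follows iCloud's non-standard 330
// redirect by reading the Location header manually.
func follow330(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
	if err != nil {
		return nil, err
	}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode == 330 { // non-standard iCloud redirect
		loc, lerr := resp.Location()
		if lerr == nil {
			_ = resp.Body.Close()
			return follow330(ctx, client, loc.String())
		}
	}
	return resp, nil
}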
@@ -627,7 +631,7 @@ func NewUpdateFileInfo() UpdateFileInfo {
		FileFlags: FileFlags{
			IsExecutable: true,
			IsHidden:     false,
			IsWritable:   true,
			IsWritable:   false,
		},
	}
}
@@ -31,7 +31,7 @@ import (
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/list"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/oauthutil"
	"github.com/rclone/rclone/lib/pacer"
@@ -1264,7 +1264,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
		Parameters: url.Values{},
	}
	opts.Parameters.Set("mode", "liststream")
	list := list.NewHelper(callback)
	list := walk.NewListRHelper(callback)

	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
@@ -1090,10 +1090,6 @@ func (o *Object) Remote() string {

// Hash returns the requested hash of a file as a lowercase hex string
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
	if r == hash.None {
		return "", nil
	}

	// Check that the underlying file hasn't changed
	o.fs.objectMetaMu.RLock()
	oldtime := o.modTime
@@ -1201,15 +1197,7 @@ func (o *Object) Storable() bool {
	o.fs.objectMetaMu.RLock()
	mode := o.mode
	o.fs.objectMetaMu.RUnlock()

	// On Windows items with os.ModeIrregular are likely Junction
	// points so we treat them as symlinks for the purpose of ignoring them.
	// https://github.com/golang/go/issues/73827
	symlinkFlag := os.ModeSymlink
	if runtime.GOOS == "windows" {
		symlinkFlag |= os.ModeIrregular
	}
	if mode&symlinkFlag != 0 && !o.fs.opt.TranslateSymlinks {
	if mode&os.ModeSymlink != 0 && !o.fs.opt.TranslateSymlinks {
		if !o.fs.opt.SkipSymlinks {
			fs.Logf(o, "Can't follow symlink without -L/--copy-links")
		}
@@ -204,23 +204,6 @@ func TestSymlinkError(t *testing.T) {
	assert.Equal(t, errLinksAndCopyLinks, err)
}

func TestHashWithTypeNone(t *testing.T) {
	ctx := context.Background()
	r := fstest.NewRun(t)
	const filePath = "file.txt"
	r.WriteFile(filePath, "content", time.Now())
	f := r.Flocal.(*Fs)

	// Get the object
	o, err := f.NewObject(ctx, filePath)
	require.NoError(t, err)

	// Test the hash is as we expect
	h, err := o.Hash(ctx, hash.None)
	require.Empty(t, h)
	require.NoError(t, err)
}

// Test hashes on updating an object
func TestHashOnUpdate(t *testing.T) {
	ctx := context.Background()
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"path"
|
||||
"slices"
|
||||
"strings"
|
||||
@@ -218,25 +216,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
defer megaCacheMu.Unlock()
|
||||
srv := megaCache[opt.User]
|
||||
if srv == nil {
|
||||
// srv = mega.New().SetClient(fshttp.NewClient(ctx))
|
||||
|
||||
// Workaround for Mega's use of insecure cipher suites which are no longer supported by default since Go 1.22.
|
||||
// Relevant issues:
|
||||
// https://github.com/rclone/rclone/issues/8565
|
||||
// https://github.com/meganz/webclient/issues/103
|
||||
clt := fshttp.NewClient(ctx)
|
||||
clt.Transport = fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
|
||||
var ids []uint16
|
||||
// Read default ciphers
|
||||
for _, cs := range tls.CipherSuites() {
|
||||
ids = append(ids, cs.ID)
|
||||
}
|
||||
// Insecure but Mega uses TLS_RSA_WITH_AES_128_GCM_SHA256 for storage endpoints
|
||||
// (e.g. https://gfs302n114.userstorage.mega.co.nz) as of June 18, 2025.
|
||||
t.TLSClientConfig.CipherSuites = append(ids, tls.TLS_RSA_WITH_AES_128_GCM_SHA256)
|
||||
})
|
||||
srv = mega.New().SetClient(clt)
|
||||
|
||||
srv = mega.New().SetClient(fshttp.NewClient(ctx))
|
||||
srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
|
||||
srv.SetHTTPS(opt.UseHTTPS)
|
||||
srv.SetLogger(func(format string, v ...any) {
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/list"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/bucket"
)

@@ -383,7 +383,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	bucket, directory := f.split(dir)
	list := list.NewHelper(callback)
	list := walk.NewListRHelper(callback)
	entries := fs.DirEntries{}
	listR := func(bucket, directory, prefix string, addBucket bool) error {
		err = f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, entry fs.DirEntry, isDirectory bool) error {
@@ -28,7 +28,7 @@ import (
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/list"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/rest"
)
@@ -516,7 +516,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
		return fs.ErrorDirNotFound
	}

	list := list.NewHelper(callback)
	list := walk.NewListRHelper(callback)
	for resumeStart := u.Path; resumeStart != ""; {
		var files []File
		files, resumeStart, err = f.netStorageListRequest(ctx, URL, u.Path)
@@ -396,57 +396,10 @@ func (m *Metadata) WritePermissions(ctx context.Context) (err error) {
	return nil
}

// Order the permissions so that any with users come first.
//
// This is to work around a quirk with Graph:
//
// 1. You are adding permissions for both a group and a user.
// 2. The user is a member of the group.
// 3. The permissions for the group and user are the same.
// 4. You are adding the group permission before the user permission.
//
// When all of the above are true, Graph indicates it has added the
// user permission, but it immediately dropss it.
//
// See: https://github.com/rclone/rclone/issues/8465
func (m *Metadata) orderPermissions(xs []*api.PermissionsType) {
	// Return true if identity has any user permissions
	hasUserIdentity := func(identity *api.IdentitySet) bool {
		if identity == nil {
			return false
		}
		return identity.User.ID != "" || identity.User.DisplayName != "" || identity.User.Email != "" || identity.User.LoginName != ""
	}
	// Return true if p has any user permissions
	hasUser := func(p *api.PermissionsType) bool {
		if hasUserIdentity(p.GetGrantedTo(m.fs.driveType)) {
			return true
		}
		for _, identity := range p.GetGrantedToIdentities(m.fs.driveType) {
			if hasUserIdentity(identity) {
				return true
			}
		}
		return false
	}
	// Put Permissions with a user first, leaving unsorted otherwise
	slices.SortStableFunc(xs, func(a, b *api.PermissionsType) int {
		aHasUser := hasUser(a)
		bHasUser := hasUser(b)
		if aHasUser && !bHasUser {
			return -1
		} else if !aHasUser && bHasUser {
			return 1
		}
		return 0
	})
}

// sortPermissions sorts the permissions (to be written) into add, update, and remove queues
func (m *Metadata) sortPermissions() (add, update, remove []*api.PermissionsType) {
	new, old := m.queuedPermissions, m.permissions
	if len(old) == 0 || m.permsAddOnly {
		m.orderPermissions(new)
		return new, nil, nil // they must all be "add"
	}

@@ -494,9 +447,6 @@ func (m *Metadata) sortPermissions() (add, update, remove []*api.PermissionsType
			remove = append(remove, o)
		}
	}
	m.orderPermissions(add)
	m.orderPermissions(update)
	m.orderPermissions(remove)
	return add, update, remove
}

@@ -749,8 +699,6 @@ func (o *Object) fetchMetadataForCreate(ctx context.Context, src fs.ObjectInfo,

// Fetch metadata and update updateInfo if --metadata is in use
// modtime will still be set when there is no metadata to set
//
// May return info=nil and err=nil if there was no metadata to update.
func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, updateInfo *Object) (info *api.Item, err error) {
	meta, err := fs.GetMetadataOptions(ctx, f, src, options)
	if err != nil {
@@ -770,8 +718,6 @@ func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, opti
}

// updateMetadata calls Get, Set, and Write
//
// May return info=nil and err=nil if there was no metadata to update.
func (o *Object) updateMetadata(ctx context.Context, meta fs.Metadata) (info *api.Item, err error) {
	_, err = o.meta.Get(ctx) // refresh permissions
	if err != nil {
@@ -1,125 +0,0 @@
package onedrive

import (
	"encoding/json"
	"testing"

	"github.com/rclone/rclone/backend/onedrive/api"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestOrderPermissions(t *testing.T) {
	tests := []struct {
		name     string
		input    []*api.PermissionsType
		expected []string
	}{
		{
			name:     "empty",
			input:    []*api.PermissionsType{},
			expected: []string(nil),
		},
		{
			name: "users first, then group, then none",
			input: []*api.PermissionsType{
				{ID: "1", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group1"}}},
				{ID: "2", GrantedToIdentities: []*api.IdentitySet{{User: api.Identity{DisplayName: "Alice"}}}},
				{ID: "3", GrantedTo: &api.IdentitySet{User: api.Identity{DisplayName: "Alice"}}},
				{ID: "4"},
			},
			expected: []string{"2", "3", "1", "4"},
		},
		{
			name: "same type unsorted",
			input: []*api.PermissionsType{
				{ID: "b", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group B"}}},
				{ID: "a", GrantedTo: &api.IdentitySet{Group: api.Identity{DisplayName: "Group A"}}},
				{ID: "c", GrantedToIdentities: []*api.IdentitySet{{Group: api.Identity{DisplayName: "Group A"}}, {User: api.Identity{DisplayName: "Alice"}}}},
			},
			expected: []string{"c", "b", "a"},
		},
		{
			name: "all user identities",
			input: []*api.PermissionsType{
				{ID: "c", GrantedTo: &api.IdentitySet{User: api.Identity{DisplayName: "Bob"}}},
				{ID: "a", GrantedTo: &api.IdentitySet{User: api.Identity{Email: "alice@example.com"}}},
				{ID: "b", GrantedToIdentities: []*api.IdentitySet{{User: api.Identity{LoginName: "user3"}}}},
			},
			expected: []string{"c", "a", "b"},
		},
		{
			name: "no user or group info",
			input: []*api.PermissionsType{
				{ID: "z"},
				{ID: "x"},
				{ID: "y"},
			},
			expected: []string{"z", "x", "y"},
		},
	}

	for _, driveType := range []string{driveTypePersonal, driveTypeBusiness} {
		t.Run(driveType, func(t *testing.T) {
			for _, tt := range tests {
				m := &Metadata{fs: &Fs{driveType: driveType}}
				t.Run(tt.name, func(t *testing.T) {
					if driveType == driveTypeBusiness {
						for i := range tt.input {
							tt.input[i].GrantedToV2 = tt.input[i].GrantedTo
							tt.input[i].GrantedTo = nil
							tt.input[i].GrantedToIdentitiesV2 = tt.input[i].GrantedToIdentities
							tt.input[i].GrantedToIdentities = nil
						}
					}
					m.orderPermissions(tt.input)
					var gotIDs []string
					for _, p := range tt.input {
						gotIDs = append(gotIDs, p.ID)
					}
					assert.Equal(t, tt.expected, gotIDs)
				})
			}
		})
	}
}

func TestOrderPermissionsJSON(t *testing.T) {
	testJSON := `[
	{
		"id": "1",
		"grantedToV2": {
			"group": {
				"id": "group@example.com"
			}
		},
		"roles": [
			"write"
		]
	},
	{
		"id": "2",
		"grantedToV2": {
			"user": {
				"id": "user@example.com"
			}
		},
		"roles": [
			"write"
		]
	}
]`

	var testPerms []*api.PermissionsType
	err := json.Unmarshal([]byte(testJSON), &testPerms)
	require.NoError(t, err)

	m := &Metadata{fs: &Fs{driveType: driveTypeBusiness}}
	m.orderPermissions(testPerms)
	var gotIDs []string
	for _, p := range testPerms {
		gotIDs = append(gotIDs, p.ID)
	}
	assert.Equal(t, []string{"2", "1"}, gotIDs)
}
@@ -30,7 +30,6 @@ import (
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/list"
	"github.com/rclone/rclone/fs/log"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/walk"
@@ -56,7 +55,6 @@ const (
	driveTypeSharepoint = "documentLibrary"
	defaultChunkSize    = 10 * fs.Mebi
	chunkSizeMultiple   = 320 * fs.Kibi
	maxSinglePartSize   = 4 * fs.Mebi

	regionGlobal = "global"
	regionUS     = "us"
@@ -139,21 +137,6 @@ func init() {
				Help: "Azure and Office 365 operated by Vnet Group in China",
			},
		},
	}, {
		Name: "upload_cutoff",
		Help: `Cutoff for switching to chunked upload.

Any files larger than this will be uploaded in chunks of chunk_size.

This is disabled by default as uploading using single part uploads
causes rclone to use twice the storage on Onedrive business as when
rclone sets the modification time after the upload Onedrive creates a
new version.

See: https://github.com/rclone/rclone/issues/1716
`,
		Default:  fs.SizeSuffix(-1),
		Advanced: true,
	}, {
		Name: "chunk_size",
		Help: `Chunk size to upload files with - must be multiple of 320k (327,680 bytes).
@@ -762,7 +745,6 @@ Examples:
// Options defines the configuration for this backend
type Options struct {
	Region       string        `config:"region"`
	UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
	ChunkSize    fs.SizeSuffix `config:"chunk_size"`
	DriveID      string        `config:"drive_id"`
	DriveType    string        `config:"drive_type"`
@@ -1039,13 +1021,6 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
	return
}

func checkUploadCutoff(cs fs.SizeSuffix) error {
	if cs > maxSinglePartSize {
		return fmt.Errorf("%v is greater than %v", cs, maxSinglePartSize)
	}
	return nil
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
@@ -1059,10 +1034,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	if err != nil {
		return nil, fmt.Errorf("onedrive: chunk size: %w", err)
	}
	err = checkUploadCutoff(opt.UploadCutoff)
	if err != nil {
		return nil, fmt.Errorf("onedrive: upload cutoff: %w", err)
	}

	if opt.DriveID == "" || opt.DriveType == "" {
		return nil, errors.New("unable to get drive_id and drive_type - if you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
@@ -1425,7 +1396,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
	// So we have to filter things outside of the root which is
	// inefficient.

	list := list.NewHelper(callback)
	list := walk.NewListRHelper(callback)

	// list a folder conventionally - used for shared folders
	var listFolder func(dir string) error
@@ -1782,9 +1753,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
	if err != nil {
		return nil, err
	}
	if info != nil {
		err = dstObj.setMetaData(info)
	}
	err = dstObj.setMetaData(info)
	return dstObj, err
}

@@ -1864,9 +1833,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
	if err != nil {
		return nil, err
	}
	if info != nil {
		err = dstObj.setMetaData(info)
	}
	err = dstObj.setMetaData(info)
	return dstObj, err
}

@@ -2501,10 +2468,6 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
			return false, nil
		}
		return true, fmt.Errorf("retry this chunk skipping %d bytes: %w", skip, err)
	} else if err != nil && resp != nil && resp.StatusCode == http.StatusNotFound {
		fs.Debugf(o, "Received 404 error: assuming eventual consistency problem with session - retrying chunk: %v", err)
		time.Sleep(5 * time.Second) // a little delay to help things along
		return true, err
	}
	if err != nil {
		return shouldRetry(ctx, resp, err)
@@ -2599,8 +2562,8 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.Objec
// This function will set modtime and metadata after uploading, which will create a new version for the remote file
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (info *api.Item, err error) {
	size := src.Size()
	if size < 0 || size > int64(maxSinglePartSize) {
		return nil, fmt.Errorf("size passed into uploadSinglepart must be >= 0 and <= %v", maxSinglePartSize)
	if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
		return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4 MiB")
	}

	fs.Debugf(o, "Starting singlepart upload")
@@ -2633,10 +2596,7 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, src fs.Obje
	if err != nil {
		return nil, fmt.Errorf("failed to fetch and update metadata: %w", err)
	}
	if info != nil {
		err = o.setMetaData(info)
	}
	return info, err
	return info, o.setMetaData(info)
}

// Update the object with the contents of the io.Reader, modTime and size
@@ -2656,9 +2616,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
	size := src.Size()

	var info *api.Item
	if size > 0 && size >= int64(o.fs.opt.UploadCutoff) {
	if size > 0 {
		info, err = o.uploadMultipart(ctx, in, src, options...)
	} else if size >= 0 {
	} else if size == 0 {
		info, err = o.uploadSinglepart(ctx, in, src, options...)
	} else {
		return errors.New("unknown-sized upload not supported")
@@ -18,8 +18,8 @@ import (
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/list"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/bucket"
	"github.com/rclone/rclone/lib/pacer"
)
@@ -649,7 +649,7 @@ of listing recursively than doing a directory traversal.
*/
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	bucketName, directory := f.split(dir)
	list := list.NewHelper(callback)
	list := walk.NewListRHelper(callback)
	listR := func(bucket, directory, prefix string, addBucket bool) error {
		return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
@@ -27,7 +27,7 @@ import (
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/list"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/dircache"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/oauthutil"
@@ -378,20 +378,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	return f, nil
}

// XOpenWriterAt opens with a handle for random access writes
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object.
//
// OpenWriterAt disabled because it seems to have been disabled at pcloud
// PUT /file_open?flags=XXX&folderid=XXX&name=XXX HTTP/1.1
//
//	{
//		"result": 2003,
//		"error": "Access denied. You do not have permissions to perform this operation."
//	}
func (f *Fs) XOpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
	client, err := f.newSingleConnClient(ctx)
	if err != nil {
		return nil, fmt.Errorf("create client: %w", err)
@@ -639,7 +631,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	list := list.NewHelper(callback)
	list := walk.NewListRHelper(callback)
	err = f.listHelper(ctx, dir, true, func(o fs.DirEntry) error {
		return list.Add(o)
	})
@@ -22,7 +22,7 @@ import (
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/list"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/bucket"
	"github.com/rclone/rclone/lib/encoder"
	qsConfig "github.com/yunify/qingstor-sdk-go/v3/config"
@@ -704,7 +704,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	bucket, directory := f.split(dir)
	list := list.NewHelper(callback)
	list := walk.NewListRHelper(callback)
	listR := func(bucket, directory, prefix string, addBucket bool) error {
		return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *qs.KeyType, isDirectory bool) error {
			entry, err := f.itemToDirEntry(remote, object, isDirectory)
125
backend/s3/s3.go
125
backend/s3/s3.go
@@ -48,8 +48,8 @@ import (
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/list"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fs/walk"
    "github.com/rclone/rclone/lib/atexit"
    "github.com/rclone/rclone/lib/bucket"
    "github.com/rclone/rclone/lib/encoder"
@@ -101,12 +101,6 @@ var providerOption = fs.Option{
    }, {
        Value: "Dreamhost",
        Help:  "Dreamhost DreamObjects",
    }, {
        Value: "Exaba",
        Help:  "Exaba Object Storage",
    }, {
        Value: "FlashBlade",
        Help:  "Pure Storage FlashBlade Object Storage",
    }, {
        Value: "GCS",
        Help:  "Google Cloud Storage",
@@ -137,9 +131,6 @@ var providerOption = fs.Option{
    }, {
        Value: "Magalu",
        Help:  "Magalu Object Storage",
    }, {
        Value: "Mega",
        Help:  "MEGA S4 Object Storage",
    }, {
        Value: "Minio",
        Help:  "Minio Object Storage",
@@ -576,7 +567,7 @@ func init() {
    }, {
        Name: "region",
        Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
        Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,IONOS,Petabox,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive,Mega",
        Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive",
        Examples: []fs.OptionExample{{
            Value: "",
            Help:  "Use this if unsure.\nWill use v4 signatures and an empty region.",
@@ -1012,12 +1003,6 @@ func init() {
            Value: "us-iad-1.linodeobjects.com",
            Help:  "Washington, DC, (USA), us-iad-1",
        }},
    }, {
        // Lyve Cloud endpoints
        Name: "endpoint",
        Help: "Endpoint for Lyve Cloud S3 API.\nRequired when using an S3 clone. Please type in your LyveCloud endpoint.\nExamples:\n- s3.us-west-1.{account_name}.lyve.seagate.com (US West 1 - California)\n- s3.eu-west-1.{account_name}.lyve.seagate.com (EU West 1 - Ireland)",
        Provider: "LyveCloud",
        Required: true,
    }, {
        // Magalu endpoints: https://docs.magalu.cloud/docs/object-storage/how-to/copy-url
        Name: "endpoint",
@@ -1392,7 +1377,7 @@ func init() {
    }, {
        Name: "endpoint",
        Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
        Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,LyveCloud,Magalu,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
        Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,Magalu,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
        Examples: []fs.OptionExample{{
            Value: "objects-us-east-1.dream.io",
            Help:  "Dream Objects endpoint",
@@ -1441,6 +1426,18 @@ func init() {
            Value: "localhost:8333",
            Help:  "SeaweedFS S3 localhost",
            Provider: "SeaweedFS",
        }, {
            Value: "s3.us-east-1.lyvecloud.seagate.com",
            Help:  "Seagate Lyve Cloud US East 1 (Virginia)",
            Provider: "LyveCloud",
        }, {
            Value: "s3.us-west-1.lyvecloud.seagate.com",
            Help:  "Seagate Lyve Cloud US West 1 (California)",
            Provider: "LyveCloud",
        }, {
            Value: "s3.ap-southeast-1.lyvecloud.seagate.com",
            Help:  "Seagate Lyve Cloud AP Southeast 1 (Singapore)",
            Provider: "LyveCloud",
        }, {
            Value: "oos.eu-west-2.outscale.com",
            Help:  "Outscale EU West 2 (Paris)",
@@ -1529,22 +1526,6 @@ func init() {
            Value: "s3.ir-tbz-sh1.arvanstorage.ir",
            Help:  "ArvanCloud Tabriz Iran (Shahriar) endpoint",
            Provider: "ArvanCloud",
        }, {
            Value: "s3.eu-central-1.s4.mega.io",
            Help:  "Mega S4 eu-central-1 (Amsterdam)",
            Provider: "Mega",
        }, {
            Value: "s3.eu-central-2.s4.mega.io",
            Help:  "Mega S4 eu-central-2 (Bettembourg)",
            Provider: "Mega",
        }, {
            Value: "s3.ca-central-1.s4.mega.io",
            Help:  "Mega S4 ca-central-1 (Montreal)",
            Provider: "Mega",
        }, {
            Value: "s3.ca-west-1.s4.mega.io",
            Help:  "Mega S4 ca-west-1 (Vancouver)",
            Provider: "Mega",
        }},
    }, {
        Name: "location_constraint",
@@ -1927,7 +1908,7 @@ func init() {
    }, {
        Name: "location_constraint",
        Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
        Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,FlashBlade,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox,Mega",
        Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox",
    }, {
        Name: "acl",
        Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -1942,7 +1923,7 @@ doesn't copy the ACL from the source but rather writes a fresh one.
If the acl is an empty string then no X-Amz-Acl: header is added and
the default (private) will be used.
`,
        Provider: "!Storj,Selectel,Synology,Cloudflare,FlashBlade,Mega",
        Provider: "!Storj,Selectel,Synology,Cloudflare",
        Examples: []fs.OptionExample{{
            Value: "default",
            Help:  "Owner gets Full_CONTROL.\nNo one else has access rights (default).",
@@ -2000,7 +1981,6 @@ isn't set then "acl" is used instead.
If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl:
header is added and the default (private) will be used.
`,
        Provider: "!Storj,Selectel,Synology,Cloudflare,FlashBlade",
        Advanced: true,
        Examples: []fs.OptionExample{{
            Value: "private",
@@ -3136,9 +3116,6 @@ func parsePath(path string) (root string) {
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
    bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
    if f.opt.DirectoryMarkers && strings.HasSuffix(bucketPath, "//") {
        bucketPath = bucketPath[:len(bucketPath)-1]
    }
    return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}

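The `//` trimming in `split` above is small but easy to misread, so here is the same logic in isolation. With directory markers enabled a directory object is stored with a trailing slash, so joining it onto the root can produce a doubled slash; the example path is hypothetical:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// "photos/2024/" joined under a root can yield "photos/2024//";
	// strip one of the two trailing slashes, as split does above.
	bucketPath := "photos/2024//"
	if strings.HasSuffix(bucketPath, "//") {
		bucketPath = bucketPath[:len(bucketPath)-1]
	}
	fmt.Println(bucketPath) // photos/2024/
}
```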
@@ -3518,9 +3495,6 @@ func setQuirks(opt *Options) {
    case "Dreamhost":
        urlEncodeListings = false
        useAlreadyExists = false // untested
    case "FlashBlade":
        mightGzip = false        // Never auto gzips objects
        virtualHostStyle = false // supports vhost but defaults to paths
    case "IBMCOS":
        listObjectsV2 = false // untested
        virtualHostStyle = false
@@ -3553,14 +3527,6 @@ func setQuirks(opt *Options) {
        urlEncodeListings = false
        useMultipartEtag = false
        useAlreadyExists = false
    case "Mega":
        listObjectsV2 = true
        virtualHostStyle = false
        urlEncodeListings = true
        useMultipartEtag = false
        useAlreadyExists = false
        // Multipart server side copies not supported
        opt.CopyCutoff = math.MaxInt64
    case "Minio":
        virtualHostStyle = false
    case "Netease":
@@ -3631,8 +3597,6 @@ func setQuirks(opt *Options) {
        urlEncodeListings = false
        virtualHostStyle = false
        useAlreadyExists = false // untested
    case "Exaba":
        virtualHostStyle = false
    case "GCS":
        // Google breaks the request Signature by mutating the accept-encoding HTTP header
        // https://github.com/rclone/rclone/issues/6670
@@ -4461,7 +4425,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
    remote = remote[len(opt.prefix):]
    if isDirectory {
        // process directory markers as directories
        remote, _ = strings.CutSuffix(remote, "/")
        remote = strings.TrimRight(remote, "/")
    }
    if opt.addBucket {
        remote = bucket.Join(opt.bucket, remote)
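The `CutSuffix` / `TrimRight` swap in the hunk above is not a pure refactor: the two behave differently when there is more than one trailing slash. A minimal demonstration:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// CutSuffix removes at most one trailing "/"; TrimRight removes
	// them all. For a single-slash directory marker they agree.
	s := "dir//"
	cut, _ := strings.CutSuffix(s, "/")
	fmt.Println(cut)                       // dir/
	fmt.Println(strings.TrimRight(s, "/")) // dir
}
```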
@@ -4517,7 +4481,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *types.Ob
}

// listDir lists files and directories to out
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
    // List the objects and directories
    err = f.list(ctx, listOpt{
        bucket: bucket,
@@ -4533,16 +4497,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
            return err
        }
        if entry != nil {
            return callback(entry)
            entries = append(entries, entry)
        }
        return nil
    })
    if err != nil {
        return err
        return nil, err
    }
    // bucket must be present if listing succeeded
    f.cache.MarkOK(bucket)
    return nil
    return entries, nil
}

// listBuckets lists the buckets to out
@@ -4575,46 +4539,14 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    return list.WithListP(ctx, dir, f)
}

// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
    list := list.NewHelper(callback)
    bucket, directory := f.split(dir)
    if bucket == "" {
        if directory != "" {
            return fs.ErrorListBucketRequired
        }
        entries, err := f.listBuckets(ctx)
        if err != nil {
            return err
        }
        for _, entry := range entries {
            err = list.Add(entry)
            if err != nil {
                return err
            }
        }
    } else {
        err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
        if err != nil {
            return err
            return nil, fs.ErrorListBucketRequired
        }
        return f.listBuckets(ctx)
    }
    return list.Flush()
    return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
}

// ListR lists the objects and directories of the Fs starting
@@ -4635,7 +4567,7 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
    bucket, directory := f.split(dir)
    list := list.NewHelper(callback)
    list := walk.NewListRHelper(callback)
    listR := func(bucket, directory, prefix string, addBucket bool) error {
        return f.list(ctx, listOpt{
            bucket: bucket,
@@ -4776,7 +4708,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {

// mkdirParent creates the parent bucket/directory if it doesn't exist
func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
    remote, _ = strings.CutSuffix(remote, "/")
    remote = strings.TrimRight(remote, "/")
    dir := path.Dir(remote)
    if dir == "/" || dir == "." {
        dir = ""
@@ -5129,7 +5061,7 @@ or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier to the Fre

Usage Examples:

    rclone backend restore s3:bucket/path/to/ --include /object -o priority=PRIORITY -o lifetime=DAYS
    rclone backend restore s3:bucket/path/to/object -o priority=PRIORITY -o lifetime=DAYS
    rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS
    rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS
    rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY
@@ -6911,7 +6843,6 @@ var (
    _ fs.Copier          = &Fs{}
    _ fs.PutStreamer     = &Fs{}
    _ fs.ListRer         = &Fs{}
    _ fs.ListPer         = &Fs{}
    _ fs.Commander       = &Fs{}
    _ fs.CleanUpper      = &Fs{}
    _ fs.OpenChunkWriter = &Fs{}
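The `var (_ fs.Copier = &Fs{} ...)` block that this hunk edits is the standard Go compile-time interface-check idiom: each blank assignment fails the build if `*Fs` stops satisfying that interface, which is exactly why removing `fs.ListPer` accompanies removing the `ListP` method. A tiny self-contained version of the idiom:

```go
package main

import "io"

type buf struct{}

func (buf) Write(p []byte) (int, error) { return len(p), nil }

// This line refuses to compile if buf ever loses its Write method.
var _ io.Writer = buf{}

func main() {}
```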
@@ -10,7 +10,6 @@ import (
    "fmt"
    "io"
    iofs "io/fs"
    "net/url"
    "os"
    "path"
    "regexp"
@@ -483,14 +482,6 @@ Example:

    myUser:myPass@localhost:9005
`,
    Advanced: true,
}, {
    Name:    "http_proxy",
    Default: "",
    Help: `URL for HTTP CONNECT proxy

Set this to a URL for an HTTP proxy which supports the HTTP CONNECT verb.
`,
    Advanced: true,
}, {
    Name:    "copy_is_hardlink",
    Default: false,
@@ -554,7 +545,6 @@ type Options struct {
    HostKeyAlgorithms fs.SpaceSepList `config:"host_key_algorithms"`
    SSH               fs.SpaceSepList `config:"ssh"`
    SocksProxy        string          `config:"socks_proxy"`
    HTTPProxy         string          `config:"http_proxy"`
    CopyIsHardlink    bool            `config:"copy_is_hardlink"`
}

@@ -580,7 +570,6 @@ type Fs struct {
    savedpswd string
    sessions  atomic.Int32 // count in use sessions
    tokens    *pacer.TokenDispenser
    proxyURL  *url.URL // address of HTTP proxy read from environment
}

// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -878,15 +867,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    opt.Port = "22"
}

// get proxy URL if set
if opt.HTTPProxy != "" {
    proxyURL, err := url.Parse(opt.HTTPProxy)
    if err != nil {
        return nil, fmt.Errorf("failed to parse HTTP Proxy URL: %w", err)
    }
    f.proxyURL = proxyURL
}

sshConfig := &ssh.ClientConfig{
    User: opt.User,
    Auth: []ssh.AuthMethod{},

@@ -31,8 +31,6 @@ func (f *Fs) newSSHClientInternal(ctx context.Context, network, addr string, ssh
    )
    if f.opt.SocksProxy != "" {
        conn, err = proxy.SOCKS5Dial(network, addr, f.opt.SocksProxy, baseDialer)
    } else if f.proxyURL != nil {
        conn, err = proxy.HTTPConnectDial(network, addr, f.proxyURL, baseDialer)
    } else {
        conn, err = baseDialer.Dial(network, addr)
    }
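The proxy handling above hinges on the stdlib `url.Parse` check in `NewFs`. A self-contained sketch of that step, reusing the `myUser:myPass@localhost:9005` shape from the option help above (the scheme and credentials are illustrative only):

```go
package main

import (
	"fmt"
	"log"
	"net/url"
)

func main() {
	// The same validation NewFs performs on the http_proxy option
	// before handing the URL to the HTTP CONNECT dialer.
	proxyURL, err := url.Parse("http://myUser:myPass@localhost:9005")
	if err != nil {
		log.Fatalf("failed to parse HTTP Proxy URL: %v", err)
	}
	fmt.Println(proxyURL.Scheme, proxyURL.Host) // http localhost:9005
}
```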
@@ -25,8 +25,8 @@ import (
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/list"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fs/walk"
    "github.com/rclone/rclone/lib/atexit"
    "github.com/rclone/rclone/lib/bucket"
    "github.com/rclone/rclone/lib/encoder"
@@ -846,7 +846,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
    container, directory := f.split(dir)
    list := list.NewHelper(callback)
    list := walk.NewListRHelper(callback)
    listR := func(container, directory, prefix string, addContainer bool) error {
        return f.list(ctx, container, directory, prefix, addContainer, true, false, func(entry fs.DirEntry) error {
            return list.Add(entry)

@@ -1020,9 +1020,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    }
}

// Disable ListP always
features.ListP = nil

// show that we wrap other backends
features.Overlay = true

@@ -12,7 +12,7 @@ import (
)

var (
    unimplementableFsMethods     = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "PublicLink", "PutUnchecked", "MergeDirs", "OpenWriterAt", "OpenChunkWriter", "ListP"}
    unimplementableFsMethods     = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "PublicLink", "PutUnchecked", "MergeDirs", "OpenWriterAt", "OpenChunkWriter"}
    unimplementableObjectMethods = []string{}
)
@@ -82,37 +82,22 @@ type Prop struct {
// Parse a status of the form "HTTP/1.1 200 OK" or "HTTP/1.1 200"
var parseStatus = regexp.MustCompile(`^HTTP/[0-9.]+\s+(\d+)`)

// Code extracts the status code from the first status
func (p *Prop) Code() int {
// StatusOK examines the Status and returns an OK flag
func (p *Prop) StatusOK() bool {
    // Assume OK if no statuses received
    if len(p.Status) == 0 {
        return -1
        return true
    }
    match := parseStatus.FindStringSubmatch(p.Status[0])
    if len(match) < 2 {
        return 0
        return false
    }
    code, err := strconv.Atoi(match[1])
    if err != nil {
        return 0
    }
    return code
}

// StatusOK examines the Status and returns an OK flag
func (p *Prop) StatusOK() bool {
    // Fetch status code as int
    c := p.Code()

    // Assume OK if no statuses received
    if c == -1 {
        return true
    }
    if c == 0 {
        return false
    }
    if c >= 200 && c < 300 {
    if code >= 200 && code < 300 {
        return true
    }
    return false
}
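Both versions of the code above rest on the same `parseStatus` regexp, so a quick standalone check of what it extracts may be useful. This reuses the pattern verbatim; the sample status lines are illustrative:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// Same pattern as parseStatus above: capture the numeric code from a
// WebDAV propstat status line.
var parseStatus = regexp.MustCompile(`^HTTP/[0-9.]+\s+(\d+)`)

func main() {
	for _, status := range []string{"HTTP/1.1 200 OK", "HTTP/1.1 425"} {
		match := parseStatus.FindStringSubmatch(status)
		code, _ := strconv.Atoi(match[1])
		fmt.Println(status, "->", code, "ok:", code >= 200 && code < 300)
	}
}
```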
@@ -1,40 +0,0 @@
package webdav

import (
    "errors"
    "fmt"
)

var (
    // ErrChunkSize is returned when the chunk size is zero
    ErrChunkSize = errors.New("tus chunk size must be greater than zero")
    // ErrNilLogger is returned when the logger is nil
    ErrNilLogger = errors.New("tus logger can't be nil")
    // ErrNilStore is returned when the store is nil
    ErrNilStore = errors.New("tus store can't be nil if resume is enable")
    // ErrNilUpload is returned when the upload is nil
    ErrNilUpload = errors.New("tus upload can't be nil")
    // ErrLargeUpload is returned when the upload body is to large
    ErrLargeUpload = errors.New("tus upload body is to large")
    // ErrVersionMismatch is returned when the tus protocol version is mismatching
    ErrVersionMismatch = errors.New("tus protocol version mismatch")
    // ErrOffsetMismatch is returned when the tus upload offset is mismatching
    ErrOffsetMismatch = errors.New("tus upload offset mismatch")
    // ErrUploadNotFound is returned when the tus upload is not found
    ErrUploadNotFound = errors.New("tus upload not found")
    // ErrResumeNotEnabled is returned when the tus resuming is not enabled
    ErrResumeNotEnabled = errors.New("tus resuming not enabled")
    // ErrFingerprintNotSet is returned when the tus fingerprint is not set
    ErrFingerprintNotSet = errors.New("tus fingerprint not set")
)

// ClientError represents an error state of a client
type ClientError struct {
    Code int
    Body []byte
}

// Error returns an error string containing the client error code
func (c ClientError) Error() string {
    return fmt.Sprintf("unexpected status code: %d", c.Code)
}
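Sentinel errors like the ones in this deleted file are typically matched with `errors.Is`, which still works when the caller wraps them with `fmt.Errorf("...: %w", err)` as `uploadChunk` does later in this diff. A minimal sketch:

```go
package main

import (
	"errors"
	"fmt"
)

var ErrOffsetMismatch = errors.New("tus upload offset mismatch")

func main() {
	// Wrapping with %w preserves the sentinel for errors.Is.
	err := fmt.Errorf("uploadChunk failed: %w", ErrOffsetMismatch)
	fmt.Println(errors.Is(err, ErrOffsetMismatch)) // true
}
```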
@@ -1,88 +0,0 @@
package webdav

import (
    "bytes"
    "encoding/base64"
    "fmt"
    "io"
    "strings"
)

// Metadata is a typedef for a string to string map to hold metadata
type Metadata map[string]string

// Upload is a struct containing the file status during upload
type Upload struct {
    stream io.ReadSeeker
    size   int64
    offset int64

    Fingerprint string
    Metadata    Metadata
}

// Updates the Upload information based on offset.
func (u *Upload) updateProgress(offset int64) {
    u.offset = offset
}

// Finished returns whether this upload is finished or not.
func (u *Upload) Finished() bool {
    return u.offset >= u.size
}

// Progress returns the progress in a percentage.
func (u *Upload) Progress() int64 {
    return (u.offset * 100) / u.size
}

// Offset returns the current upload offset.
func (u *Upload) Offset() int64 {
    return u.offset
}

// Size returns the size of the upload body.
func (u *Upload) Size() int64 {
    return u.size
}

// EncodedMetadata encodes the upload metadata.
func (u *Upload) EncodedMetadata() string {
    var encoded []string

    for k, v := range u.Metadata {
        encoded = append(encoded, fmt.Sprintf("%s %s", k, b64encode(v)))
    }

    return strings.Join(encoded, ",")
}

func b64encode(s string) string {
    return base64.StdEncoding.EncodeToString([]byte(s))
}

// NewUpload creates a new upload from an io.Reader.
func NewUpload(reader io.Reader, size int64, metadata Metadata, fingerprint string) *Upload {
    stream, ok := reader.(io.ReadSeeker)

    if !ok {
        buf := new(bytes.Buffer)
        _, err := buf.ReadFrom(reader)
        if err != nil {
            return nil
        }
        stream = bytes.NewReader(buf.Bytes())
    }

    if metadata == nil {
        metadata = make(Metadata)
    }

    return &Upload{
        stream: stream,
        size:   size,

        Fingerprint: fingerprint,
        Metadata:    metadata,
    }
}
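The `EncodedMetadata` wire format above is worth seeing concretely: each key is followed by the base64 of its value, with pairs joined by commas. A standalone reproduction of that encoding (the filename is made up):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// "key base64(value)" pairs joined by commas -- the same output
	// EncodedMetadata produces for the Upload-Metadata header.
	metadata := map[string]string{"filename": "photo.jpg"}
	var encoded []string
	for k, v := range metadata {
		b64 := base64.StdEncoding.EncodeToString([]byte(v))
		encoded = append(encoded, fmt.Sprintf("%s %s", k, b64))
	}
	fmt.Println(strings.Join(encoded, ","))
	// Output: filename cGhvdG8uanBn
}
```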
@@ -1,191 +0,0 @@
package webdav

import (
    "bytes"
    "context"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "strconv"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/lib/rest"
)

// Uploader holds all information about a currently running upload
type Uploader struct {
    fs                  *Fs
    url                 string
    upload              *Upload
    offset              int64
    aborted             bool
    uploadSubs          []chan Upload
    notifyChan          chan bool
    overridePatchMethod bool
}

// NotifyUploadProgress subscribes to progress updates.
func (u *Uploader) NotifyUploadProgress(c chan Upload) {
    u.uploadSubs = append(u.uploadSubs, c)
}

func (f *Fs) shouldRetryChunk(ctx context.Context, resp *http.Response, err error, newOff *int64) (bool, error) {
    if resp == nil {
        return true, err
    }

    switch resp.StatusCode {
    case 204:
        if off, err := strconv.ParseInt(resp.Header.Get("Upload-Offset"), 10, 64); err == nil {
            *newOff = off
            return false, nil
        }
        return false, err

    case 409:
        return false, ErrOffsetMismatch
    case 412:
        return false, ErrVersionMismatch
    case 413:
        return false, ErrLargeUpload
    }

    return f.shouldRetry(ctx, resp, err)
}

func (u *Uploader) uploadChunk(ctx context.Context, body io.Reader, size int64, offset int64, options ...fs.OpenOption) (int64, error) {
    var method string

    if !u.overridePatchMethod {
        method = "PATCH"
    } else {
        method = "POST"
    }

    extraHeaders := map[string]string{} // FIXME: Use extraHeaders(ctx, src) from Object maybe?
    extraHeaders["Upload-Offset"] = strconv.FormatInt(offset, 10)
    extraHeaders["Tus-Resumable"] = "1.0.0"
    extraHeaders["filetype"] = u.upload.Metadata["filetype"]
    if u.overridePatchMethod {
        extraHeaders["X-HTTP-Method-Override"] = "PATCH"
    }

    url, err := url.Parse(u.url)
    if err != nil {
        return 0, fmt.Errorf("upload Chunk failed, could not parse url")
    }

    // FIXME: Use GetBody func as in chunking.go
    opts := rest.Opts{
        Method:        method,
        Path:          url.Path,
        NoResponse:    true,
        RootURL:       fmt.Sprintf("%s://%s", url.Scheme, url.Host),
        ContentLength: &size,
        Body:          body,
        ContentType:   "application/offset+octet-stream",
        ExtraHeaders:  extraHeaders,
        Options:       options,
    }

    var newOffset int64

    err = u.fs.pacer.CallNoRetry(func() (bool, error) {
        res, err := u.fs.srv.Call(ctx, &opts)
        return u.fs.shouldRetryChunk(ctx, res, err, &newOffset)
    })
    if err != nil {
        return 0, fmt.Errorf("uploadChunk failed: %w", err)
        // FIXME What do we do here? Remove the entire upload?
        // See https://github.com/tus/tusd/issues/176
    }

    return newOffset, nil
}

// Upload uploads the entire body to the server.
func (u *Uploader) Upload(ctx context.Context, options ...fs.OpenOption) error {
    cnt := 1

    fs.Debug(u.fs, "Uploaded starts")
    for u.offset < u.upload.size && !u.aborted {
        err := u.UploadChunk(ctx, cnt, options...)
        cnt++
        if err != nil {
            return err
        }
    }
    fs.Debug(u.fs, "-- Uploaded finished")

    return nil
}

// UploadChunk uploads a single chunk.
func (u *Uploader) UploadChunk(ctx context.Context, cnt int, options ...fs.OpenOption) error {
    chunkSize := u.fs.opt.ChunkSize
    data := make([]byte, chunkSize)

    _, err := u.upload.stream.Seek(u.offset, 0)

    if err != nil {
        fs.Errorf(u.fs, "Chunk %d: Error seek in stream failed: %v", cnt, err)
        return err
    }

    size, err := u.upload.stream.Read(data)

    if err != nil {
        fs.Errorf(u.fs, "Chunk %d: Error: Can not read from data strem: %v", cnt, err)
        return err
    }

    body := bytes.NewBuffer(data[:size])

    newOffset, err := u.uploadChunk(ctx, body, int64(size), u.offset, options...)

    if err == nil {
        fs.Debugf(u.fs, "Uploaded chunk no %d ok, range %d -> %d", cnt, u.offset, newOffset)
    } else {
        fs.Errorf(u.fs, "Uploaded chunk no %d failed: %v", cnt, err)

        return err
    }

    u.offset = newOffset

    u.upload.updateProgress(u.offset)

    u.notifyChan <- true

    return nil
}

// Waits for a signal to broadcast to all subscribers
func (u *Uploader) broadcastProgress() {
    for range u.notifyChan {
        for _, c := range u.uploadSubs {
            c <- *u.upload
        }
    }
}

// NewUploader creates a new Uploader.
func NewUploader(f *Fs, url string, upload *Upload, offset int64) *Uploader {
    notifyChan := make(chan bool)

    uploader := &Uploader{
        f,
        url,
        upload,
        offset,
        false,
        nil,
        notifyChan,
        false,
    }

    go uploader.broadcastProgress()

    return uploader
}
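The notify/broadcast machinery above is easiest to understand as a channel fan-out: each completed chunk pushes the current `Upload` state to every subscriber. A self-contained sketch of a consumer, with a stripped-down `upload` type standing in for the real one (only the `Progress` arithmetic is copied from above):

```go
package main

import "fmt"

// Stand-in for the Upload fields that Progress() uses.
type upload struct{ offset, size int64 }

func (u upload) Progress() int64 { return (u.offset * 100) / u.size }

func main() {
	ch := make(chan upload)
	go func() {
		// Simulate broadcastProgress pushing an update per chunk.
		for _, off := range []int64{25, 50, 100} {
			ch <- upload{offset: off, size: 100}
		}
		close(ch)
	}()
	for u := range ch {
		fmt.Printf("upload at %d%%\n", u.Progress())
	}
}
```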
@@ -1,108 +0,0 @@
package webdav

/*
Chunked upload based on the tus protocol for ownCloud Infinite Scale

See https://tus.io/protocols/resumable-upload
*/

import (
    "context"
    "fmt"
    "io"
    "net/http"
    "path/filepath"
    "strconv"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/lib/rest"
)

func (o *Object) updateViaTus(ctx context.Context, in io.Reader, contentType string, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {

    fn := filepath.Base(src.Remote())
    metadata := map[string]string{
        "filename": fn,
        "mtime":    strconv.FormatInt(src.ModTime(ctx).Unix(), 10),
        "filetype": contentType,
    }

    // Fingerprint is used to identify the upload when resuming. That is not yet implemented
    fingerprint := ""

    // create an upload from a file.
    upload := NewUpload(in, src.Size(), metadata, fingerprint)

    // create the uploader.
    uploader, err := o.CreateUploader(ctx, upload, options...)
    if err == nil {
        // start the uploading process.
        err = uploader.Upload(ctx, options...)
    }

    return err
}

func (f *Fs) getTusLocationOrRetry(ctx context.Context, resp *http.Response, err error) (bool, string, error) {

    switch resp.StatusCode {
    case 201:
        location := resp.Header.Get("Location")
        return false, location, nil
    case 412:
        return false, "", ErrVersionMismatch
    case 413:
        return false, "", ErrLargeUpload
    }

    retry, err := f.shouldRetry(ctx, resp, err)
    return retry, "", err
}

// CreateUploader creates a new upload to the server.
func (o *Object) CreateUploader(ctx context.Context, u *Upload, options ...fs.OpenOption) (*Uploader, error) {
    if u == nil {
        return nil, ErrNilUpload
    }

    // if c.Config.Resume && len(u.Fingerprint) == 0 {
    //	return nil, ErrFingerprintNotSet
    // }

    l := int64(0)
    p := o.filePath()
    // cut the filename off
    dir, _ := filepath.Split(p)
    if dir == "" {
        dir = "/"
    }

    opts := rest.Opts{
        Method:        "POST",
        Path:          dir,
        NoResponse:    true,
        RootURL:       o.fs.endpointURL,
        ContentLength: &l,
        ExtraHeaders:  o.extraHeaders(ctx, o),
        Options:       options,
    }
    opts.ExtraHeaders["Upload-Length"] = strconv.FormatInt(u.size, 10)
    opts.ExtraHeaders["Upload-Metadata"] = u.EncodedMetadata()
    opts.ExtraHeaders["Tus-Resumable"] = "1.0.0"
    // opts.ExtraHeaders["mtime"] = strconv.FormatInt(src.ModTime(ctx).Unix(), 10)

    var tusLocation string
    // rclone http call
    err := o.fs.pacer.CallNoRetry(func() (bool, error) {
        var retry bool
        res, err := o.fs.srv.Call(ctx, &opts)
        retry, tusLocation, err = o.fs.getTusLocationOrRetry(ctx, res, err)
        return retry, err
    })
    if err != nil {
        return nil, fmt.Errorf("making upload directory failed: %w", err)
    }

    uploader := NewUploader(o.fs, tusLocation, u, 0)

    return uploader, nil
}
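As a rough sketch of the tus creation step `CreateUploader` performs: a zero-length POST carrying the upload length, encoded metadata, and the protocol version, to which the server replies 201 with a `Location` for the new upload resource. The URL and values below are purely illustrative, built with the stdlib only:

```go
package main

import (
	"fmt"
	"net/http"
	"strconv"
	"strings"
)

func main() {
	// Hypothetical endpoint; only the header set mirrors the code above.
	req, err := http.NewRequest("POST", "https://example.com/dav/files/", strings.NewReader(""))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Upload-Length", strconv.FormatInt(1048576, 10))
	req.Header.Set("Upload-Metadata", "filename cGhvdG8uanBn")
	req.Header.Set("Tus-Resumable", "1.0.0")
	fmt.Println(req.Header) // the creation request's tus headers
}
```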
@@ -84,10 +84,7 @@ func init() {
    Help: "Nextcloud",
}, {
    Value: "owncloud",
    Help:  "Owncloud 10 PHP based WebDAV server",
}, {
    Value: "infinitescale",
    Help:  "ownCloud Infinite Scale",
    Help:  "Owncloud",
}, {
    Value: "sharepoint",
    Help:  "Sharepoint Online, authenticated by Microsoft account",
@@ -215,7 +212,6 @@ type Fs struct {
    pacer              *fs.Pacer     // pacer for API calls
    precision          time.Duration // mod time precision
    canStream          bool          // set if can stream
    canTus             bool          // supports the TUS upload protocol
    useOCMtime         bool          // set if can use X-OC-Mtime
    propsetMtime       bool          // set if can use propset
    retryWithZeroDepth bool          // some vendors (sharepoint) won't list files when Depth is 1 (our default)
@@ -266,7 +262,6 @@ func (f *Fs) Features() *fs.Features {
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
    423, // Locked
    425, // Too Early
    429, // Too Many Requests.
    500, // Internal Server Error
    502, // Bad Gateway
@@ -378,8 +373,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
    return nil, fs.ErrorObjectNotFound
}
item := result.Responses[0]
// status code 425 is accepted here as well
if !(item.Props.StatusOK() || item.Props.Code() == 425) {
if !item.Props.StatusOK() {
    return nil, fs.ErrorObjectNotFound
}
if itemIsDir(&item) {
@@ -636,15 +630,6 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
    f.propsetMtime = true
    f.hasOCMD5 = true
    f.hasOCSHA1 = true
case "infinitescale":
    f.precision = time.Second
    f.useOCMtime = true
    f.propsetMtime = true
    f.hasOCMD5 = false
    f.hasOCSHA1 = true
    f.canChunk = false
    f.canTus = true
    f.opt.ChunkSize = 10 * fs.Mebi
case "nextcloud":
    f.precision = time.Second
    f.useOCMtime = true
@@ -1342,7 +1327,7 @@ func (o *Object) Size() int64 {
    ctx := context.TODO()
    err := o.readMetaData(ctx)
    if err != nil {
        fs.Infof(o, "Failed to read metadata: %v", err)
        fs.Logf(o, "Failed to read metadata: %v", err)
        return 0
    }
    return o.size
@@ -1386,7 +1371,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
func (o *Object) ModTime(ctx context.Context) time.Time {
    err := o.readMetaData(ctx)
    if err != nil {
        fs.Infof(o, "Failed to read metadata: %v", err)
        fs.Logf(o, "Failed to read metadata: %v", err)
        return time.Now()
    }
    return o.modTime
@@ -1512,21 +1497,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    return fmt.Errorf("Update mkParentDir failed: %w", err)
}

if o.fs.canTus { // supports the tus upload protocol, ie. InfiniteScale
    fs.Debugf(src, "Update will use the tus protocol to upload")
    contentType := fs.MimeType(ctx, src)
    err = o.updateViaTus(ctx, in, contentType, src, options...)
    if err != nil {
        fs.Debug(src, "tus update failed.")
        return fmt.Errorf("tus update failed: %w", err)
    }
} else if o.shouldUseChunkedUpload(src) {
    if o.fs.opt.Vendor == "nextcloud" {
        fs.Debugf(src, "Update will use the chunked upload strategy")
        err = o.updateChunked(ctx, in, src, options...)
    } else {
        fs.Debug(src, "Chunking - unknown vendor")
    }
if o.shouldUseChunkedUpload(src) {
    fs.Debugf(src, "Update will use the chunked upload strategy")
    err = o.updateChunked(ctx, in, src, options...)
    if err != nil {
        return err
    }
@@ -1538,9 +1511,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    // TODO: define getBody() to enable low-level HTTP/2 retries
    err = o.updateSimple(ctx, in, nil, filePath, src.Size(), contentType, extraHeaders, o.fs.endpointURL, options...)
    if err != nil {
        return fmt.Errorf("unchunked simple update failed: %w", err)
        return err
    }
}

// read metadata from remote
o.hasMetaData = false
return o.readMetaData(ctx)
@@ -1550,7 +1524,7 @@ func (o *Object) extraHeaders(ctx context.Context, src fs.ObjectInfo) map[string
    extraHeaders := map[string]string{}
    if o.fs.useOCMtime || o.fs.hasOCMD5 || o.fs.hasOCSHA1 {
        if o.fs.useOCMtime {
            extraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", o.modTime.Unix())
            extraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
        }
        // Set one upload checksum
        // Owncloud uses one checksum only to check the upload and stores its own SHA1 and MD5
@@ -1,137 +0,0 @@
#!/usr/bin/env python3
"""
This script checks for unauthorized modifications in autogenerated sections of markdown files.
It is designed to be used in a GitHub Actions workflow or a local pre-commit hook.

Features:
- Detects markdown files changed in the last commit.
- Identifies modified autogenerated sections marked by specific comments.
- Reports violations using GitHub Actions error messages.
- Exits with a nonzero status code if unauthorized changes are found.

It currently only checks the last commit.
"""

import re
import subprocess
import sys

def run_git(args):
    """
    Run a Git command with the provided arguments and return its output as a string.
    """
    return subprocess.run(["git"] + args, stdout=subprocess.PIPE, text=True, check=True).stdout.strip()

def get_changed_files():
    """
    Retrieve a list of markdown files that were changed in the last commit.
    """
    files = run_git(["diff", "--name-only", "HEAD~1", "HEAD"]).splitlines()
    return [f for f in files if f.endswith(".md")]

def get_diff(file):
    """
    Get the diff of a given file between the last commit and the current version.
    """
    return run_git(["diff", "-U0", "HEAD~1", "HEAD", "--", file]).splitlines()

def get_file_content(ref, file):
    """
    Retrieve the content of a file from a given Git reference.
    """
    try:
        return run_git(["show", f"{ref}:{file}"]).splitlines()
    except Exception:
        return []

def find_regions(lines):
    """
    Identify the start and end line numbers of autogenerated regions in a file.
    """
    regions = []
    start = None
    for i, line in enumerate(lines, 1):
        if "rem autogenerated options start" in line:
            start = i
        elif "rem autogenerated options stop" in line and start is not None:
            regions.append((start, i))
            start = None
    return regions

def in_region(ln, regions):
    """
    Check if a given line number falls within an autogenerated region.
    """
    return any(start <= ln <= end for start, end in regions)

def show_error(file_name, line, message):
    """
    Print an error message in a GitHub Actions-compatible format.
    """
    print(f"::error file={file_name},line={line}::{message} at {file_name} line {line}")

def check_file(file):
    """
    Check a markdown file for modifications in autogenerated regions.
    """
    viol = False
    new_lines = get_file_content("HEAD", file)
    old_lines = get_file_content("HEAD~1", file)

    # If old file did not exist or was empty then don't check
    if not old_lines:
        return

    # Entire autogenerated file check.
    if any("autogenerated - DO NOT EDIT" in l for l in new_lines[:10]):
        if get_diff(file):
            show_error(file, 1, "Autogenerated file modified")
            return True
        return False

    # Partial autogenerated regions.
    regions_new = find_regions(new_lines)
    regions_old = find_regions(old_lines)
    diff = get_diff(file)
    hunk_re = re.compile(r"^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@")
    new_ln = old_ln = None

    for line in diff:
        if line.startswith("@@"):
            m = hunk_re.match(line)
            if m:
                old_ln = int(m.group(1))
                new_ln = int(m.group(3))
        elif new_ln is None:
            continue
        elif line.startswith("+"):
            if in_region(new_ln, regions_new):
                show_error(file, new_ln, "Autogenerated region of file modified")
                viol = True
            new_ln += 1
        elif line.startswith("-"):
            if in_region(old_ln, regions_old):
                show_error(file, old_ln, "Autogenerated region of file modified")
                viol = True
            old_ln += 1
        else:
            new_ln += 1
            old_ln += 1

    return viol

def main():
    """
    Main function that iterates over changed files and checks them for violations.
    """
    found = False
    for f in get_changed_files():
        if check_file(f):
            found = True
    if found:
        sys.exit(1)
    print("No unauthorized edits found in autogenerated sections.")
    sys.exit(0)

if __name__ == "__main__":
    main()
@@ -41,10 +41,8 @@ docs = [
    "crypt.md",
    "compress.md",
    "combine.md",
    "doi.md",
    "dropbox.md",
    "filefabric.md",
    "filelu.md",
    "filescom.md",
    "ftp.md",
    "gofile.md",
@@ -53,15 +53,6 @@ import (
    _ "github.com/rclone/rclone/cmd/rmdirs"
    _ "github.com/rclone/rclone/cmd/selfupdate"
    _ "github.com/rclone/rclone/cmd/serve"
    _ "github.com/rclone/rclone/cmd/serve/dlna"
    _ "github.com/rclone/rclone/cmd/serve/docker"
    _ "github.com/rclone/rclone/cmd/serve/ftp"
    _ "github.com/rclone/rclone/cmd/serve/http"
    _ "github.com/rclone/rclone/cmd/serve/nfs"
    _ "github.com/rclone/rclone/cmd/serve/restic"
    _ "github.com/rclone/rclone/cmd/serve/s3"
    _ "github.com/rclone/rclone/cmd/serve/sftp"
    _ "github.com/rclone/rclone/cmd/serve/webdav"
    _ "github.com/rclone/rclone/cmd/settier"
    _ "github.com/rclone/rclone/cmd/sha1sum"
    _ "github.com/rclone/rclone/cmd/size"
@@ -23,23 +23,19 @@ func init() {
}

var commandDefinition = &cobra.Command{
    Use: "authorize <fs name> [base64_json_blob | client_id client_secret]",
    Use: "authorize",
    Short: `Remote authorization.`,
    Long: `Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser - use as instructed by
rclone config.

The command requires 1-3 arguments:
- fs name (e.g., "drive", "s3", etc.)
- Either a base64 encoded JSON blob obtained from a previous rclone config session
- Or a client_id and client_secret pair obtained from the remote service

Use --auth-no-open-browser to prevent rclone from opening the auth
link in the default browser automatically.

Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.`,
    Annotations: map[string]string{
        "versionIntroduced": "v1.27",
        // "groups": "",
    },
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(1, 3, command, args)
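Reading the Use string and the 1-3 argument rule together, the accepted invocation shapes look like this (illustrative placeholder values, following the usage-line style used elsewhere in these docs):

    rclone authorize "drive"
    rclone authorize "drive" "base64_json_blob"
    rclone authorize "drive" "client_id" "client_secret"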
@@ -1,32 +0,0 @@
package authorize

import (
    "bytes"
    "strings"
    "testing"

    "github.com/spf13/cobra"
)

func TestAuthorizeCommand(t *testing.T) {
    // Test that the Use string is correctly formatted
    if commandDefinition.Use != "authorize <fs name> [base64_json_blob | client_id client_secret]" {
        t.Errorf("Command Use string doesn't match expected format: %s", commandDefinition.Use)
    }

    // Test that help output contains the argument information
    buf := &bytes.Buffer{}
    cmd := &cobra.Command{}
    cmd.AddCommand(commandDefinition)
    cmd.SetOut(buf)
    cmd.SetArgs([]string{"authorize", "--help"})
    err := cmd.Execute()
    if err != nil {
        t.Fatalf("Failed to execute help command: %v", err)
    }

    helpOutput := buf.String()
    if !strings.Contains(helpOutput, "authorize <fs name>") {
        t.Errorf("Help output doesn't contain correct usage information")
    }
}
@@ -5,6 +5,8 @@ import (
    "os"
    "sort"
    "strconv"
    "strings"
    "time"
)

// Names comprises a set of file names
@@ -83,3 +85,81 @@ func (am AliasMap) Alias(name1 string) string {
    }
    return name1
}

// ParseGlobs determines whether a string contains {brackets}
// and returns the substring (including both brackets) for replacing
// substring is first opening bracket to last closing bracket --
// good for {{this}} but not {this}{this}
func ParseGlobs(s string) (hasGlobs bool, substring string) {
    open := strings.Index(s, "{")
    close := strings.LastIndex(s, "}")
    if open >= 0 && close > open {
        return true, s[open : close+1]
    }
    return false, ""
}

// TrimBrackets converts {{this}} to this
func TrimBrackets(s string) string {
    return strings.Trim(s, "{}")
}

// TimeFormat converts a user-supplied string to a Go time constant, if possible
func TimeFormat(timeFormat string) string {
    switch timeFormat {
    case "Layout":
        timeFormat = time.Layout
    case "ANSIC":
        timeFormat = time.ANSIC
    case "UnixDate":
        timeFormat = time.UnixDate
    case "RubyDate":
        timeFormat = time.RubyDate
    case "RFC822":
        timeFormat = time.RFC822
    case "RFC822Z":
        timeFormat = time.RFC822Z
    case "RFC850":
        timeFormat = time.RFC850
    case "RFC1123":
        timeFormat = time.RFC1123
    case "RFC1123Z":
        timeFormat = time.RFC1123Z
    case "RFC3339":
        timeFormat = time.RFC3339
    case "RFC3339Nano":
        timeFormat = time.RFC3339Nano
    case "Kitchen":
        timeFormat = time.Kitchen
    case "Stamp":
        timeFormat = time.Stamp
    case "StampMilli":
        timeFormat = time.StampMilli
    case "StampMicro":
        timeFormat = time.StampMicro
    case "StampNano":
        timeFormat = time.StampNano
    case "DateTime":
        // timeFormat = time.DateTime // missing in go1.19
        timeFormat = "2006-01-02 15:04:05"
    case "DateOnly":
        // timeFormat = time.DateOnly // missing in go1.19
        timeFormat = "2006-01-02"
    case "TimeOnly":
        // timeFormat = time.TimeOnly // missing in go1.19
        timeFormat = "15:04:05"
    case "MacFriendlyTime", "macfriendlytime", "mac":
        timeFormat = "2006-01-02 0304PM" // not actually a Go constant -- but useful as macOS filenames can't have colons
    }
    return timeFormat
}

// AppyTimeGlobs converts "myfile-{DateOnly}.txt" to "myfile-2006-01-02.txt"
func AppyTimeGlobs(s string, t time.Time) string {
    hasGlobs, substring := ParseGlobs(s)
    if !hasGlobs {
        return s
    }
    timeString := t.Local().Format(TimeFormat(TrimBrackets(substring)))
    return strings.ReplaceAll(s, substring, timeString)
}
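Putting ParseGlobs, TrimBrackets, and TimeFormat together, the whole pipeline reduces to finding the `{...}` span, mapping the name inside it to a Go layout, and substituting the formatted time. A self-contained walk-through with a fixed date (the filename is made up; "2006-01-02" is the DateOnly layout the switch above resolves to):

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	s := "backup-{DateOnly}.txt"
	// ParseGlobs: first "{" to last "}".
	open, end := strings.Index(s, "{"), strings.LastIndex(s, "}")
	substring := s[open : end+1] // "{DateOnly}"
	// TimeFormat maps "DateOnly" to the layout "2006-01-02".
	t := time.Date(2025, 3, 14, 0, 0, 0, 0, time.UTC)
	out := strings.ReplaceAll(s, substring, t.Format("2006-01-02"))
	fmt.Println(out) // backup-2025-03-14.txt
}
```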
@@ -3,22 +3,20 @@ package bilib

import (
    "bytes"
    "log/slog"
    "log"

    "github.com/rclone/rclone/fs/log"
    "github.com/sirupsen/logrus"
)

// CaptureOutput runs a function capturing its output at log level INFO.
// CaptureOutput runs a function capturing its output.
func CaptureOutput(fun func()) []byte {
    logSave := log.Writer()
    logrusSave := logrus.StandardLogger().Out
    buf := &bytes.Buffer{}
    oldLevel := log.Handler.SetLevel(slog.LevelInfo)
    log.Handler.SetOutput(func(level slog.Level, text string) {
        buf.WriteString(text)
    })
    defer func() {
        log.Handler.ResetOutput()
        log.Handler.SetLevel(oldLevel)
    }()
    log.SetOutput(buf)
    logrus.SetOutput(buf)
    fun()
    log.SetOutput(logSave)
    logrus.SetOutput(logrusSave)
    return buf.Bytes()
}
@@ -4,6 +4,8 @@ import (
    "context"
    "fmt"
    "math"
    "mime"
    "path"
    "strings"
    "time"

@@ -11,7 +13,6 @@ import (
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/lib/terminal"
    "github.com/rclone/rclone/lib/transform"
)

// Prefer describes strategies for resolving sync conflicts
@@ -96,8 +97,8 @@ func (b *bisyncRun) setResolveDefaults(ctx context.Context) error {
    }
    // replace glob variables, if any
    t := time.Now() // capture static time here so it is the same for all files throughout this run
    b.opt.ConflictSuffix1 = transform.AppyTimeGlobs(b.opt.ConflictSuffix1, t)
    b.opt.ConflictSuffix2 = transform.AppyTimeGlobs(b.opt.ConflictSuffix2, t)
    b.opt.ConflictSuffix1 = bilib.AppyTimeGlobs(b.opt.ConflictSuffix1, t)
    b.opt.ConflictSuffix2 = bilib.AppyTimeGlobs(b.opt.ConflictSuffix2, t)

    // append dot (intentionally allow more than one)
    b.opt.ConflictSuffix1 = "." + b.opt.ConflictSuffix1
@@ -129,7 +130,6 @@ type (
        path2 namePair
    }
)

type namePair struct {
    oldName string
    newName string
@@ -240,7 +240,24 @@ func SuffixName(ctx context.Context, remote, suffix string) string {
    }
    ci := fs.GetConfig(ctx)
    if ci.SuffixKeepExtension {
        return transform.SuffixKeepExtension(remote, suffix)
        var (
            base  = remote
            exts  = ""
            first = true
            ext   = path.Ext(remote)
        )
        for ext != "" {
            // Look up second and subsequent extensions in mime types.
            // If they aren't found then don't keep it as an extension.
            if !first && mime.TypeByExtension(ext) == "" {
                break
            }
            base = base[:len(base)-len(ext)]
            exts = ext + exts
            first = false
            ext = path.Ext(base)
        }
        return base + suffix + exts
    }
    return remote + suffix
}
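The extension-peeling loop in `SuffixName` is subtle: the first extension is always kept, but earlier ones are kept only if Go's `mime` package recognises them. A self-contained mirror of that loop (the filenames are hypothetical; `.html` is chosen because it is in Go's built-in mime table on every platform):

```go
package main

import (
	"fmt"
	"mime"
	"path"
)

// suffixName mirrors the loop above: known mime extensions stay after
// the suffix, so the suffix lands before the whole extension chain.
func suffixName(remote, suffix string) string {
	base, exts, first := remote, "", true
	for ext := path.Ext(base); ext != ""; ext = path.Ext(base) {
		if !first && mime.TypeByExtension(ext) == "" {
			break
		}
		base = base[:len(base)-len(ext)]
		exts = ext + exts
		first = false
	}
	return base + suffix + exts
}

func main() {
	fmt.Println(suffixName("backup.html.gz", "-old")) // backup-old.html.gz
}
```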
@@ -3,104 +3,502 @@ package convmv

import (
    "context"
    "encoding/base64"
    "errors"
    "fmt"
    "os"
    "path/filepath"
    "strings"
    "unicode/utf8"

    "github.com/rclone/rclone/cmd"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/flags"
    "github.com/rclone/rclone/fs/list"
    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fs/sync"
    "github.com/rclone/rclone/lib/transform"
    "github.com/rclone/rclone/lib/encoder"
    "github.com/rclone/rclone/lib/random"
    "github.com/spf13/cobra"
    "golang.org/x/text/encoding/charmap"
    "golang.org/x/text/unicode/norm"
)

// Globals
var (
    deleteEmptySrcDirs = false
    createEmptySrcDirs = false
    Opt                ConvOpt
    Cmaps              = map[int]*charmap.Charmap{}
)

// ConvOpt sets the conversion options
type ConvOpt struct {
    ctx         context.Context
    f           fs.Fs
    ConvertAlgo Convert
    FindReplace []string
    Prefix      string
    Suffix      string
    Max         int
    Enc         encoder.MultiEncoder
    CmapFlag    fs.Enum[cmapChoices]
    Cmap        *charmap.Charmap
    List        bool
}

func init() {
    cmd.Root.AddCommand(commandDefinition)
    cmdFlags := commandDefinition.Flags()
    flags.BoolVarP(cmdFlags, &deleteEmptySrcDirs, "delete-empty-src-dirs", "", deleteEmptySrcDirs, "Delete empty source dirs after move", "")
    flags.BoolVarP(cmdFlags, &createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after move", "")
    flags.FVarP(cmdFlags, &Opt.ConvertAlgo, "conv", "t", "Conversion algorithm: "+Opt.ConvertAlgo.Help(), "")
    flags.StringVarP(cmdFlags, &Opt.Prefix, "prefix", "", "", "In 'prefix' or 'trimprefix' mode, append or trim this prefix", "")
    flags.StringVarP(cmdFlags, &Opt.Suffix, "suffix", "", "", "In 'suffix' or 'trimsuffix' mode, append or trim this suffix", "")
    flags.IntVarP(cmdFlags, &Opt.Max, "max", "m", -1, "In 'truncate' mode, truncate all path segments longer than this many characters", "")
    flags.StringArrayVarP(cmdFlags, &Opt.FindReplace, "replace", "r", nil, "In 'replace' mode, this is a pair of find,replace values (can repeat flag more than once)", "")
    flags.FVarP(cmdFlags, &Opt.Enc, "encoding", "", "Custom backend encoding: (use --list to see full list)", "")
    flags.FVarP(cmdFlags, &Opt.CmapFlag, "charmap", "", "Other character encoding (use --list to see full list) ", "")
    flags.BoolVarP(cmdFlags, &Opt.List, "list", "", false, "Print full list of options", "")
}

// Convert describes conversion setting
type Convert = fs.Enum[convertChoices]

// Supported conversion options
const (
    ConvNone Convert = iota
    ConvToNFC
    ConvToNFD
    ConvToNFKC
    ConvToNFKD
    ConvFindReplace
    ConvPrefix
    ConvSuffix
    ConvTrimPrefix
    ConvTrimSuffix
    ConvIndex
    ConvDate
    ConvTruncate
    ConvBase64Encode
    ConvBase64Decode
    ConvEncoder
    ConvDecoder
    ConvISO8859_1
    ConvWindows1252
    ConvMacintosh
    ConvCharmap
    ConvLowercase
    ConvUppercase
    ConvTitlecase
    ConvASCII
    ConvURL
    ConvMapper
)
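Since the nfc/nfd conversions feature prominently below, a quick refresher on what Unicode normalization actually changes, using the `golang.org/x/text/unicode/norm` package this file imports (the sample string is illustrative):

```go
package main

import (
	"fmt"

	"golang.org/x/text/unicode/norm"
)

func main() {
	// "é" as NFD is two code points (e + combining accent); NFC folds
	// it into one -- this is the difference the nfc/nfd transforms
	// switch between.
	nfd := "e\u0301"
	nfc := norm.NFC.String(nfd)
	fmt.Println(len(nfd), len(nfc)) // 3 2 (bytes)
}
```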
type convertChoices struct{}

func (convertChoices) Choices() []string {
    return []string{
        ConvNone:         "none",
        ConvToNFC:        "nfc",
        ConvToNFD:        "nfd",
        ConvToNFKC:       "nfkc",
        ConvToNFKD:       "nfkd",
        ConvFindReplace:  "replace",
        ConvPrefix:       "prefix",
        ConvSuffix:       "suffix",
        ConvTrimPrefix:   "trimprefix",
        ConvTrimSuffix:   "trimsuffix",
        ConvIndex:        "index",
        ConvDate:         "date",
        ConvTruncate:     "truncate",
        ConvBase64Encode: "base64encode",
        ConvBase64Decode: "base64decode",
        ConvEncoder:      "encoder",
        ConvDecoder:      "decoder",
        ConvISO8859_1:    "ISO-8859-1",
        ConvWindows1252:  "Windows-1252",
        ConvMacintosh:    "Macintosh",
        ConvCharmap:      "charmap",
        ConvLowercase:    "lowercase",
        ConvUppercase:    "uppercase",
        ConvTitlecase:    "titlecase",
        ConvASCII:        "ascii",
        ConvURL:          "url",
        ConvMapper:       "mapper",
    }
}

func (convertChoices) Type() string {
    return "string"
}

type cmapChoices struct{}

func (cmapChoices) Choices() []string {
    choices := make([]string, 1)
    i := 0
    for _, enc := range charmap.All {
        c, ok := enc.(*charmap.Charmap)
        if !ok {
            continue
        }
        name := strings.ReplaceAll(c.String(), " ", "-")
        if name == "" {
            name = fmt.Sprintf("unknown-%d", i)
        }
        Cmaps[i] = c
        choices = append(choices, name)
        i++
    }
    return choices
}

func (cmapChoices) Type() string {
    return "string"
}

func charmapByID(cm fs.Enum[cmapChoices]) *charmap.Charmap {
    c, ok := Cmaps[int(cm)]
    if ok {
        return c
    }
    return nil
}
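The `Choices()` method above uses a handy Go idiom: a slice literal indexed by the iota constants, so each enum value names itself and the compiler keeps names and values aligned. A stripped-down illustration:

```go
package main

import "fmt"

const (
	ConvNone = iota
	ConvToNFC
	ConvToNFD
)

// Indexing the slice literal by the constants keeps each name glued
// to its enum value.
var names = []string{
	ConvNone:  "none",
	ConvToNFC: "nfc",
	ConvToNFD: "nfd",
}

func main() {
	fmt.Println(names[ConvToNFC]) // nfc
}
```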
var commandDefinition = &cobra.Command{
    Use: "convmv dest:path --name-transform XXX",
    Short: `Convert file and directory names in place.`,
    // Warning¡ "¡" will be replaced by backticks below
    Use: "convmv source:path",
    Short: `Convert file and directory names`,
    // Warning! "|" will be replaced by backticks below
    Long: strings.ReplaceAll(`
convmv supports advanced path name transformations for converting and renaming files and directories by applying prefixes, suffixes, and other alterations.
This command renames files and directory names according to a user-supplied conversion.

`+transform.Help()+`Multiple transformations can be used in sequence, applied in the order they are specified on the command line.
It is useful for renaming a lot of files in an automated way.

The ¡--name-transform¡ flag is also available in ¡sync¡, ¡copy¡, and ¡move¡.
`+sprintList()+`

## Files vs Directories

By default ¡--name-transform¡ will only apply to file names. This means only the leaf file name will be transformed.
However some of the transforms would be better applied to the whole path or just directories.
To choose which part of the file path is affected some tags can be added to the ¡--name-transform¡.

| Tag | Effect |
|------|------|
| ¡file¡ | Only transform the leaf name of files (DEFAULT) |
| ¡dir¡ | Only transform name of directories - these may appear anywhere in the path |
| ¡all¡ | Transform the entire path for files and directories |

This is used by adding the tag into the transform name like this: ¡--name-transform file,prefix=ABC¡ or ¡--name-transform dir,prefix=DEF¡.

For some conversions using all is more likely to be useful, for example ¡--name-transform all,nfc¡.

Note that ¡--name-transform¡ may not add path separators ¡/¡ to the name. This will cause an error.

## Ordering and Conflicts

* Transformations will be applied in the order specified by the user.
* If the ¡file¡ tag is in use (the default) then only the leaf name of files will be transformed.
* If the ¡dir¡ tag is in use then directories anywhere in the path will be transformed.
* If the ¡all¡ tag is in use then directories and files anywhere in the path will be transformed.
* Each transformation will be run one path segment at a time.
* If a transformation adds a ¡/¡ or ends up with an empty path segment then that will be an error.
* It is up to the user to put the transformations in a sensible order.
* Conflicting transformations, such as ¡prefix¡ followed by ¡trimprefix¡ or ¡nfc¡ followed by ¡nfd¡, are possible.
* Instead of enforcing mutual exclusivity, transformations are applied in sequence as specified by the
user, allowing for intentional use cases (e.g., trimming one prefix before adding another).
* Users should be aware that certain combinations may lead to unexpected results and should verify
transformations using ¡--dry-run¡ before execution.

## Race Conditions and Non-Deterministic Behavior

Some transformations, such as ¡replace=old:new¡, may introduce conflicts where multiple source files map to the same destination name.
This can lead to race conditions when performing concurrent transfers. It is up to the user to anticipate these.
* If two files from the source are transformed into the same name at the destination, the final state may be non-deterministic.
* Running rclone check after a sync using such transformations may erroneously report missing or differing files due to overwritten results.

To minimize risks, users should:
* Carefully review transformations that may introduce conflicts.
* Use ¡--dry-run¡ to inspect changes before executing a sync (but keep in mind that it won't show the effect of non-deterministic transformations).
* Avoid transformations that cause multiple distinct source files to map to the same destination name.
* Consider disabling concurrency with ¡--transfers=1¡ if necessary.
* Certain transformations (e.g. ¡prefix¡) will have a multiplying effect every time they are used. Avoid these when using ¡bisync¡.
|
||||
|
||||
`, "¡", "`"),
|
||||
`, "|", "`"),
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.70",
|
||||
"groups": "Filter,Listing,Important,Copy",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fdst, srcFileName := cmd.NewFsFile(args[0])
|
||||
cmd.Run(false, true, command, func() error {
|
||||
if !transform.Transforming(context.Background()) {
|
||||
return errors.New("--name-transform must be set")
|
||||
}
|
||||
if srcFileName == "" {
|
||||
return sync.Transform(context.Background(), fdst, deleteEmptySrcDirs, createEmptySrcDirs)
|
||||
}
|
||||
return operations.TransformFile(context.Background(), fdst, srcFileName)
|
||||
fsrc, srcFileName := cmd.NewFsFile(args[0])
|
||||
cmd.Run(false, true, command, func() error { // retries switched off to prevent double-encoding
|
||||
return Convmv(context.Background(), fsrc, srcFileName)
|
||||
})
|
||||
},
|
||||
}
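
Both versions of the command use the same trick in their `Long` text: a Go raw string literal cannot contain a backtick, so a placeholder character (`|` or `¡`) is written instead and swapped in at runtime. A standalone illustration (not part of the patch):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Raw strings cannot escape backticks, so substitute a placeholder.
	help := strings.ReplaceAll(`Use |--dry-run| to preview changes.`, "|", "`")
	fmt.Println(help) // Use `--dry-run` to preview changes.
}
```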

// Convmv converts and renames files and directories
// pass srcFileName == "" to convmv every object in fsrc instead of a single object
func Convmv(ctx context.Context, f fs.Fs, srcFileName string) error {
	Opt.ctx = ctx
	Opt.f = f
	if Opt.List {
		printList()
		return nil
	}
	err := Opt.validate()
	if err != nil {
		return err
	}

	if srcFileName == "" {
		// it's a dir
		return walkConv(ctx, f, "")
	}
	// it's a file
	obj, err := f.NewObject(Opt.ctx, srcFileName)
	if err != nil {
		return err
	}
	oldName, newName, skip, err := parseEntry(obj)
	if err != nil {
		return err
	}
	if skip {
		return nil
	}
	return operations.MoveFile(Opt.ctx, Opt.f, Opt.f, newName, oldName)
}

func (opt *ConvOpt) validate() error {
	switch opt.ConvertAlgo {
	case ConvNone:
		return errors.New("must choose a conversion mode with -t flag")
	case ConvFindReplace:
		if len(opt.FindReplace) == 0 {
			return errors.New("must include --replace flag in replace mode")
		}
		for _, set := range opt.FindReplace {
			split := strings.Split(set, ",")
			if len(split) != 2 {
				return errors.New("--replace must include exactly two comma-separated values")
			}
			if split[0] == "" {
				return errors.New("'find' value cannot be blank ('replace' can be)")
			}
		}
	case ConvPrefix, ConvTrimPrefix:
		if opt.Prefix == "" {
			return errors.New("must include a --prefix")
		}
	case ConvSuffix, ConvTrimSuffix:
		if opt.Suffix == "" {
			return errors.New("must include a --suffix")
		}
	case ConvTruncate:
		if opt.Max < 1 {
			return errors.New("--max cannot be less than 1 in 'truncate' mode")
		}
	case ConvCharmap:
		if opt.CmapFlag == 0 {
			return errors.New("must specify a charmap with --charmap flag")
		}
		c := charmapByID(opt.CmapFlag)
		if c == nil {
			return errors.New("unknown charmap")
		}
		opt.Cmap = c
	}

	return nil
}

// walkConv lists dir and renames its entries, recursing into
// subdirectories via walkFunc
func walkConv(ctx context.Context, f fs.Fs, dir string) error {
	entries, err := list.DirSorted(ctx, f, false, dir)
	if err != nil {
		return err
	}
	return walkFunc(dir, entries, nil)
}

func walkFunc(path string, entries fs.DirEntries, err error) error {
	fs.Debugf(path, "walking dir")
	if err != nil {
		return err
	}
	for _, entry := range entries {
		switch x := entry.(type) {
		case fs.Object:
			oldName, newName, skip, err := parseEntry(x)
			if err != nil {
				return err
			}
			if skip {
				continue
			}
			fs.Debugf(x, "%v %v %v %v %v", Opt.ctx, Opt.f, Opt.f, newName, oldName)
			err = operations.MoveFile(Opt.ctx, Opt.f, Opt.f, newName, oldName)
			if err != nil {
				return err
			}
		case fs.Directory:
			oldName, newName, skip, err := parseEntry(x)
			if err != nil {
				return err
			}
			if !skip { // still want to recurse during dry-runs to get accurate logs
				err = DirMoveCaseInsensitive(Opt.ctx, Opt.f, oldName, newName)
				if err != nil {
					return err
				}
			} else {
				newName = oldName // otherwise dry-runs won't be able to find it
			}
			// recurse, calling it by its new name
			err = walkConv(Opt.ctx, Opt.f, newName)
			if err != nil {
				return err
			}
		}
	}
	return nil
}

// ConvertPath converts a path string according to the chosen ConvertAlgo.
// Each path segment is converted separately, to preserve path separators.
// If baseOnly is true, only the base will be converted (useful for renaming while walking a dir tree recursively),
// for example, "some/nested/path" -> "some/nested/CONVERTEDPATH".
// Otherwise, the entire path is converted.
func ConvertPath(s string, ConvertAlgo Convert, baseOnly bool) (string, error) {
	if s == "" || s == "/" || s == "\\" || s == "." {
		return "", nil
	}

	if baseOnly {
		convertedBase, err := ConvertPathSegment(filepath.Base(s), ConvertAlgo)
		return filepath.Join(filepath.Dir(s), convertedBase), err
	}

	segments := strings.Split(s, string(os.PathSeparator))
	convertedSegments := make([]string, 0, len(segments)) // length 0: elements are appended below, not indexed
	for _, seg := range segments {
		convSeg, err := ConvertPathSegment(seg, ConvertAlgo)
		if err != nil {
			return "", err
		}
		convertedSegments = append(convertedSegments, convSeg)
	}
	return filepath.Join(convertedSegments...), nil
}
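
A minimal standalone sketch (not part of the patch) of the per-segment strategy ConvertPath uses when baseOnly is false: split on the separator, convert each segment, and rejoin, so the `/` structure is preserved. Note the slice created with length 0 and capacity `len(segments)`, because elements are appended rather than indexed:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	s := "some/nested/path"
	segments := strings.Split(s, "/")
	converted := make([]string, 0, len(segments)) // length 0: we append below
	for _, seg := range segments {
		converted = append(converted, strings.ToUpper(seg))
	}
	fmt.Println(filepath.Join(converted...)) // SOME/NESTED/PATH
}
```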

// ConvertPathSegment converts one path segment (or really any string) according to the chosen ConvertAlgo.
// It assumes path separators have already been trimmed.
func ConvertPathSegment(s string, ConvertAlgo Convert) (string, error) {
	fs.Debugf(s, "converting")
	switch ConvertAlgo {
	case ConvNone:
		return s, nil
	case ConvToNFC:
		return norm.NFC.String(s), nil
	case ConvToNFD:
		return norm.NFD.String(s), nil
	case ConvToNFKC:
		return norm.NFKC.String(s), nil
	case ConvToNFKD:
		return norm.NFKD.String(s), nil
	case ConvBase64Encode:
		return base64.URLEncoding.EncodeToString([]byte(s)), nil // URLEncoding to avoid slashes
	case ConvBase64Decode:
		if s == ".DS_Store" {
			// pass through macOS metadata files, which are not valid base64
			return s, nil
		}
		b, err := base64.URLEncoding.DecodeString(s)
		return string(b), err
	case ConvFindReplace:
		oldNews := []string{}
		for _, pair := range Opt.FindReplace {
			split := strings.Split(pair, ",")
			oldNews = append(oldNews, split...)
		}
		replacer := strings.NewReplacer(oldNews...)
		return replacer.Replace(s), nil
	case ConvPrefix:
		return Opt.Prefix + s, nil
	case ConvSuffix:
		return s + Opt.Suffix, nil
	case ConvTrimPrefix:
		return strings.TrimPrefix(s, Opt.Prefix), nil
	case ConvTrimSuffix:
		return strings.TrimSuffix(s, Opt.Suffix), nil
	case ConvTruncate:
		if Opt.Max <= 0 {
			return s, nil
		}
		if utf8.RuneCountInString(s) <= Opt.Max {
			return s, nil
		}
		runes := []rune(s)
		return string(runes[:Opt.Max]), nil
	case ConvEncoder:
		return Opt.Enc.Encode(s), nil
	case ConvDecoder:
		return Opt.Enc.Decode(s), nil
	case ConvISO8859_1:
		return encodeWithReplacement(s, charmap.ISO8859_1), nil
	case ConvWindows1252:
		return encodeWithReplacement(s, charmap.Windows1252), nil
	case ConvMacintosh:
		return encodeWithReplacement(s, charmap.Macintosh), nil
	case ConvCharmap:
		return encodeWithReplacement(s, Opt.Cmap), nil
	case ConvLowercase:
		return strings.ToLower(s), nil
	case ConvUppercase:
		return strings.ToUpper(s), nil
	case ConvTitlecase:
		return strings.ToTitle(s), nil
	case ConvASCII:
		return toASCII(s), nil
	default:
		return "", errors.New("this option is not yet implemented")
	}
}
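
Why the base64 cases above use `URLEncoding` rather than `StdEncoding`: the standard alphabet can emit `/`, which a filesystem would treat as a path separator. A standalone demonstration (not part of the patch):

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	name := "???" // 0x3F bytes force the 63rd alphabet symbol
	fmt.Println(base64.StdEncoding.EncodeToString([]byte(name))) // Pz8/
	fmt.Println(base64.URLEncoding.EncodeToString([]byte(name))) // Pz8_
}
```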

func parseEntry(e fs.DirEntry) (oldName, newName string, skip bool, err error) {
	oldName = e.Remote()
	newName, err = ConvertPath(oldName, Opt.ConvertAlgo, true)
	if err != nil {
		fs.Errorf(oldName, "error converting: %v", err)
		return oldName, newName, true, err
	}
	if oldName == newName {
		fs.Debugf(oldName, "name is already correct - skipping")
		return oldName, newName, true, nil
	}
	skip = operations.SkipDestructive(Opt.ctx, oldName, "rename to "+newName)
	return oldName, newName, skip, nil
}

// DirMoveCaseInsensitive does DirMove in two steps (to temp name, then real name)
// which is necessary for some case-insensitive backends
func DirMoveCaseInsensitive(ctx context.Context, f fs.Fs, srcRemote, dstRemote string) (err error) {
	tmpDstRemote := dstRemote + "-rclone-move-" + random.String(8)
	err = operations.DirMove(ctx, f, srcRemote, tmpDstRemote)
	if err != nil {
		return err
	}
	return operations.DirMove(ctx, f, tmpDstRemote, dstRemote)
}

func encodeWithReplacement(s string, cmap *charmap.Charmap) string {
	return strings.Map(func(r rune) rune {
		b, ok := cmap.EncodeRune(r)
		if !ok {
			return '_'
		}
		return cmap.DecodeByte(b)
	}, s)
}

func toASCII(s string) string {
	return strings.Map(func(r rune) rune {
		if r <= 127 {
			return r
		}
		return -1
	}, s)
}
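
A standalone sketch (not part of the patch) showing what the two mapping helpers above produce for a sample name: runes the charmap cannot encode become `_`, and `toASCII` drops (returns -1 for) anything outside 7-bit ASCII:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/text/encoding/charmap"
)

func main() {
	s := "Café 🦊"

	iso := strings.Map(func(r rune) rune {
		b, ok := charmap.ISO8859_1.EncodeRune(r)
		if !ok {
			return '_' // unrepresentable in ISO-8859-1
		}
		return charmap.ISO8859_1.DecodeByte(b)
	}, s)
	fmt.Println(iso) // Café _

	ascii := strings.Map(func(r rune) rune {
		if r <= 127 {
			return r
		}
		return -1 // dropped from the output
	}, s)
	fmt.Println(ascii) // "Caf " - é and the fox are removed
}
```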

func sprintList() string {
	var out strings.Builder

	_, _ = out.WriteString(`### Conversion modes

The conversion mode |-t| or |--conv| flag must be specified. This
defines what transformation the |convmv| command will make.

`)
	for _, v := range Opt.ConvertAlgo.Choices() {
		_, _ = fmt.Fprintf(&out, "- `%s`\n", v)
	}
	_, _ = out.WriteRune('\n')

	_, _ = out.WriteString(`### Char maps

These are the choices for the |--charmap| flag.

`)
	for _, v := range Opt.CmapFlag.Choices() {
		_, _ = fmt.Fprintf(&out, "- `%s`\n", v)
	}
	_, _ = out.WriteRune('\n')

	_, _ = out.WriteString(`### Encoding masks

These are the valid options for the --encoding flag.

`)
	for _, v := range strings.Split(encoder.ValidStrings(), ", ") {
		_, _ = fmt.Fprintf(&out, "- `%s`\n", v)
	}
	_, _ = out.WriteRune('\n')

	sprintExamples(&out)

	return out.String()
}

func printList() {
	fmt.Println(sprintList())
}

87 cmd/convmv/convmv_examples.go Normal file
@@ -0,0 +1,87 @@
package convmv

import (
	"fmt"
	"strings"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/encoder"
)

type example struct {
	Opt  ConvOpt
	Path string
}

var examples = []example{
	{Path: `stories/The Quick Brown Fox!.txt`, Opt: ConvOpt{ConvertAlgo: ConvUppercase}},
	{Path: `stories/The Quick Brown Fox!.txt`, Opt: ConvOpt{ConvertAlgo: ConvFindReplace, FindReplace: []string{"Fox,Turtle", "Quick,Slow"}}},
	{Path: `stories/The Quick Brown Fox!.txt`, Opt: ConvOpt{ConvertAlgo: ConvBase64Encode}},
	{Path: `c3Rvcmllcw==/VGhlIFF1aWNrIEJyb3duIEZveCEudHh0`, Opt: ConvOpt{ConvertAlgo: ConvBase64Decode}},
	{Path: `stories/The Quick Brown 🦊 Fox Went to the Café!.txt`, Opt: ConvOpt{ConvertAlgo: ConvToNFC}},
	{Path: `stories/The Quick Brown 🦊 Fox Went to the Café!.txt`, Opt: ConvOpt{ConvertAlgo: ConvToNFD}},
	{Path: `stories/The Quick Brown 🦊 Fox!.txt`, Opt: ConvOpt{ConvertAlgo: ConvASCII}},
	{Path: `stories/The Quick Brown Fox!.txt`, Opt: ConvOpt{ConvertAlgo: ConvTrimSuffix, Suffix: ".txt"}},
	{Path: `stories/The Quick Brown Fox!.txt`, Opt: ConvOpt{ConvertAlgo: ConvPrefix, Prefix: "OLD_"}},
	{Path: `stories/The Quick Brown 🦊 Fox Went to the Café!.txt`, Opt: ConvOpt{ConvertAlgo: ConvCharmap, CmapFlag: 20}},
	{Path: `stories/The Quick Brown Fox: A Memoir [draft].txt`, Opt: ConvOpt{ConvertAlgo: ConvEncoder, Enc: encoder.EncodeColon | encoder.EncodeSquareBracket}},
	{Path: `stories/The Quick Brown 🦊 Fox Went to the Café!.txt`, Opt: ConvOpt{ConvertAlgo: ConvTruncate, Max: 21}},
}

func (e example) command() string {
	s := fmt.Sprintf(`rclone convmv %q -t %s`, e.Path, e.Opt.ConvertAlgo)
	switch e.Opt.ConvertAlgo {
	case ConvFindReplace:
		for _, r := range e.Opt.FindReplace {
			s += fmt.Sprintf(` -r %q`, r)
		}
	case ConvTrimPrefix, ConvPrefix:
		s += fmt.Sprintf(` --prefix %q`, e.Opt.Prefix)
	case ConvTrimSuffix, ConvSuffix:
		s += fmt.Sprintf(` --suffix %q`, e.Opt.Suffix)
	case ConvCharmap:
		s += fmt.Sprintf(` --charmap %q`, e.Opt.CmapFlag.String())
	case ConvEncoder:
		s += fmt.Sprintf(` --encoding %q`, e.Opt.Enc.String())
	case ConvTruncate:
		s += fmt.Sprintf(` --max %d`, e.Opt.Max)
	}
	return s
}

func (e example) output() string {
	_ = e.Opt.validate()
	Opt = e.Opt
	s, err := ConvertPath(e.Path, e.Opt.ConvertAlgo, false)
	if err != nil {
		fs.Errorf(s, "error: %v", err)
	}
	return s
}

// go run ./ convmv --help
func sprintExamples(out *strings.Builder) {
	_, _ = fmt.Fprintf(out, `### Examples:

Here are some examples of rclone convmv in action.

`)
	for _, e := range examples {
		_, _ = fmt.Fprintf(out, "```\n%s\n", e.command())
		_, _ = fmt.Fprintf(out, "// Output: %s\n```\n\n", e.output())
	}
	Opt = ConvOpt{} // reset
}

/* func sprintAllCharmapExamples() string {
	s := ""
	e := example{Path: `stories/The Quick Brown 🦊 Fox Went to the Café!.txt`, Opt: ConvOpt{ConvertAlgo: ConvCharmap, CmapFlag: 0}}
	for i := range Cmaps {
		e.Opt.CmapFlag++
		_ = e.Opt.validate()
		Opt = e.Opt
		s += fmt.Sprintf("%d Command: %s \n", i, e.command())
		s += fmt.Sprintf("Result: %s \n\n", e.output())
	}
	return s
} */
@@ -5,22 +5,22 @@ import (
	"cmp"
	"context"
	"fmt"
	"path"
	"path/filepath"
	"slices"
	"strings"
	"testing"

	_ "github.com/rclone/rclone/backend/all" // import all backends
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/filter"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/sync"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/encoder"
	"golang.org/x/text/unicode/norm"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/lib/transform"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"golang.org/x/text/unicode/norm"
)

// Some times used in the tests
@@ -34,114 +34,62 @@ func TestMain(m *testing.M) {
	fstest.TestMain(m)
}

func TestTransform(t *testing.T) {
func TestConvmv(t *testing.T) {
	type args struct {
		TransformOpt     []string
		TransformBackOpt []string
		Lossless         bool // whether the TransformBackAlgo is always losslessly invertible
		ConvertAlgo     fs.Enum[convertChoices]
		ConvertBackAlgo fs.Enum[convertChoices]
		Lossless        bool // whether the ConvertBackAlgo is always losslessly invertible
		ExtraOpt        ConvOpt
	}
	tests := []struct {
		name string
		args args
	}{
		{name: "NFC", args: args{
			TransformOpt:     []string{"all,nfc"},
			TransformBackOpt: []string{"all,nfd"},
			Lossless:         false,
		}},
		{name: "NFD", args: args{
			TransformOpt:     []string{"all,nfd"},
			TransformBackOpt: []string{"all,nfc"},
			Lossless:         false,
		}},
		{name: "base64", args: args{
			TransformOpt:     []string{"all,base64encode"},
			TransformBackOpt: []string{"all,base64encode"},
			Lossless:         false,
		}},
		{name: "prefix", args: args{
			TransformOpt:     []string{"all,prefix=PREFIX"},
			TransformBackOpt: []string{"all,trimprefix=PREFIX"},
			Lossless:         true,
		}},
		{name: "suffix", args: args{
			TransformOpt:     []string{"all,suffix=SUFFIX"},
			TransformBackOpt: []string{"all,trimsuffix=SUFFIX"},
			Lossless:         true,
		}},
		{name: "truncate", args: args{
			TransformOpt:     []string{"all,truncate=10"},
			TransformBackOpt: []string{"all,truncate=10"},
			Lossless:         false,
		}},
		{name: "encoder", args: args{
			TransformOpt:     []string{"all,encoder=Colon,SquareBracket"},
			TransformBackOpt: []string{"all,decoder=Colon,SquareBracket"},
			Lossless:         true,
		}},
		{name: "ISO-8859-1", args: args{
			TransformOpt:     []string{"all,ISO-8859-1"},
			TransformBackOpt: []string{"all,ISO-8859-1"},
			Lossless:         false,
		}},
		{name: "charmap", args: args{
			TransformOpt:     []string{"all,charmap=ISO-8859-7"},
			TransformBackOpt: []string{"all,charmap=ISO-8859-7"},
			Lossless:         false,
		}},
		{name: "lowercase", args: args{
			TransformOpt:     []string{"all,lowercase"},
			TransformBackOpt: []string{"all,lowercase"},
			Lossless:         false,
		}},
		{name: "ascii", args: args{
			TransformOpt:     []string{"all,ascii"},
			TransformBackOpt: []string{"all,ascii"},
			Lossless:         false,
		}},
		{name: "NFC", args: args{ConvertAlgo: ConvToNFC, ConvertBackAlgo: ConvToNFD, Lossless: false}},
		{name: "NFD", args: args{ConvertAlgo: ConvToNFD, ConvertBackAlgo: ConvToNFC, Lossless: false}},
		{name: "NFKC", args: args{ConvertAlgo: ConvToNFKC, ConvertBackAlgo: ConvToNFKD, Lossless: false}},
		{name: "NFKD", args: args{ConvertAlgo: ConvToNFKD, ConvertBackAlgo: ConvToNFKC, Lossless: false}},
		{name: "base64", args: args{ConvertAlgo: ConvBase64Encode, ConvertBackAlgo: ConvBase64Decode, Lossless: true}},
		{name: "replace", args: args{ConvertAlgo: ConvFindReplace, ConvertBackAlgo: ConvFindReplace, Lossless: true, ExtraOpt: ConvOpt{FindReplace: []string{"bread,banana", "pie,apple", "apple,pie", "banana,bread"}}}},
		{name: "prefix", args: args{ConvertAlgo: ConvPrefix, ConvertBackAlgo: ConvTrimPrefix, Lossless: true, ExtraOpt: ConvOpt{Prefix: "PREFIX"}}},
		{name: "suffix", args: args{ConvertAlgo: ConvSuffix, ConvertBackAlgo: ConvTrimSuffix, Lossless: true, ExtraOpt: ConvOpt{Suffix: "SUFFIX"}}},
		{name: "truncate", args: args{ConvertAlgo: ConvTruncate, ConvertBackAlgo: ConvTruncate, Lossless: false, ExtraOpt: ConvOpt{Max: 10}}},
		{name: "encoder", args: args{ConvertAlgo: ConvEncoder, ConvertBackAlgo: ConvDecoder, Lossless: true, ExtraOpt: ConvOpt{Enc: encoder.OS}}},
		{name: "ISO-8859-1", args: args{ConvertAlgo: ConvISO8859_1, ConvertBackAlgo: ConvISO8859_1, Lossless: false}},
		{name: "charmap", args: args{ConvertAlgo: ConvCharmap, ConvertBackAlgo: ConvCharmap, Lossless: false, ExtraOpt: ConvOpt{CmapFlag: 3}}},
		{name: "lowercase", args: args{ConvertAlgo: ConvLowercase, ConvertBackAlgo: ConvUppercase, Lossless: false}},
		{name: "ascii", args: args{ConvertAlgo: ConvASCII, ConvertBackAlgo: ConvASCII, Lossless: false}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			r := fstest.NewRun(t)
			defer r.Finalise()

			ctx := context.Background()
			r.Mkdir(ctx, r.Flocal)
			r.Mkdir(ctx, r.Fremote)
			items := makeTestFiles(t, r, "dir1")
			err := r.Fremote.Mkdir(ctx, "empty/empty")
			require.NoError(t, err)
			err = r.Flocal.Mkdir(ctx, "empty/empty")
			require.NoError(t, err)
			deleteDSStore(t, r)
			r.CheckRemoteListing(t, items, []string{"dir1", "empty", "empty/empty"})
			r.CheckLocalListing(t, items, []string{"dir1", "empty", "empty/empty"})
			r.CheckRemoteListing(t, items, nil)

			err = transform.SetOptions(ctx, tt.args.TransformOpt...)
			require.NoError(t, err)

			err = sync.Transform(ctx, r.Fremote, true, true)
			Opt = tt.args.ExtraOpt
			Opt.ConvertAlgo = tt.args.ConvertAlgo
			err := Convmv(context.Background(), r.Fremote, "")
			assert.NoError(t, err)
			compareNames(ctx, t, r, items)
			compareNames(t, r, items)

			transformedItems := transformItems(ctx, t, items)
			r.CheckRemoteListing(t, transformedItems, []string{transform.Path(ctx, "dir1", true), transform.Path(ctx, "empty", true), transform.Path(ctx, "empty/empty", true)})
			err = transform.SetOptions(ctx, tt.args.TransformBackOpt...)
			require.NoError(t, err)
			err = sync.Transform(ctx, r.Fremote, true, true)
			convertedItems := convertItems(t, items)
			Opt.ConvertAlgo = tt.args.ConvertBackAlgo
			err = Convmv(context.Background(), r.Fremote, "")
			assert.NoError(t, err)
			compareNames(ctx, t, r, transformedItems)
			compareNames(t, r, convertedItems)

			if tt.args.Lossless {
				deleteDSStore(t, r)
				r.CheckRemoteListing(t, items, []string{"dir1", "empty", "empty/empty"})
				r.CheckRemoteItems(t, items...)
			}
		})
	}
}
// const alphabet = "ƀɀɠʀҠԀڀڠݠހ߀ကႠᄀᄠᅀᆀᇠሀሠበዠጠᎠᏀᐠᑀᑠᒀᒠᓀᓠᔀᔠᕀᕠᖀᖠᗀᗠᘀᘠᙀᚠᛀកᠠᡀᣀᦀ᧠ᨠᯀᰀᴀ⇠⋀⍀⍠⎀⎠⏀␀─┠╀╠▀■◀◠☀☠♀♠⚀⚠⛀⛠✀✠❀➀➠⠀⠠⡀⡠⢀⢠⣀⣠⤀⤠⥀⥠⦠⨠⩀⪀⪠⫠⬀⬠⭀ⰀⲀⲠⳀⴀⵀ⺠⻀㇀㐀㐠㑀㑠㒀㒠㓀㓠㔀㔠㕀㕠㖀㖠㗀㗠㘀㘠㙀㙠㚀㚠㛀㛠㜀㜠㝀㝠㞀㞠㟀㟠㠀㠠㡀㡠㢀㢠㣀㣠㤀㤠㥀㥠㦀㦠㧀㧠㨀㨠㩀㩠㪀㪠㫀㫠㬀㬠㭀㭠㮀㮠㯀㯠㰀㰠㱀㱠㲀㲠㳀㳠㴀㴠㵀㵠㶀㶠㷀㷠㸀㸠㹀㹠㺀㺠㻀㻠㼀㼠㽀㽠㾀㾠㿀㿠䀀䀠䁀䁠䂀䂠䃀䃠䄀䄠䅀䅠䆀䆠䇀䇠䈀䈠䉀䉠䊀䊠䋀䋠䌀䌠䍀䍠䎀䎠䏀䏠䐀䐠䑀䑠䒀䒠䓀䓠䔀䔠䕀䕠䖀䖠䗀䗠䘀䘠䙀䙠䚀䚠䛀䛠䜀䜠䝀䝠䞀䞠䟀䟠䠀䠠䡀䡠䢀䢠䣀䣠䤀䤠䥀䥠䦀䦠䧀䧠䨀䨠䩀䩠䪀䪠䫀䫠䬀䬠䭀䭠䮀䮠䯀䯠䰀䰠䱀䱠䲀䲠䳀䳠䴀䴠䵀䵠䶀䷀䷠一丠乀习亀亠什仠伀传佀你侀侠俀俠倀倠偀偠傀傠僀僠儀儠兀兠冀冠净几刀删剀剠劀加勀勠匀匠區占厀厠叀叠吀吠呀呠咀咠哀哠唀唠啀啠喀喠嗀嗠嘀嘠噀噠嚀嚠囀因圀圠址坠垀垠埀埠堀堠塀塠墀墠壀壠夀夠奀奠妀妠姀姠娀娠婀婠媀媠嫀嫠嬀嬠孀孠宀宠寀寠尀尠局屠岀岠峀峠崀崠嵀嵠嶀嶠巀巠帀帠幀幠庀庠廀廠开张彀彠往徠忀忠怀怠恀恠悀悠惀惠愀愠慀慠憀憠懀懠戀戠所扠技抠拀拠挀挠捀捠掀掠揀揠搀搠摀摠撀撠擀擠攀攠敀敠斀斠旀无昀映晀晠暀暠曀曠最朠杀杠枀枠柀柠栀栠桀桠梀梠检棠椀椠楀楠榀榠槀槠樀樠橀橠檀檠櫀櫠欀欠歀歠殀殠毀毠氀氠汀池沀沠泀泠洀洠浀浠涀涠淀淠渀渠湀湠満溠滀滠漀漠潀潠澀澠激濠瀀瀠灀灠炀炠烀烠焀焠煀煠熀熠燀燠爀爠牀牠犀犠狀狠猀猠獀獠玀玠珀珠琀琠瑀瑠璀璠瓀瓠甀甠畀畠疀疠痀痠瘀瘠癀癠皀皠盀盠眀眠着睠瞀瞠矀矠砀砠础硠碀碠磀磠礀礠祀祠禀禠秀秠稀稠穀穠窀窠竀章笀笠筀筠简箠節篠簀簠籀籠粀粠糀糠紀素絀絠綀綠緀締縀縠繀繠纀纠绀绠缀缠罀罠羀羠翀翠耀耠聀聠肀肠胀胠脀脠腀腠膀膠臀臠舀舠艀艠芀芠苀苠茀茠荀荠莀莠菀菠萀萠葀葠蒀蒠蓀蓠蔀蔠蕀蕠薀薠藀藠蘀蘠虀虠蚀蚠蛀蛠蜀蜠蝀蝠螀螠蟀蟠蠀蠠血衠袀袠裀裠褀褠襀襠覀覠觀觠言訠詀詠誀誠諀諠謀謠譀譠讀讠诀诠谀谠豀豠貀負賀賠贀贠赀赠趀趠跀跠踀踠蹀蹠躀躠軀軠輀輠轀轠辀辠迀迠退造遀遠邀邠郀郠鄀鄠酀酠醀醠釀釠鈀鈠鉀鉠銀銠鋀鋠錀錠鍀鍠鎀鎠鏀鏠鐀鐠鑀鑠钀钠铀铠销锠镀镠門閠闀闠阀阠陀陠隀隠雀雠需霠靀靠鞀鞠韀韠頀頠顀顠颀颠飀飠餀餠饀饠馀馠駀駠騀騠驀驠骀骠髀髠鬀鬠魀魠鮀鮠鯀鯠鰀鰠鱀鱠鲀鲠鳀鳠鴀鴠鵀鵠鶀鶠鷀鷠鸀鸠鹀鹠麀麠黀黠鼀鼠齀齠龀龠ꀀꀠꁀꁠꂀꂠꃀꃠꄀꄠꅀꅠꆀꆠꇀꇠꈀꈠꉀꉠꊀꊠꋀꋠꌀꌠꍀꍠꎀꎠꏀꏠꐀꐠꑀꑠ꒠ꔀꔠꕀꕠꖀꖠꗀꗠꙀꚠꛀ꜀꜠ꝀꞀꡀ測試_Русский___ě_áñ"
const alphabet = "abcdefg123456789Ü"
const alphabet = "ƀɀɠʀҠԀڀڠݠހ߀ကႠᄀᄠᅀᆀᇠሀሠበዠጠᎠᏀᐠᑀᑠᒀᒠᓀᓠᔀᔠᕀᕠᖀᖠᗀᗠᘀᘠᙀᚠᛀកᠠᡀᣀᦀ᧠ᨠᯀᰀᴀ⇠⋀⍀⍠⎀⎠⏀␀─┠╀╠▀■◀◠☀☠♀♠⚀⚠⛀⛠✀✠❀➀➠⠀⠠⡀⡠⢀⢠⣀⣠⤀⤠⥀⥠⦠⨠⩀⪀⪠⫠⬀⬠⭀ⰀⲀⲠⳀⴀⵀ⺠⻀㇀㐀㐠㑀㑠㒀㒠㓀㓠㔀㔠㕀㕠㖀㖠㗀㗠㘀㘠㙀㙠㚀㚠㛀㛠㜀㜠㝀㝠㞀㞠㟀㟠㠀㠠㡀㡠㢀㢠㣀㣠㤀㤠㥀㥠㦀㦠㧀㧠㨀㨠㩀㩠㪀㪠㫀㫠㬀㬠㭀㭠㮀㮠㯀㯠㰀㰠㱀㱠㲀㲠㳀㳠㴀㴠㵀㵠㶀㶠㷀㷠㸀㸠㹀㹠㺀㺠㻀㻠㼀㼠㽀㽠㾀㾠㿀㿠䀀䀠䁀䁠䂀䂠䃀䃠䄀䄠䅀䅠䆀䆠䇀䇠䈀䈠䉀䉠䊀䊠䋀䋠䌀䌠䍀䍠䎀䎠䏀䏠䐀䐠䑀䑠䒀䒠䓀䓠䔀䔠䕀䕠䖀䖠䗀䗠䘀䘠䙀䙠䚀䚠䛀䛠䜀䜠䝀䝠䞀䞠䟀䟠䠀䠠䡀䡠䢀䢠䣀䣠䤀䤠䥀䥠䦀䦠䧀䧠䨀䨠䩀䩠䪀䪠䫀䫠䬀䬠䭀䭠䮀䮠䯀䯠䰀䰠䱀䱠䲀䲠䳀䳠䴀䴠䵀䵠䶀䷀䷠一丠乀习亀亠什仠伀传佀你侀侠俀俠倀倠偀偠傀傠僀僠儀儠兀兠冀冠净几刀删剀剠劀加勀勠匀匠區占厀厠叀叠吀吠呀呠咀咠哀哠唀唠啀啠喀喠嗀嗠嘀嘠噀噠嚀嚠囀因圀圠址坠垀垠埀埠堀堠塀塠墀墠壀壠夀夠奀奠妀妠姀姠娀娠婀婠媀媠嫀嫠嬀嬠孀孠宀宠寀寠尀尠局屠岀岠峀峠崀崠嵀嵠嶀嶠巀巠帀帠幀幠庀庠廀廠开张彀彠往徠忀忠怀怠恀恠悀悠惀惠愀愠慀慠憀憠懀懠戀戠所扠技抠拀拠挀挠捀捠掀掠揀揠搀搠摀摠撀撠擀擠攀攠敀敠斀斠旀无昀映晀晠暀暠曀曠最朠杀杠枀枠柀柠栀栠桀桠梀梠检棠椀椠楀楠榀榠槀槠樀樠橀橠檀檠櫀櫠欀欠歀歠殀殠毀毠氀氠汀池沀沠泀泠洀洠浀浠涀涠淀淠渀渠湀湠満溠滀滠漀漠潀潠澀澠激濠瀀瀠灀灠炀炠烀烠焀焠煀煠熀熠燀燠爀爠牀牠犀犠狀狠猀猠獀獠玀玠珀珠琀琠瑀瑠璀璠瓀瓠甀甠畀畠疀疠痀痠瘀瘠癀癠皀皠盀盠眀眠着睠瞀瞠矀矠砀砠础硠碀碠磀磠礀礠祀祠禀禠秀秠稀稠穀穠窀窠竀章笀笠筀筠简箠節篠簀簠籀籠粀粠糀糠紀素絀絠綀綠緀締縀縠繀繠纀纠绀绠缀缠罀罠羀羠翀翠耀耠聀聠肀肠胀胠脀脠腀腠膀膠臀臠舀舠艀艠芀芠苀苠茀茠荀荠莀莠菀菠萀萠葀葠蒀蒠蓀蓠蔀蔠蕀蕠薀薠藀藠蘀蘠虀虠蚀蚠蛀蛠蜀蜠蝀蝠螀螠蟀蟠蠀蠠血衠袀袠裀裠褀褠襀襠覀覠觀觠言訠詀詠誀誠諀諠謀謠譀譠讀讠诀诠谀谠豀豠貀負賀賠贀贠赀赠趀趠跀跠踀踠蹀蹠躀躠軀軠輀輠轀轠辀辠迀迠退造遀遠邀邠郀郠鄀鄠酀酠醀醠釀釠鈀鈠鉀鉠銀銠鋀鋠錀錠鍀鍠鎀鎠鏀鏠鐀鐠鑀鑠钀钠铀铠销锠镀镠門閠闀闠阀阠陀陠隀隠雀雠需霠靀靠鞀鞠韀韠頀頠顀顠颀颠飀飠餀餠饀饠馀馠駀駠騀騠驀驠骀骠髀髠鬀鬠魀魠鮀鮠鯀鯠鰀鰠鱀鱠鲀鲠鳀鳠鴀鴠鵀鵠鶀鶠鷀鷠鸀鸠鹀鹠麀麠黀黠鼀鼠齀齠龀龠ꀀꀠꁀꁠꂀꂠꃀꃠꄀꄠꅀꅠꆀꆠꇀꇠꈀꈠꉀꉠꊀꊠꋀꋠꌀꌠꍀꍠꎀꎠꏀꏠꐀꐠꑀꑠ꒠ꔀꔠꕀꕠꖀꖠꗀꗠꙀꚠꛀ꜀꜠ꝀꞀꡀ測試_Русский___ě_áñ"
var extras = []string{"apple", "banana", "appleappleapplebanana", "splitbananasplit"}
@@ -152,19 +100,17 @@ func makeTestFiles(t *testing.T, r *fstest.Run, dir string) []fstest.Item {
	items := []fstest.Item{}
	for _, c := range alphabet {
		var out strings.Builder
		for i := rune(0); i < 7; i++ {
		for i := rune(0); i < 32; i++ {
			out.WriteRune(c + i)
		}
		fileName := path.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String()))
		fileName := filepath.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String()))
		fileName = strings.ToValidUTF8(fileName, "")
		fileName = strings.NewReplacer(":", "", "<", "", ">", "", "?", "").Replace(fileName) // remove characters illegal on windows

		if debug != "" {
			fileName = debug
		}

		item := r.WriteObject(context.Background(), fileName, fileName, t1)
		r.WriteFile(fileName, fileName, t1)
		items = append(items, item)
		n++

@@ -175,7 +121,6 @@ func makeTestFiles(t *testing.T, r *fstest.Run, dir string) []fstest.Item {

	for _, extra := range extras {
		item := r.WriteObject(context.Background(), extra, extra, t1)
		r.WriteFile(extra, extra, t1)
		items = append(items, item)
	}

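A standalone sketch (not part of the patch) of the test-file naming scheme in makeTestFiles: for each starting rune in `alphabet`, a run of consecutive code points is written into the name, so each file exercises a different slice of Unicode:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	n := 0
	for _, c := range "ƀɀ" { // two sample starting runes
		var out strings.Builder
		for i := rune(0); i < 32; i++ {
			out.WriteRune(c + i)
		}
		fmt.Printf("%04d-%s.txt\n", n, out.String())
		n++
	}
}
```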
@@ -192,7 +137,7 @@ func deleteDSStore(t *testing.T, r *fstest.Run) {
	assert.NoError(t, err)
}

func compareNames(ctx context.Context, t *testing.T, r *fstest.Run, items []fstest.Item) {
func compareNames(t *testing.T, r *fstest.Run, items []fstest.Item) {
	var entries fs.DirEntries

	deleteDSStore(t, r)
@@ -213,8 +158,10 @@ func compareNames(ctx context.Context, t *testing.T, r *fstest.Run, items []fste

	// sort by CONVERTED name
	slices.SortStableFunc(items, func(a, b fstest.Item) int {
		aConv := transform.Path(ctx, a.Path, false)
		bConv := transform.Path(ctx, b.Path, false)
		aConv, err := ConvertPath(a.Path, Opt.ConvertAlgo, false)
		require.NoError(t, err, a.Path)
		bConv, err := ConvertPath(b.Path, Opt.ConvertAlgo, false)
		require.NoError(t, err, b.Path)
		return cmp.Compare(aConv, bConv)
	})
	slices.SortStableFunc(entries, func(a, b fs.DirEntry) int {
@@ -222,21 +169,23 @@ func compareNames(ctx context.Context, t *testing.T, r *fstest.Run, items []fste
	})

	for i, e := range entries {
		expect := transform.Path(ctx, items[i].Path, false)
		expect, err := ConvertPath(items[i].Path, Opt.ConvertAlgo, false)
		assert.NoError(t, err)
		msg := fmt.Sprintf("expected %v, got %v", detectEncoding(expect), detectEncoding(e.Remote()))
		assert.Equal(t, expect, e.Remote(), msg)
	}
}
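The index-by-index comparison above only works if both sides are in the same order, hence sorting items by their converted names. A minimal standalone illustration (not part of the patch) of sorting by a derived key with `slices.SortStableFunc`:

```go
package main

import (
	"cmp"
	"fmt"
	"slices"
	"strings"
)

func main() {
	items := []string{"b.txt", "A.txt", "a.txt"}
	// Sort by the converted (here: uppercased) name, stably.
	slices.SortStableFunc(items, func(a, b string) int {
		return cmp.Compare(strings.ToUpper(a), strings.ToUpper(b))
	})
	fmt.Println(items) // [A.txt a.txt b.txt]
}
```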
func transformItems(ctx context.Context, t *testing.T, items []fstest.Item) []fstest.Item {
	transformedItems := []fstest.Item{}
func convertItems(t *testing.T, items []fstest.Item) []fstest.Item {
	convertedItems := []fstest.Item{}
	for _, item := range items {
		newPath := transform.Path(ctx, item.Path, false)
		newPath, err := ConvertPath(item.Path, Opt.ConvertAlgo, false)
		assert.NoError(t, err)
		newItem := item
		newItem.Path = newPath
		transformedItems = append(transformedItems, newItem)
		convertedItems = append(convertedItems, newItem)
	}
	return transformedItems
	return convertedItems
}

func detectEncoding(s string) string {
@@ -251,25 +200,3 @@ func detectEncoding(s string) string {
	}
	return "OTHER"
}

func TestUnicodeEquivalence(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()

	ctx := context.Background()
	r.Mkdir(ctx, r.Fremote)
	const remote = "Über"
	item := r.WriteObject(ctx, remote, "", t1)

	obj, err := r.Fremote.NewObject(ctx, remote) // can't use r.CheckRemoteListing here as it forces NFC
	require.NoError(t, err)
	require.NotEmpty(t, obj)

	err = transform.SetOptions(ctx, "all,nfc")
	require.NoError(t, err)

	err = sync.Transform(ctx, r.Fremote, true, true)
	assert.NoError(t, err)
	item.Path = norm.NFC.String(item.Path)
	r.CheckRemoteListing(t, []fstest.Item{item}, nil)
}

@@ -43,7 +43,7 @@ Setting |--auto-filename| will attempt to automatically determine the
filename from the URL (after any redirections) and use it in the
destination path.

With |--header-filename| in addition, if a specific filename is
With |--auto-filename-header| in addition, if a specific filename is
set in HTTP headers, it will be used instead of the name from the URL.
With |--print-filename| in addition, the resulting file name will be
printed.

@@ -1,131 +0,0 @@
package gitannex

import (
	"fmt"
	"slices"
	"strings"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/fspath"
)

type configID int

const (
	configRemoteName configID = iota
	configPrefix
	configLayout
)

// configDefinition describes a configuration value required by this command. We
// use "GETCONFIG" messages to query git-annex for these values at runtime.
type configDefinition struct {
	id           configID
	names        []string
	description  string
	defaultValue string
}

const (
	defaultRclonePrefix = "git-annex-rclone"
	defaultRcloneLayout = "nodir"
)

var requiredConfigs = []configDefinition{
	{
		id:    configRemoteName,
		names: []string{"rcloneremotename", "target"},
		description: "Name of the rclone remote to use. " +
			"Must match a remote known to rclone. " +
			"(Note that rclone remotes are a distinct concept from git-annex remotes.)",
	},
	{
		id:    configPrefix,
		names: []string{"rcloneprefix", "prefix"},
		description: "Directory where rclone will write git-annex content. " +
			fmt.Sprintf("If not specified, defaults to %q. ", defaultRclonePrefix) +
			"This directory will be created on init if it does not exist.",
		defaultValue: defaultRclonePrefix,
	},
	{
		id:    configLayout,
		names: []string{"rclonelayout", "rclone_layout"},
		description: "Defines where, within the rcloneprefix directory, rclone will write git-annex content. " +
			fmt.Sprintf("Must be one of %v. ", allLayoutModes()) +
			fmt.Sprintf("If empty, defaults to %q.", defaultRcloneLayout),
		defaultValue: defaultRcloneLayout,
	},
}

func (c *configDefinition) getCanonicalName() string {
	if len(c.names) < 1 {
		panic(fmt.Errorf("configDefinition must have at least one name: %v", c))
	}
	return c.names[0]
}

// fullDescription returns a single-line, human-readable description for this
// config. The returned string begins with a list of synonyms and ends with
// `c.description`.
func (c *configDefinition) fullDescription() string {
	if len(c.names) <= 1 {
		return c.description
	}
	// Exclude the canonical name from the list of synonyms.
	synonyms := c.names[1:len(c.names)]
	commaSeparatedSynonyms := strings.Join(synonyms, ", ")
	return fmt.Sprintf("(synonyms: %s) %s", commaSeparatedSynonyms, c.description)
}

// validateRemoteName validates the "rcloneremotename" config that we receive
// from git-annex. It returns nil iff `value` is valid. Otherwise, it returns a
// descriptive error suitable for sending back to git-annex via stdout.
//
// The value is only valid when:
//  1. It is the exact name of an existing remote.
//  2. It is an fspath string that names an existing remote or a backend. The
//     string may include options, but it must not include a path. (That's what
//     the "rcloneprefix" config is for.)
//
// While backends are not remote names, per se, they are permitted for
// compatibility with [fstest]. We could guard this behavior behind
// [testing.Testing] to prevent users from specifying backend strings, but
// there's no obvious harm in permitting it.
func validateRemoteName(value string) error {
	remoteNames := config.GetRemoteNames()
	// Check whether `value` is an exact match for an existing remote.
	//
	// If we checked whether [cache.Get] returns [fs.ErrorNotFoundInConfigFile],
	// we would incorrectly identify file names as valid remote names. We also
	// avoid [config.FileSections] because it will miss remotes that are defined
	// by environment variables.
	if slices.Contains(remoteNames, value) {
		return nil
	}
	parsed, err := fspath.Parse(value)
	if err != nil {
		return fmt.Errorf("remote could not be parsed: %s", value)
	}
	if parsed.Path != "" {
		return fmt.Errorf("remote does not exist or incorrectly contains a path: %s", value)
	}
	// Now that we've established `value` is an fspath string that does not
	// include a path component, we only need to check whether it names an
	// existing remote or backend.
	if slices.Contains(remoteNames, parsed.Name) {
		return nil
	}
	maybeBackend := strings.HasPrefix(value, ":")
	if !maybeBackend {
		return fmt.Errorf("remote does not exist: %s", value)
	}
	// Strip the leading colon before searching for the backend. For instance,
	// search for "local" instead of ":local". Note that `parsed.Name` already
	// omits any config options baked into the string.
	trimmedBackendName := strings.TrimPrefix(parsed.Name, ":")
	if _, err = fs.Find(trimmedBackendName); err != nil {
		return fmt.Errorf("backend does not exist: %s", trimmedBackendName)
	}
	return nil
}
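
A hedged sketch (not part of the patch) of the `fspath.Parse` step that `validateRemoteName` relies on. The field behavior assumed here is taken from the comments above: `Name` holds the remote or backend name with any config options omitted, and `Path` holds a trailing path component, which this validator rejects:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/fspath"
)

func main() {
	parsed, err := fspath.Parse(":local,description=hello:/tmp/foo")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(parsed.Name) // backend name; options are not part of Name per the comments above
	fmt.Println(parsed.Path) // "/tmp/foo" - non-empty, so validateRemoteName would reject it
}
```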
@@ -28,11 +28,14 @@ import (
	"io"
	"os"
	"path/filepath"
	"slices"
	"strings"

	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/operations"
	"github.com/spf13/cobra"
)
@@ -107,6 +110,35 @@ func (m *messageParser) finalParameter() string {
	return param
}

// configDefinition describes a configuration value required by this command. We
// use "GETCONFIG" messages to query git-annex for these values at runtime.
type configDefinition struct {
	names        []string
	description  string
	destination  *string
	defaultValue *string
}

func (c *configDefinition) getCanonicalName() string {
	if len(c.names) < 1 {
		panic(fmt.Errorf("configDefinition must have at least one name: %v", c))
	}
	return c.names[0]
}

// fullDescription returns a single-line, human-readable description for this
// config. The returned string begins with a list of synonyms and ends with
// `c.description`.
func (c *configDefinition) fullDescription() string {
	if len(c.names) <= 1 {
		return c.description
	}
	// Exclude the canonical name from the list of synonyms.
	synonyms := c.names[1:len(c.names)]
	commaSeparatedSynonyms := strings.Join(synonyms, ", ")
	return fmt.Sprintf("(synonyms: %s) %s", commaSeparatedSynonyms, c.description)
}

// server contains this command's current state.
type server struct {
	reader *bufio.Reader
@@ -242,31 +274,81 @@ func (s *server) handleInitRemote() error {
		return fmt.Errorf("failed to get configs: %w", err)
	}

	if err := validateRemoteName(s.configRcloneRemoteName); err != nil {
		s.sendMsg(fmt.Sprintf("INITREMOTE-FAILURE %s", err))
		return fmt.Errorf("failed to init remote: %w", err)
	// Explicitly check whether [server.configRcloneRemoteName] names a remote.
	//
	// - We do not permit file paths in the remote name; that's what
	//   [s.configPrefix] is for. If we simply checked whether [cache.Get]
	//   returns [fs.ErrorNotFoundInConfigFile], we would incorrectly identify
	//   file names as valid remote names.
	//
	// - In order to support remotes defined by environment variables, we must
	//   use [config.GetRemoteNames] instead of [config.FileSections].
	trimmedName := strings.TrimSuffix(s.configRcloneRemoteName, ":")
	if slices.Contains(config.GetRemoteNames(), trimmedName) {
		s.sendMsg("INITREMOTE-SUCCESS")
		return nil
	}

	if mode := parseLayoutMode(s.configRcloneLayout); mode == layoutModeUnknown {
		err := fmt.Errorf("unknown layout mode: %s", s.configRcloneLayout)
		s.sendMsg(fmt.Sprintf("INITREMOTE-FAILURE %s", err))
		return fmt.Errorf("failed to init remote: %w", err)
	// Otherwise, check whether [server.configRcloneRemoteName] is actually a
	// backend string such as ":local:". These are not remote names, per se, but
	// they are permitted for compatibility with [fstest]. We could guard this
	// behavior behind [testing.Testing] to prevent users from specifying
	// backend strings, but there's no obvious harm in permitting it.
	maybeBackend := strings.HasPrefix(s.configRcloneRemoteName, ":")
	if !maybeBackend {
		s.sendMsg("INITREMOTE-FAILURE remote does not exist: " + s.configRcloneRemoteName)
		return fmt.Errorf("remote does not exist: %s", s.configRcloneRemoteName)
	}
	parsed, err := fspath.Parse(s.configRcloneRemoteName)
	if err != nil {
		s.sendMsg("INITREMOTE-FAILURE remote could not be parsed as a backend: " + s.configRcloneRemoteName)
		return fmt.Errorf("remote could not be parsed as a backend: %s", s.configRcloneRemoteName)
	}
	if parsed.Path != "" {
		s.sendMsg("INITREMOTE-FAILURE backend must not have a path: " + s.configRcloneRemoteName)
		return fmt.Errorf("backend must not have a path: %s", s.configRcloneRemoteName)
	}
	// Strip the leading colon and options before searching for the backend,
	// i.e. search for "local" instead of ":local,description=hello:/tmp/foo".
	trimmedBackendName := strings.TrimPrefix(parsed.Name, ":")
	if _, err = fs.Find(trimmedBackendName); err != nil {
		s.sendMsg("INITREMOTE-FAILURE backend does not exist: " + trimmedBackendName)
		return fmt.Errorf("backend does not exist: %s", trimmedBackendName)
	}

	s.sendMsg("INITREMOTE-SUCCESS")
	return nil
}

func (s *server) mustSetConfigValue(id configID, value string) {
	switch id {
	case configRemoteName:
		s.configRcloneRemoteName = value
	case configPrefix:
		s.configPrefix = value
	case configLayout:
		s.configRcloneLayout = value
	default:
		panic(fmt.Errorf("unhandled configId: %v", id))
// Get a list of configs with pointers to fields of `s`.
func (s *server) getRequiredConfigs() []configDefinition {
	defaultRclonePrefix := "git-annex-rclone"
	defaultRcloneLayout := "nodir"

	return []configDefinition{
		{
			[]string{"rcloneremotename", "target"},
			"Name of the rclone remote to use. " +
				"Must match a remote known to rclone. " +
				"(Note that rclone remotes are a distinct concept from git-annex remotes.)",
			&s.configRcloneRemoteName,
			nil,
		},
		{
			[]string{"rcloneprefix", "prefix"},
			"Directory where rclone will write git-annex content. " +
				fmt.Sprintf("If not specified, defaults to %q. ", defaultRclonePrefix) +
				"This directory will be created on init if it does not exist.",
			&s.configPrefix,
			&defaultRclonePrefix,
		},
		{
			[]string{"rclonelayout", "rclone_layout"},
			"Defines where, within the rcloneprefix directory, rclone will write git-annex content. " +
				fmt.Sprintf("Must be one of %v. ", allLayoutModes()) +
				fmt.Sprintf("If empty, defaults to %q.", defaultRcloneLayout),
			&s.configRcloneLayout,
			&defaultRcloneLayout,
		},
	}
}
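
A minimal standalone illustration (not part of the patch) of the pointer-destination pattern `getRequiredConfigs` adopts: each definition carries a pointer to the field it fills, so the query loop needs no per-config switch:

```go
package main

import "fmt"

type configDef struct {
	name         string
	destination  *string
	defaultValue *string
}

func main() {
	var remote, layout string
	def := "nodir"
	configs := []configDef{
		{"rcloneremotename", &remote, nil},
		{"rclonelayout", &layout, &def},
	}
	// Pretend these are the VALUE replies received from git-annex.
	received := map[string]string{"rcloneremotename": "myremote"}
	for _, c := range configs {
		if v := received[c.name]; v != "" {
			*c.destination = v
		} else if c.defaultValue != nil {
			*c.destination = *c.defaultValue
		}
	}
	fmt.Println(remote, layout) // myremote nodir
}
```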

@@ -278,8 +360,8 @@ func (s *server) queryConfigs() error {

	// Send a "GETCONFIG" message for each required config and parse git-annex's
	// "VALUE" response.
queryNextConfig:
	for _, config := range requiredConfigs {
	for _, config := range s.getRequiredConfigs() {
		var valueReceived bool
		// Try each of the config's names in sequence, starting with the
		// canonical name.
		for _, configName := range config.names {
@@ -295,15 +377,19 @@ queryNextConfig:
			return fmt.Errorf("failed to parse config value: %s %s", valueKeyword, message.line)
		}

		if value := message.finalParameter(); value != "" {
			s.mustSetConfigValue(config.id, value)
			continue queryNextConfig
		value := message.finalParameter()
		if value != "" {
			*config.destination = value
			valueReceived = true
			break
		}
	}
	if config.defaultValue == "" {
		return fmt.Errorf("did not receive a non-empty config value for %q", config.getCanonicalName())
	if !valueReceived {
		if config.defaultValue == nil {
			return fmt.Errorf("did not receive a non-empty config value for %q", config.getCanonicalName())
		}
		*config.destination = *config.defaultValue
	}
	s.mustSetConfigValue(config.id, config.defaultValue)
}

s.configsDone = true
@@ -322,7 +408,7 @@ func (s *server) handlePrepare() error {
// Git-annex is asking us to return the list of settings that we use. Keep this
// in sync with `handlePrepare()`.
func (s *server) handleListConfigs() {
	for _, config := range requiredConfigs {
	for _, config := range s.getRequiredConfigs() {
		s.sendMsg(fmt.Sprintf("CONFIG %s %s", config.getCanonicalName(), config.fullDescription()))
	}
	s.sendMsg("CONFIGEND")

@@ -10,6 +10,7 @@ import (
	"regexp"
	"runtime"
	"strings"
	"sync"
	"testing"
	"time"

@@ -190,10 +191,14 @@ func TestMessageParser(t *testing.T) {
}

func TestConfigDefinitionOneName(t *testing.T) {
	var parsed string
	var defaultValue = "abc"

	configFoo := configDefinition{
		names:        []string{"foo"},
		description:  "The foo config is utterly useless.",
		defaultValue: "abc",
		destination:  &parsed,
		defaultValue: &defaultValue,
	}

	assert.Equal(t, "foo",
@@ -205,10 +210,14 @@ func TestConfigDefinitionOneName(t *testing.T) {
}

func TestConfigDefinitionTwoNames(t *testing.T) {
	var parsed string
	var defaultValue = "abc"

	configFoo := configDefinition{
		names:        []string{"foo", "bar"},
		description:  "The foo config is utterly useless.",
		defaultValue: "abc",
		destination:  &parsed,
		defaultValue: &defaultValue,
	}

	assert.Equal(t, "foo",
@@ -220,10 +229,14 @@ func TestConfigDefinitionTwoNames(t *testing.T) {
}

func TestConfigDefinitionThreeNames(t *testing.T) {
	var parsed string
	var defaultValue = "abc"

	configFoo := configDefinition{
		names:        []string{"foo", "bar", "baz"},
		description:  "The foo config is utterly useless.",
		defaultValue: "abc",
		destination:  &parsed,
		defaultValue: &defaultValue,
	}

	assert.Equal(t, "foo",
@@ -239,9 +252,6 @@ type testState struct {
	server           *server
	mockStdinW       *io.PipeWriter
	mockStdoutReader *bufio.Reader
	// readLineTimeout is the maximum duration to wait for [server] to write a
	// line to the mock stdout.
	readLineTimeout time.Duration

	fstestRun  *fstest.Run
	remoteName string
@@ -260,11 +270,6 @@ func makeTestState(t *testing.T) testState {
		},
		mockStdinW:       stdinW,
		mockStdoutReader: bufio.NewReader(stdoutR),

		// The default readLineTimeout must be large enough to accommodate slow
		// operations on real remotes. Without a timeout, attempts to read a
		// line that's never written would block indefinitely.
		readLineTimeout: time.Second * 30,
	}
}

@@ -272,52 +277,18 @@ func (h *testState) requireRemoteIsEmpty() {
	h.fstestRun.CheckRemoteItems(h.t)
}

// readLineWithTimeout attempts to read a line from the mock stdout. Returns an
// error if the read operation times out or fails for any reason.
func (h *testState) readLineWithTimeout() (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), h.readLineTimeout)
	defer cancel()

	lineChan := make(chan string)
	errChan := make(chan error)

	go func() {
		line, err := h.mockStdoutReader.ReadString('\n')
		if err != nil {
			errChan <- err
		} else {
			lineChan <- line
		}
	}()

	select {
	case line := <-lineChan:
		return line, nil
	case err := <-errChan:
		return "", err
	case <-ctx.Done():
		return "", fmt.Errorf("attempt to read line timed out: %w", ctx.Err())
	}
}

// requireReadLineExact requires that a line matching wantLine can be read from
// the mock stdout.
func (h *testState) requireReadLineExact(wantLine string) {
	receivedLine, err := h.readLineWithTimeout()
func (h *testState) requireReadLineExact(line string) {
	receivedLine, err := h.mockStdoutReader.ReadString('\n')
	require.NoError(h.t, err)
	require.Equal(h.t, wantLine+"\n", receivedLine)
	require.Equal(h.t, line+"\n", receivedLine)
}

// requireReadLine requires that a line can be read from the mock stdout and
// returns the line.
func (h *testState) requireReadLine() string {
	receivedLine, err := h.readLineWithTimeout()
	receivedLine, err := h.mockStdoutReader.ReadString('\n')
	require.NoError(h.t, err)
	return receivedLine
}

// requireWriteLine requires that the given line is successfully written to the
// mock stdin.
func (h *testState) requireWriteLine(line string) {
	_, err := h.mockStdinW.Write([]byte(line + "\n"))
	require.NoError(h.t, err)
@@ -491,7 +462,7 @@ var fstestTestCases = []testCase{
			h.requireReadLineExact("GETCONFIG rcloneprefix")
			h.requireWriteLine("VALUE " + h.remotePrefix)
			h.requireReadLineExact("GETCONFIG rclonelayout")
			h.requireWriteLine("VALUE frankencase")
			h.requireWriteLine("VALUE foo")
			h.requireReadLineExact("PREPARE-SUCCESS")

			require.Equal(t, h.server.configRcloneRemoteName, h.remoteName)
@@ -501,35 +472,6 @@ var fstestTestCases = []testCase{
			require.NoError(t, h.mockStdinW.Close())
		},
	},
	{
		label: "HandlesPrepareWithUnknownLayout",
		testProtocolFunc: func(t *testing.T, h *testState) {
			h.requireReadLineExact("VERSION 1")
			h.requireWriteLine("EXTENSIONS INFO") // Advertise that we support the INFO extension
			h.requireReadLineExact("EXTENSIONS")

			require.True(t, h.server.extensionInfo)

			h.requireWriteLine("PREPARE")
			h.requireReadLineExact("GETCONFIG rcloneremotename")
			h.requireWriteLine("VALUE " + h.remoteName)
			h.requireReadLineExact("GETCONFIG rcloneprefix")
			h.requireWriteLine("VALUE " + h.remotePrefix)
			h.requireReadLineExact("GETCONFIG rclonelayout")
			h.requireWriteLine("VALUE nonexistentLayoutMode")
			h.requireReadLineExact("PREPARE-SUCCESS")

			require.Equal(t, h.server.configRcloneRemoteName, h.remoteName)
			require.Equal(t, h.server.configPrefix, h.remotePrefix)
			require.True(t, h.server.configsDone)

			h.requireWriteLine("INITREMOTE")
			h.requireReadLineExact("INITREMOTE-FAILURE unknown layout mode: nonexistentLayoutMode")

			require.NoError(t, h.mockStdinW.Close())
		},
		expectedError: "unknown layout mode: nonexistentLayoutMode",
	},
	{
		label: "HandlesPrepareWithNonexistentRemote",
		testProtocolFunc: func(t *testing.T, h *testState) {
@@ -545,7 +487,7 @@ var fstestTestCases = []testCase{
			h.requireReadLineExact("GETCONFIG rcloneprefix")
			h.requireWriteLine("VALUE " + h.remotePrefix)
			h.requireReadLineExact("GETCONFIG rclonelayout")
			h.requireWriteLine("VALUE frankencase")
			h.requireWriteLine("VALUE foo")
			h.requireReadLineExact("PREPARE-SUCCESS")

			require.Equal(t, h.server.configRcloneRemoteName, "thisRemoteDoesNotExist")
@@ -553,11 +495,11 @@ var fstestTestCases = []testCase{
			require.True(t, h.server.configsDone)

			h.requireWriteLine("INITREMOTE")
			h.requireReadLineExact("INITREMOTE-FAILURE remote does not exist or incorrectly contains a path: thisRemoteDoesNotExist")
			h.requireReadLineExact("INITREMOTE-FAILURE remote does not exist: thisRemoteDoesNotExist")

			require.NoError(t, h.mockStdinW.Close())
		},
		expectedError: "remote does not exist or incorrectly contains a path: thisRemoteDoesNotExist",
		expectedError: "remote does not exist: thisRemoteDoesNotExist",
	},
	{
		label: "HandlesPrepareWithPathAsRemote",
@@ -574,7 +516,7 @@ var fstestTestCases = []testCase{
			h.requireReadLineExact("GETCONFIG rcloneprefix")
			h.requireWriteLine("VALUE /foo")
			h.requireReadLineExact("GETCONFIG rclonelayout")
			h.requireWriteLine("VALUE frankencase")
			h.requireWriteLine("VALUE foo")
			h.requireReadLineExact("PREPARE-SUCCESS")

			require.Equal(t, h.server.configRcloneRemoteName, h.remotePrefix)
@@ -584,13 +526,13 @@ var fstestTestCases = []testCase{
			h.requireWriteLine("INITREMOTE")

			require.Regexp(t,
				regexp.MustCompile("^INITREMOTE-FAILURE remote does not exist or incorrectly contains a path: "),
				regexp.MustCompile("^INITREMOTE-FAILURE remote does not exist: "),
				h.requireReadLine(),
			)

			require.NoError(t, h.mockStdinW.Close())
		},
		expectedError: "remote does not exist or incorrectly contains a path:",
		expectedError: "remote does not exist:",
	},
	{
		label: "HandlesPrepareWithNonexistentBackendAsRemote",
@@ -602,7 +544,7 @@ var fstestTestCases = []testCase{
			h.requireReadLineExact("GETCONFIG rcloneprefix")
			h.requireWriteLine("VALUE /foo")
			h.requireReadLineExact("GETCONFIG rclonelayout")
			h.requireWriteLine("VALUE frankencase")
			h.requireWriteLine("VALUE foo")
			h.requireReadLineExact("PREPARE-SUCCESS")

			require.Equal(t, ":nonexistentBackend:", h.server.configRcloneRemoteName)
@@ -626,7 +568,7 @@ var fstestTestCases = []testCase{
			h.requireReadLineExact("GETCONFIG rcloneprefix")
			h.requireWriteLine("VALUE /foo")
			h.requireReadLineExact("GETCONFIG rclonelayout")
			h.requireWriteLine("VALUE frankencase")
			h.requireWriteLine("VALUE foo")
			h.requireReadLineExact("PREPARE-SUCCESS")

			require.Equal(t, ":local:", h.server.configRcloneRemoteName)
@@ -649,7 +591,7 @@ var fstestTestCases = []testCase{
			h.requireReadLineExact("GETCONFIG rcloneprefix")
			h.requireWriteLine("VALUE /foo")
			h.requireReadLineExact("GETCONFIG rclonelayout")
			h.requireWriteLine("VALUE frankencase")
			h.requireWriteLine("VALUE foo")
			h.requireReadLineExact("PREPARE-SUCCESS")

			require.Equal(t, ":local", h.server.configRcloneRemoteName)
@@ -657,11 +599,11 @@ var fstestTestCases = []testCase{
			require.True(t, h.server.configsDone)

			h.requireWriteLine("INITREMOTE")
			h.requireReadLineExact("INITREMOTE-FAILURE remote could not be parsed: :local")
			h.requireReadLineExact("INITREMOTE-FAILURE remote could not be parsed as a backend: :local")

			require.NoError(t, h.mockStdinW.Close())
		},
		expectedError: "remote could not be parsed:",
		expectedError: "remote could not be parsed as a backend:",
	},
	{
		label: "HandlesPrepareWithBackendContainingOptionsAsRemote",
@@ -673,7 +615,7 @@ var fstestTestCases = []testCase{
|
||||
h.requireReadLineExact("GETCONFIG rcloneprefix")
|
||||
h.requireWriteLine("VALUE /foo")
|
||||
h.requireReadLineExact("GETCONFIG rclonelayout")
|
||||
h.requireWriteLine("VALUE frankencase")
|
||||
h.requireWriteLine("VALUE foo")
|
||||
h.requireReadLineExact("PREPARE-SUCCESS")
|
||||
|
||||
require.Equal(t, ":local,description=banana:", h.server.configRcloneRemoteName)
|
||||
@@ -696,7 +638,7 @@ var fstestTestCases = []testCase{
|
||||
h.requireReadLineExact("GETCONFIG rcloneprefix")
|
||||
h.requireWriteLine("VALUE /foo")
|
||||
h.requireReadLineExact("GETCONFIG rclonelayout")
|
||||
h.requireWriteLine("VALUE frankencase")
|
||||
h.requireWriteLine("VALUE foo")
|
||||
h.requireReadLineExact("PREPARE-SUCCESS")
|
||||
|
||||
require.Equal(t, ":local,description=banana:/bad/path", h.server.configRcloneRemoteName)
|
||||
@@ -704,38 +646,14 @@ var fstestTestCases = []testCase{
|
||||
require.True(t, h.server.configsDone)
|
||||
|
||||
h.requireWriteLine("INITREMOTE")
|
||||
h.requireReadLineExact("INITREMOTE-FAILURE remote does not exist or incorrectly contains a path: :local,description=banana:/bad/path")
|
||||
|
||||
require.NoError(t, h.mockStdinW.Close())
|
||||
},
|
||||
expectedError: "remote does not exist or incorrectly contains a path:",
|
||||
},
|
||||
{
|
||||
label: "HandlesPrepareWithRemoteContainingOptions",
|
||||
testProtocolFunc: func(t *testing.T, h *testState) {
|
||||
const envVar = "RCLONE_CONFIG_fake_remote_TYPE"
|
||||
require.NoError(t, os.Setenv(envVar, "memory"))
|
||||
t.Cleanup(func() { require.NoError(t, os.Unsetenv(envVar)) })
|
||||
|
||||
h.requireReadLineExact("VERSION 1")
|
||||
h.requireWriteLine("PREPARE")
|
||||
h.requireReadLineExact("GETCONFIG rcloneremotename")
|
||||
h.requireWriteLine("VALUE fake_remote,banana=yes:")
|
||||
h.requireReadLineExact("GETCONFIG rcloneprefix")
|
||||
h.requireWriteLine("VALUE /foo")
|
||||
h.requireReadLineExact("GETCONFIG rclonelayout")
|
||||
h.requireWriteLine("VALUE frankencase")
|
||||
h.requireReadLineExact("PREPARE-SUCCESS")
|
||||
|
||||
require.Equal(t, "fake_remote,banana=yes:", h.server.configRcloneRemoteName)
|
||||
require.Equal(t, "/foo", h.server.configPrefix)
|
||||
require.True(t, h.server.configsDone)
|
||||
|
||||
h.requireWriteLine("INITREMOTE")
|
||||
h.requireReadLineExact("INITREMOTE-SUCCESS")
|
||||
require.Regexp(t,
|
||||
regexp.MustCompile("^INITREMOTE-FAILURE backend must not have a path: "),
|
||||
h.requireReadLine(),
|
||||
)
|
||||
|
||||
require.NoError(t, h.mockStdinW.Close())
|
||||
},
|
||||
expectedError: "backend must not have a path:",
|
||||
},
|
||||
{
|
||||
label: "HandlesPrepareWithSynonyms",
|
||||
@@ -756,7 +674,7 @@ var fstestTestCases = []testCase{
|
||||
h.requireReadLineExact("GETCONFIG rcloneprefix")
|
||||
h.requireWriteLine("VALUE " + h.remotePrefix)
|
||||
h.requireReadLineExact("GETCONFIG rclonelayout")
|
||||
h.requireWriteLine("VALUE frankencase")
|
||||
h.requireWriteLine("VALUE foo")
|
||||
h.requireReadLineExact("PREPARE-SUCCESS")
|
||||
|
||||
require.Equal(t, h.server.configRcloneRemoteName, h.remoteName)
|
||||
@@ -1363,46 +1281,6 @@ var fstestTestCases = []testCase{
|
||||
},
|
||||
}
|
||||
|
||||
// TestReadLineHasShortDeadline verifies that [testState.readLineWithTimeout]
|
||||
// does not block indefinitely when a line is never written.
|
||||
func TestReadLineHasShortDeadline(t *testing.T) {
|
||||
const timeoutForRead = time.Millisecond * 50
|
||||
const timeoutForTest = time.Millisecond * 100
|
||||
const tickDuration = time.Millisecond * 10
|
||||
|
||||
type readLineResult struct {
|
||||
line string
|
||||
err error
|
||||
}
|
||||
|
||||
resultChan := make(chan readLineResult)
|
||||
|
||||
go func() {
|
||||
defer close(resultChan)
|
||||
|
||||
h := makeTestState(t)
|
||||
h.readLineTimeout = timeoutForRead
|
||||
|
||||
line, err := h.readLineWithTimeout()
|
||||
resultChan <- readLineResult{line, err}
|
||||
}()
|
||||
|
||||
// This closure will be run periodically until time runs out or until all of
|
||||
// its assertions pass.
|
||||
idempotentConditionFunc := func(c *assert.CollectT) {
|
||||
result, ok := <-resultChan
|
||||
require.True(c, ok, "The goroutine should send a result")
|
||||
|
||||
require.Empty(c, result.line, "No line should be read")
|
||||
require.ErrorIs(c, result.err, context.DeadlineExceeded)
|
||||
|
||||
_, ok = <-resultChan
|
||||
require.False(c, ok, "The channel should be closed")
|
||||
}
|
||||
|
||||
require.EventuallyWithT(t, idempotentConditionFunc, timeoutForTest, tickDuration)
|
||||
}
|
||||
|
||||
// TestMain drives the tests
|
||||
func TestMain(m *testing.M) {
|
||||
fstest.TestMain(m)
|
||||
@@ -1433,27 +1311,23 @@ func TestGitAnnexFstestBackendCases(t *testing.T) {
|
||||
handle.remoteName = remoteName
|
||||
handle.remotePrefix = remotePath
|
||||
|
||||
serverErrorChan := make(chan error)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
go func() {
|
||||
// Run the gitannex server and send the result back to the
|
||||
// goroutine associated with `t`. We can't use `require` here
|
||||
// because it could call `t.FailNow()`, which says it must be
|
||||
// called on the goroutine associated with the test.
|
||||
serverErrorChan <- handle.server.run()
|
||||
err := handle.server.run()
|
||||
|
||||
if testCase.expectedError == "" {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.ErrorContains(t, err, testCase.expectedError)
|
||||
}
|
||||
|
||||
wg.Done()
|
||||
}()
|
||||
defer wg.Wait()
|
||||
|
||||
testCase.testProtocolFunc(t, &handle)
|
||||
|
||||
serverError, ok := <-serverErrorChan
|
||||
require.True(t, ok, "Should receive one error/nil from server")
|
||||
require.Empty(t, serverErrorChan)
|
||||
|
||||
if testCase.expectedError == "" {
|
||||
require.NoError(t, serverError)
|
||||
} else {
|
||||
require.ErrorContains(t, serverError, testCase.expectedError)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
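Aside: the last hunk above swaps between two ways of checking an error produced on a helper goroutine. As the deleted comment notes, require helpers that may call t.FailNow() must run on the test's own goroutine, so one side ships the error back over a channel and asserts in the test body. A minimal, self-contained sketch of that pattern (runServer here is a hypothetical stand-in for the real handle.server.run):

    package example

    import (
        "errors"
        "testing"
    )

    // runServer stands in for the real server's run method (hypothetical).
    func runServer() error {
        return errors.New("remote does not exist: thisRemoteDoesNotExist")
    }

    func TestServerError(t *testing.T) {
        errChan := make(chan error, 1)

        go func() {
            // Only send the result; t.FailNow must not be called here.
            errChan <- runServer()
        }()

        // ... drive the protocol under test here ...

        // Assert on the test goroutine, where failing the test is allowed.
        if err := <-errChan; err == nil {
            t.Fatal("expected an error from the server")
        }
    }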
@@ -191,6 +191,7 @@ func setupRootCommand(rootCmd *cobra.Command) {
     })

     cobra.OnInitialize(initConfig)
+
 }

 // Traverse the tree of commands running fn on each
@@ -6,8 +6,6 @@ package ncdu
 import (
     "context"
     "fmt"
-    "log/slog"
-    "os"
     "path"
     "reflect"
     "sort"
@@ -927,19 +925,23 @@ func (u *UI) Run() error {
         return fmt.Errorf("screen init: %w", err)
     }

-    // Hijack log output so that it doesn't corrupt the screen.
-    if !log.Redirected() {
-        var logs []string
-        log.Handler.SetOutput(func(level slog.Level, text string) {
+    // Hijack fs.LogOutput so that it doesn't corrupt the screen.
+    if logOutput := fs.LogOutput; !log.Redirected() {
+        type log struct {
+            text  string
+            level fs.LogLevel
+        }
+        var logs []log
+        fs.LogOutput = func(level fs.LogLevel, text string) {
             if len(logs) > 100 {
                 logs = logs[len(logs)-100:]
             }
-            logs = append(logs, text)
-        })
+            logs = append(logs, log{level: level, text: text})
+        }
         defer func() {
-            log.Handler.ResetOutput()
-            for _, text := range logs {
-                _, _ = os.Stderr.WriteString(text)
+            fs.LogOutput = logOutput
+            for i := range logs {
+                logOutput(logs[i].level, logs[i].text)
             }
         }()
     }
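Both sides of this ncdu hunk implement the same idea: while the TUI owns the terminal, the global log hook is swapped for one that buffers the most recent 100 entries, and a deferred function restores the hook and replays the buffer on exit. A minimal sketch of the shape, assuming only a plain package-level function variable (Output and withBufferedLogs are illustrative names, not rclone API):

    package example

    import "fmt"

    // Output is a package-level log hook, standing in for a global
    // like fs.LogOutput (assumption: a plain function variable).
    var Output = func(text string) { fmt.Println(text) }

    // withBufferedLogs redirects Output while fn runs, keeping only
    // the last max entries, and replays them once fn has finished.
    func withBufferedLogs(max int, fn func()) {
        saved := Output
        var buffered []string
        Output = func(text string) {
            if len(buffered) >= max {
                buffered = buffered[len(buffered)-max+1:]
            }
            buffered = append(buffered, text)
        }
        defer func() {
            Output = saved
            for _, text := range buffered {
                saved(text)
            }
        }()
        fn()
    }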
@@ -11,8 +11,6 @@ import (
     "testing"

     "github.com/rclone/rclone/cmd/serve/nfs"
-    "github.com/rclone/rclone/fs/object"
-    "github.com/rclone/rclone/vfs"
     "github.com/rclone/rclone/vfs/vfscommon"
     "github.com/rclone/rclone/vfs/vfstest"
     "github.com/stretchr/testify/require"
@@ -40,7 +38,7 @@ func TestMount(t *testing.T) {
     nfs.Opt.HandleCacheDir = t.TempDir()
     require.NoError(t, nfs.Opt.HandleCache.Set(cacheType))
     // Check we can create a handler
-    _, err := nfs.NewHandler(context.Background(), vfs.New(object.MemoryFs, nil), &nfs.Opt)
+    _, err := nfs.NewHandler(context.Background(), nil, &nfs.Opt)
    if errors.Is(err, nfs.ErrorSymlinkCacheNotSupported) || errors.Is(err, nfs.ErrorSymlinkCacheNoPermission) {
        t.Skip(err.Error() + ": run with: go test -c && sudo setcap cap_dac_read_search+ep ./nfsmount.test && ./nfsmount.test -test.v")
    }
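The TestMount context lines show a useful testing idiom: classify expected environment limitations with errors.Is against sentinel errors and skip the test with instructions rather than failing. A short sketch under assumed names (ErrNoPermission and newHandler are hypothetical, not the nfs package's API):

    package example

    import (
        "errors"
        "fmt"
        "testing"
    )

    // ErrNoPermission stands in for a sentinel such as
    // nfs.ErrorSymlinkCacheNoPermission.
    var ErrNoPermission = errors.New("no permission for symlink cache")

    func newHandler() error {
        // Imagine this needs an extra capability on Linux.
        return fmt.Errorf("creating handler: %w", ErrNoPermission)
    }

    func TestHandler(t *testing.T) {
        err := newHandler()
        if errors.Is(err, ErrNoPermission) {
            // An environment limitation, not a bug: skip with guidance.
            t.Skip(err.Error() + ": re-run with the required capability")
        }
        if err != nil {
            t.Fatal(err)
        }
    }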
@@ -5,11 +5,11 @@ package cmd
 import (
     "bytes"
     "fmt"
-    "log/slog"
     "strings"
     "sync"
     "time"

+    "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/accounting"
     "github.com/rclone/rclone/fs/log"
     "github.com/rclone/rclone/fs/operations"
@@ -19,6 +19,8 @@ import (
 const (
     // interval between progress prints
     defaultProgressInterval = 500 * time.Millisecond
+    // time format for logging
+    logTimeFormat = "2006/01/02 15:04:05"
 )

 // startProgress starts the progress bar printing
@@ -26,13 +28,15 @@ const (
 // It returns a func which should be called to stop the stats.
 func startProgress() func() {
     stopStats := make(chan struct{})
+    oldLogOutput := fs.LogOutput
     oldSyncPrint := operations.SyncPrintf

     if !log.Redirected() {
         // Intercept the log calls if not logging to file or syslog
-        log.Handler.SetOutput(func(level slog.Level, text string) {
-            printProgress(text)
-        })
+        fs.LogOutput = func(level fs.LogLevel, text string) {
+            printProgress(fmt.Sprintf("%s %-6s: %s", time.Now().Format(logTimeFormat), level, text))
+
+        }
     }

     // Intercept output from functions such as HashLister to stdout
@@ -56,10 +60,7 @@ func startProgress() func() {
         case <-stopStats:
             ticker.Stop()
             printProgress("")
-            if !log.Redirected() {
-                // Reset intercept of the log calls
-                log.Handler.ResetOutput()
-            }
+            fs.LogOutput = oldLogOutput
             operations.SyncPrintf = oldSyncPrint
             fmt.Println("")
             return
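Either way this is a save/patch/restore-on-stop shape: startProgress records the current log hook, routes log lines through the progress printer, and the stop path puts the original back. A minimal sketch of that shape with illustrative names (logOutput here is not rclone's variable):

    package example

    import "fmt"

    var logOutput = func(text string) { fmt.Println(text) }

    // startProgress redirects logOutput through a progress printer and
    // returns a stop function that restores the previous hook.
    func startProgress(print func(string)) (stop func()) {
        saved := logOutput
        logOutput = func(text string) {
            print(text) // route log lines through the progress display
        }
        return func() {
            logOutput = saved
        }
    }

    func main() {
        stop := startProgress(func(s string) { fmt.Println("progress:", s) })
        defer stop()
        logOutput("transferring...")
    }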
@@ -3,7 +3,6 @@ package dlna

 import (
     "bytes"
-    "context"
     "encoding/xml"
     "fmt"
     "net"
@@ -20,12 +19,9 @@ import (
     "github.com/anacrolix/dms/upnp"
     "github.com/anacrolix/log"
     "github.com/rclone/rclone/cmd"
-    "github.com/rclone/rclone/cmd/serve"
     "github.com/rclone/rclone/cmd/serve/dlna/data"
+    "github.com/rclone/rclone/cmd/serve/dlna/dlnaflags"
     "github.com/rclone/rclone/fs"
-    "github.com/rclone/rclone/fs/config/configstruct"
-    "github.com/rclone/rclone/fs/config/flags"
-    "github.com/rclone/rclone/fs/rc"
     "github.com/rclone/rclone/lib/systemd"
     "github.com/rclone/rclone/vfs"
     "github.com/rclone/rclone/vfs/vfscommon"
@@ -33,63 +29,9 @@ import (
     "github.com/spf13/cobra"
 )

-// OptionsInfo descripts the Options in use
-var OptionsInfo = fs.Options{{
-    Name:    "addr",
-    Default: ":7879",
-    Help:    "The ip:port or :port to bind the DLNA http server to",
-}, {
-    Name:    "name",
-    Default: "",
-    Help:    "Name of DLNA server",
-}, {
-    Name:    "log_trace",
-    Default: false,
-    Help:    "Enable trace logging of SOAP traffic",
-}, {
-    Name:    "interface",
-    Default: []string{},
-    Help:    "The interface to use for SSDP (repeat as necessary)",
-}, {
-    Name:    "announce_interval",
-    Default: fs.Duration(12 * time.Minute),
-    Help:    "The interval between SSDP announcements",
-}}
-
-// Options is the type for DLNA serving options.
-type Options struct {
-    ListenAddr       string      `config:"addr"`
-    FriendlyName     string      `config:"name"`
-    LogTrace         bool        `config:"log_trace"`
-    InterfaceNames   []string    `config:"interface"`
-    AnnounceInterval fs.Duration `config:"announce_interval"`
-}
-
-// Opt contains the options for DLNA serving.
-var Opt Options
-
 func init() {
-    fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "dlna", Opt: &Opt, Options: OptionsInfo})
-    flagSet := Command.Flags()
-    flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
-    vfsflags.AddFlags(flagSet)
-    serve.Command.AddCommand(Command)
-    serve.AddRc("dlna", func(ctx context.Context, f fs.Fs, in rc.Params) (serve.Handle, error) {
-        // Read VFS Opts
-        var vfsOpt = vfscommon.Opt // set default opts
-        err := configstruct.SetAny(in, &vfsOpt)
-        if err != nil {
-            return nil, err
-        }
-        // Read opts
-        var opt = Opt // set default opts
-        err = configstruct.SetAny(in, &opt)
-        if err != nil {
-            return nil, err
-        }
-        // Create server
-        return newServer(ctx, f, &opt, &vfsOpt)
-    })
+    dlnaflags.AddFlags(Command.Flags())
+    vfsflags.AddFlags(Command.Flags())
 }

 // Command definition for cobra.
@@ -111,19 +53,7 @@ Rclone will add external subtitle files (.srt) to videos if they have the same
 filename as the video file itself (except the extension), either in the same
 directory as the video, or in a "Subs" subdirectory.

-### Server options
-
-Use ` + "`--addr`" + ` to specify which IP address and port the server should
-listen on, e.g. ` + "`--addr 1.2.3.4:8000` or `--addr :8080`" + ` to listen to all
-IPs.
-
-Use ` + "`--name`" + ` to choose the friendly server name, which is by
-default "rclone (hostname)".
-
-Use ` + "`--log-trace` in conjunction with `-vv`" + ` to enable additional debug
-logging of all UPNP traffic.
-
-` + vfs.Help(),
+` + dlnaflags.Help + vfs.Help(),
     Annotations: map[string]string{
         "versionIntroduced": "v1.46",
         "groups":            "Filter",
@@ -133,12 +63,16 @@ logging of all UPNP traffic.
         f := cmd.NewFsSrc(args)

         cmd.Run(false, false, command, func() error {
-            s, err := newServer(context.Background(), f, &Opt, &vfscommon.Opt)
+            s, err := newServer(f, &dlnaflags.Opt)
             if err != nil {
                 return err
             }
+            if err := s.Serve(); err != nil {
+                return err
+            }
             defer systemd.Notify()()
-            return s.Serve()
+            s.Wait()
+            return nil
         })
     },
 }
@@ -174,7 +108,7 @@ type server struct {
     vfs *vfs.VFS
 }

-func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options) (*server, error) {
+func newServer(f fs.Fs, opt *dlnaflags.Options) (*server, error) {
     friendlyName := opt.FriendlyName
     if friendlyName == "" {
         friendlyName = makeDefaultFriendlyName()
@@ -203,7 +137,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
         waitChan:       make(chan struct{}),
         httpListenAddr: opt.ListenAddr,
         f:              f,
-        vfs:            vfs.New(f, vfsOpt),
+        vfs:            vfs.New(f, &vfscommon.Opt),
     }

     s.services = map[string]UPnPService{
@@ -234,19 +168,6 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
         http.FileServer(data.Assets))))
     s.handler = logging(withHeader("Server", serverField, r))

-    // Currently, the SSDP server only listens on an IPv4 multicast address.
-    // Differentiate between two INADDR_ANY addresses,
-    // so that 0.0.0.0 can only listen on IPv4 addresses.
-    network := "tcp4"
-    if strings.Count(s.httpListenAddr, ":") > 1 {
-        network = "tcp"
-    }
-    listener, err := net.Listen(network, s.httpListenAddr)
-    if err != nil {
-        return nil, err
-    }
-    s.HTTPConn = listener
-
     return s, nil
 }

@@ -367,9 +288,24 @@ func (s *server) resourceHandler(w http.ResponseWriter, r *http.Request) {
     http.ServeContent(w, r, remotePath, node.ModTime(), in)
 }

-// Serve runs the server - returns the error only if the listener was
-// not started. Blocks until the server is closed.
+// Serve runs the server - returns the error only if
+// the listener was not started; does not block, so
+// use s.Wait() to block on the listener indefinitely.
 func (s *server) Serve() (err error) {
+    if s.HTTPConn == nil {
+        // Currently, the SSDP server only listens on an IPv4 multicast address.
+        // Differentiate between two INADDR_ANY addresses,
+        // so that 0.0.0.0 can only listen on IPv4 addresses.
+        network := "tcp4"
+        if strings.Count(s.httpListenAddr, ":") > 1 {
+            network = "tcp"
+        }
+        s.HTTPConn, err = net.Listen(network, s.httpListenAddr)
+        if err != nil {
+            return
+        }
+    }
+
     go func() {
         s.startSSDP()
     }()
@@ -383,7 +319,6 @@ func (s *server) Serve() (err error) {
         }
     }()

-    s.Wait()
     return nil
 }

@@ -392,19 +327,13 @@ func (s *server) Wait() {
     <-s.waitChan
 }

-// Shutdown the DLNA server
-func (s *server) Shutdown() error {
+func (s *server) Close() {
     err := s.HTTPConn.Close()
-    close(s.waitChan)
     if err != nil {
-        return fmt.Errorf("failed to shutdown DLNA server: %w", err)
+        fs.Errorf(s.f, "Error closing HTTP server: %v", err)
+        return
     }
-    return nil
-}
-
-// Return the first address of the server
-func (s *server) Addr() net.Addr {
-    return s.HTTPConn.Addr()
+    close(s.waitChan)
 }

 // Run SSDP (multicast for server discovery) on all interfaces.
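The dlna hunks revolve around one lifecycle question: does Serve block until shutdown, or does it start the listener in the background and leave blocking to a separate Wait method released by Close? A minimal sketch of the non-blocking Serve/Wait/Close shape built on a waitChan, under assumed names (this is not the dlna server's actual implementation, just the pattern):

    package example

    import (
        "fmt"
        "net"
    )

    type server struct {
        listener net.Listener
        waitChan chan struct{} // closed to release Wait
    }

    func newServer(addr string) (*server, error) {
        l, err := net.Listen("tcp", addr)
        if err != nil {
            return nil, err
        }
        return &server{listener: l, waitChan: make(chan struct{})}, nil
    }

    // Serve starts accepting in the background; it does not block.
    func (s *server) Serve() {
        go func() {
            for {
                conn, err := s.listener.Accept()
                if err != nil {
                    return // listener closed
                }
                conn.Close()
            }
        }()
    }

    // Wait blocks until Close is called.
    func (s *server) Wait() { <-s.waitChan }

    // Close shuts the listener and releases any Wait callers.
    func (s *server) Close() {
        if err := s.listener.Close(); err != nil {
            fmt.Println("error closing listener:", err)
        }
        close(s.waitChan)
    }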
@@ -13,13 +13,11 @@ import (

     "github.com/anacrolix/dms/soap"

-    "github.com/rclone/rclone/cmd/serve/servetest"
     "github.com/rclone/rclone/fs/config/configfile"
-    "github.com/rclone/rclone/fs/rc"
     "github.com/rclone/rclone/vfs"
-    "github.com/rclone/rclone/vfs/vfscommon"

     _ "github.com/rclone/rclone/backend/local"
+    "github.com/rclone/rclone/cmd/serve/dlna/dlnaflags"
     "github.com/rclone/rclone/fs"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
@@ -35,14 +33,12 @@ const (
 )

 func startServer(t *testing.T, f fs.Fs) {
-    opt := Opt
+    opt := dlnaflags.Opt
     opt.ListenAddr = testBindAddress
     var err error
-    dlnaServer, err = newServer(context.Background(), f, &opt, &vfscommon.Opt)
+    dlnaServer, err = newServer(f, &opt)
     assert.NoError(t, err)
-    go func() {
-        assert.NoError(t, dlnaServer.Serve())
-    }()
+    assert.NoError(t, dlnaServer.Serve())
     baseURL = "http://" + dlnaServer.HTTPConn.Addr().String()
 }

@@ -275,10 +271,3 @@ func TestContentDirectoryBrowseDirectChildren(t *testing.T) {

     }
 }
-
-func TestRc(t *testing.T) {
-    servetest.TestRc(t, rc.Params{
-        "type":           "dlna",
-        "vfs_cache_mode": "off",
-    })
-}
cmd/serve/dlna/dlnaflags/dlnaflags.go (new file, 69 lines)
@@ -0,0 +1,69 @@
+// Package dlnaflags provides utility functionality to DLNA.
+package dlnaflags
+
+import (
+    "time"
+
+    "github.com/rclone/rclone/fs"
+    "github.com/rclone/rclone/fs/config/flags"
+    "github.com/spf13/pflag"
+)
+
+// Help contains the text for the command line help and manual.
+var Help = `### Server options
+
+Use ` + "`--addr`" + ` to specify which IP address and port the server should
+listen on, e.g. ` + "`--addr 1.2.3.4:8000` or `--addr :8080`" + ` to listen to all
+IPs.
+
+Use ` + "`--name`" + ` to choose the friendly server name, which is by
+default "rclone (hostname)".
+
+Use ` + "`--log-trace` in conjunction with `-vv`" + ` to enable additional debug
+logging of all UPNP traffic.
+
+`
+
+// OptionsInfo descripts the Options in use
+var OptionsInfo = fs.Options{{
+    Name:    "addr",
+    Default: ":7879",
+    Help:    "The ip:port or :port to bind the DLNA http server to",
+}, {
+    Name:    "name",
+    Default: "",
+    Help:    "Name of DLNA server",
+}, {
+    Name:    "log_trace",
+    Default: false,
+    Help:    "Enable trace logging of SOAP traffic",
+}, {
+    Name:    "interface",
+    Default: []string{},
+    Help:    "The interface to use for SSDP (repeat as necessary)",
+}, {
+    Name:    "announce_interval",
+    Default: fs.Duration(12 * time.Minute),
+    Help:    "The interval between SSDP announcements",
+}}
+
+func init() {
+    fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "dlna", Opt: &Opt, Options: OptionsInfo})
+}
+
+// Options is the type for DLNA serving options.
+type Options struct {
+    ListenAddr       string      `config:"addr"`
+    FriendlyName     string      `config:"name"`
+    LogTrace         bool        `config:"log_trace"`
+    InterfaceNames   []string    `config:"interface"`
+    AnnounceInterval fs.Duration `config:"announce_interval"`
+}
+
+// Opt contains the options for DLNA serving.
+var Opt Options
+
+// AddFlags add the command line flags for DLNA serving.
+func AddFlags(flagSet *pflag.FlagSet) {
+    flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
+}
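The idea in dlnaflags is to describe each option once (name, default, help, config key) and derive both the config entry and the command-line flag from that description. A hand-rolled sketch of the same mapping onto spf13/pflag, which is roughly what a helper like flags.AddFlagsFromOptions automates (the Options fields and defaults below are illustrative only):

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    // Options mirrors the shape of dlnaflags.Options: each field is
    // backed by a config key and surfaced as a --flag.
    type Options struct {
        ListenAddr   string
        FriendlyName string
    }

    // AddFlags registers one flag per option, defaulting from opt.
    func AddFlags(flagSet *pflag.FlagSet, opt *Options) {
        flagSet.StringVar(&opt.ListenAddr, "addr", opt.ListenAddr,
            "The ip:port or :port to bind the DLNA http server to")
        flagSet.StringVar(&opt.FriendlyName, "name", opt.FriendlyName,
            "Name of DLNA server")
    }

    func main() {
        opt := Options{ListenAddr: ":7879"}
        flagSet := pflag.NewFlagSet("dlna", pflag.ContinueOnError)
        AddFlags(flagSet, &opt)
        _ = flagSet.Parse([]string{"--name", "rclone (example)"})
        fmt.Println(opt.ListenAddr, opt.FriendlyName)
    }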
@@ -12,7 +12,6 @@ import (

     "github.com/rclone/rclone/cmd"
     "github.com/rclone/rclone/cmd/mountlib"
-    "github.com/rclone/rclone/cmd/serve"
     "github.com/rclone/rclone/fs/config/flags"
     "github.com/rclone/rclone/vfs"
     "github.com/rclone/rclone/vfs/vfsflags"
@@ -51,8 +50,6 @@ func init() {
     // Add common mount/vfs flags
     mountlib.AddFlags(cmdFlags)
     vfsflags.AddFlags(cmdFlags)
-    // Register with parent command
-    serve.Command.AddCommand(Command)
 }

 // Command definition for cobra
@@ -18,16 +18,13 @@ import (
     "time"

     "github.com/rclone/rclone/cmd"
-    "github.com/rclone/rclone/cmd/serve"
     "github.com/rclone/rclone/cmd/serve/proxy"
     "github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/accounting"
-    "github.com/rclone/rclone/fs/config/configstruct"
     "github.com/rclone/rclone/fs/config/flags"
     "github.com/rclone/rclone/fs/config/obscure"
     "github.com/rclone/rclone/fs/log"
-    "github.com/rclone/rclone/fs/rc"
     "github.com/rclone/rclone/vfs"
     "github.com/rclone/rclone/vfs/vfscommon"
     "github.com/rclone/rclone/vfs/vfsflags"
@@ -73,8 +70,8 @@ type Options struct {
     ListenAddr   string `config:"addr"`         // Port to listen on
     PublicIP     string `config:"public_ip"`    // Passive ports range
     PassivePorts string `config:"passive_port"` // Passive ports range
-    User         string `config:"user"`         // single username for basic auth if not using Htpasswd
-    Pass         string `config:"pass"`         // password for User
+    BasicUser    string `config:"user"`         // single username for basic auth if not using Htpasswd
+    BasicPass    string `config:"pass"`         // password for BasicUser
     TLSCert      string `config:"cert"`         // TLS PEM key (concatenation of certificate and CA certificate)
     TLSKey       string `config:"key"`          // TLS PEM Private key
 }
@@ -91,29 +88,6 @@ func init() {
     vfsflags.AddFlags(Command.Flags())
     proxyflags.AddFlags(Command.Flags())
     AddFlags(Command.Flags())
-    serve.Command.AddCommand(Command)
-    serve.AddRc("ftp", func(ctx context.Context, f fs.Fs, in rc.Params) (serve.Handle, error) {
-        // Read VFS Opts
-        var vfsOpt = vfscommon.Opt // set default opts
-        err := configstruct.SetAny(in, &vfsOpt)
-        if err != nil {
-            return nil, err
-        }
-        // Read Proxy Opts
-        var proxyOpt = proxy.Opt // set default opts
-        err = configstruct.SetAny(in, &proxyOpt)
-        if err != nil {
-            return nil, err
-        }
-        // Read opts
-        var opt = Opt // set default opts
-        err = configstruct.SetAny(in, &opt)
-        if err != nil {
-            return nil, err
-        }
-        // Create server
-        return newServer(ctx, f, &opt, &vfsOpt, &proxyOpt)
-    })
 }

 // Command definition for cobra
@@ -147,18 +121,18 @@ You can set a single username and password with the --user and --pass flags.
     },
     Run: func(command *cobra.Command, args []string) {
         var f fs.Fs
-        if proxy.Opt.AuthProxy == "" {
+        if proxyflags.Opt.AuthProxy == "" {
             cmd.CheckArgs(1, 1, command, args)
             f = cmd.NewFsSrc(args)
         } else {
             cmd.CheckArgs(0, 0, command, args)
         }
         cmd.Run(false, false, command, func() error {
-            s, err := newServer(context.Background(), f, &Opt, &vfscommon.Opt, &proxy.Opt)
+            s, err := newServer(context.Background(), f, &Opt)
             if err != nil {
                 return err
             }
-            return s.Serve()
+            return s.serve()
         })
     },
 }
@@ -183,7 +157,7 @@ func init() {
 var passivePortsRe = regexp.MustCompile(`^\s*\d+\s*-\s*\d+\s*$`)

 // Make a new FTP to serve the remote
-func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options, proxyOpt *proxy.Options) (*driver, error) {
+func newServer(ctx context.Context, f fs.Fs, opt *Options) (*driver, error) {
     host, port, err := net.SplitHostPort(opt.ListenAddr)
     if err != nil {
         return nil, fmt.Errorf("failed to parse host:port from %q", opt.ListenAddr)
@@ -198,11 +172,11 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
         ctx: ctx,
         opt: *opt,
     }
-    if proxy.Opt.AuthProxy != "" {
-        d.proxy = proxy.New(ctx, proxyOpt, vfsOpt)
+    if proxyflags.Opt.AuthProxy != "" {
+        d.proxy = proxy.New(ctx, &proxyflags.Opt)
         d.userPass = make(map[string]string, 16)
     } else {
-        d.globalVFS = vfs.New(f, vfsOpt)
+        d.globalVFS = vfs.New(f, &vfscommon.Opt)
     }
     d.useTLS = d.opt.TLSKey != ""

@@ -234,58 +208,20 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
     return d, nil
 }

-// Serve runs the FTP server until it is shutdown
-func (d *driver) Serve() error {
+// serve runs the ftp server
+func (d *driver) serve() error {
     fs.Logf(d.f, "Serving FTP on %s", d.srv.Hostname+":"+strconv.Itoa(d.srv.Port))
-    err := d.srv.ListenAndServe()
-    if err == ftp.ErrServerClosed {
-        err = nil
-    }
-    return err
+    return d.srv.ListenAndServe()
 }

-// Shutdown stops the ftp server
+// close stops the ftp server
 //
 //lint:ignore U1000 unused when not building linux
-func (d *driver) Shutdown() error {
+func (d *driver) close() error {
     fs.Logf(d.f, "Stopping FTP on %s", d.srv.Hostname+":"+strconv.Itoa(d.srv.Port))
     return d.srv.Shutdown()
 }

-// Return the first address of the server
-func (d *driver) Addr() net.Addr {
-    // The FTP server doesn't let us read the listener
-    // so we have to synthesize the net.Addr here.
-    // On errors we'll return a zero item or zero parts.
-    addr := &net.TCPAddr{}
-
-    // Split host and port
-    host, port, err := net.SplitHostPort(d.opt.ListenAddr)
-    if err != nil {
-        fs.Errorf(nil, "ftp: addr: invalid address format: %v", err)
-        return addr
-    }
-
-    // Parse port
-    addr.Port, err = strconv.Atoi(port)
-    if err != nil {
-        fs.Errorf(nil, "ftp: addr: invalid port number: %v", err)
-    }
-
-    // Resolve the host to an IP address.
-    ipAddrs, err := net.LookupIP(host)
-    if err != nil {
-        fs.Errorf(nil, "ftp: addr: failed to resolve host: %v", err)
-    } else if len(ipAddrs) == 0 {
-        fs.Errorf(nil, "ftp: addr: no IP addresses found for host: %s", host)
-    } else {
-        // Choose the first IP address.
-        addr.IP = ipAddrs[0]
-    }
-
-    return addr
-}
-
 // Logger ftp logger output formatted message
 type Logger struct{}

@@ -333,7 +269,7 @@ func (d *driver) CheckPasswd(sctx *ftp.Context, user, pass string) (ok bool, err
         d.userPass[user] = oPass
         d.userPassMu.Unlock()
     } else {
-        ok = d.opt.User == user && (d.opt.Pass == "" || d.opt.Pass == pass)
+        ok = d.opt.BasicUser == user && (d.opt.BasicPass == "" || d.opt.BasicPass == pass)
         if !ok {
             fs.Infof(nil, "login failed: bad credentials")
             return false, nil
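The removed ftp Addr method is a neat trick worth noting: when the underlying library hides its listener, a net.Addr can be synthesized from the configured "host:port" string by splitting it, parsing the port, and resolving the host. A standalone sketch of that idea (function name and error handling here are illustrative, not rclone's API):

    package main

    import (
        "fmt"
        "net"
        "strconv"
    )

    // synthesizeAddr rebuilds a net.TCPAddr from a "host:port" string
    // for servers that do not expose their listener. On failure it
    // returns a zero-valued addr, mirroring the removed method's intent.
    func synthesizeAddr(listenAddr string) *net.TCPAddr {
        addr := &net.TCPAddr{}

        host, port, err := net.SplitHostPort(listenAddr)
        if err != nil {
            return addr
        }

        addr.Port, _ = strconv.Atoi(port)

        // Resolve the host and take the first IP address.
        if ipAddrs, err := net.LookupIP(host); err == nil && len(ipAddrs) > 0 {
            addr.IP = ipAddrs[0]
        }
        return addr
    }

    func main() {
        fmt.Println(synthesizeAddr("localhost:2121"))
    }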
@@ -12,15 +12,12 @@ import (
     "testing"

     _ "github.com/rclone/rclone/backend/local"
-    "github.com/rclone/rclone/cmd/serve/proxy"
     "github.com/rclone/rclone/cmd/serve/servetest"
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/config/configmap"
     "github.com/rclone/rclone/fs/config/obscure"
-    "github.com/rclone/rclone/fs/rc"
     "github.com/rclone/rclone/lib/israce"
-    "github.com/rclone/rclone/vfs/vfscommon"
     "github.com/stretchr/testify/assert"
     ftp "goftp.io/server/v2"
 )
@@ -39,16 +36,19 @@ func TestFTP(t *testing.T) {
         opt := Opt
         opt.ListenAddr = testHOST + ":" + testPORT
         opt.PassivePorts = testPASSIVEPORTRANGE
-        opt.User = testUSER
-        opt.Pass = testPASS
+        opt.BasicUser = testUSER
+        opt.BasicPass = testPASS

-        w, err := newServer(context.Background(), f, &opt, &vfscommon.Opt, &proxy.Opt)
+        w, err := newServer(context.Background(), f, &opt)
         assert.NoError(t, err)

         quit := make(chan struct{})
         go func() {
-            assert.NoError(t, w.Serve())
+            err := w.serve()
             close(quit)
+            if err != ftp.ErrServerClosed {
+                assert.NoError(t, err)
+            }
         }()

         // Config for the backend we'll use to connect to the server
@@ -61,7 +61,7 @@ func TestFTP(t *testing.T) {
     }

     return config, func() {
-        err := w.Shutdown()
+        err := w.close()
         assert.NoError(t, err)
         <-quit
     }
@@ -69,13 +69,3 @@ func TestFTP(t *testing.T) {

     servetest.Run(t, "ftp", start)
 }
-
-func TestRc(t *testing.T) {
-    if israce.Enabled {
-        t.Skip("Skipping under race detector as underlying library is racy")
-    }
-    servetest.TestRc(t, rc.Params{
-        "type":           "ftp",
-        "vfs_cache_mode": "off",
-    })
-}
@@ -6,7 +6,6 @@ import (
     "errors"
     "fmt"
     "io"
-    "net"
     "net/http"
     "os"
     "path"
@@ -16,14 +15,10 @@ import (
     "github.com/go-chi/chi/v5/middleware"
     "github.com/rclone/rclone/cmd"
-    cmdserve "github.com/rclone/rclone/cmd/serve"
     "github.com/rclone/rclone/cmd/serve/proxy"
     "github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/accounting"
-    "github.com/rclone/rclone/fs/config/configstruct"
-    "github.com/rclone/rclone/fs/config/flags"
-    "github.com/rclone/rclone/fs/rc"
     libhttp "github.com/rclone/rclone/lib/http"
     "github.com/rclone/rclone/lib/http/serve"
     "github.com/rclone/rclone/lib/systemd"

@@ -33,12 +28,6 @@ import (
     "github.com/spf13/cobra"
 )

-// OptionsInfo describes the Options in use
-var OptionsInfo = fs.Options{}.
-    Add(libhttp.ConfigInfo).
-    Add(libhttp.AuthConfigInfo).
-    Add(libhttp.TemplateConfigInfo)
-
 // Options required for http server
 type Options struct {
     Auth libhttp.AuthConfig
@@ -56,42 +45,17 @@ var DefaultOpt = Options{
 // Opt is options set by command line flags
 var Opt = DefaultOpt

-func init() {
-    fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "http", Opt: &Opt, Options: OptionsInfo})
-}
-
+// flagPrefix is the prefix used to uniquely identify command line flags.
+// It is intentionally empty for this package.
+const flagPrefix = ""

 func init() {
     flagSet := Command.Flags()
-    flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
     libhttp.AddAuthFlagsPrefix(flagSet, flagPrefix, &Opt.Auth)
     libhttp.AddHTTPFlagsPrefix(flagSet, flagPrefix, &Opt.HTTP)
     libhttp.AddTemplateFlagsPrefix(flagSet, flagPrefix, &Opt.Template)
     vfsflags.AddFlags(flagSet)
     proxyflags.AddFlags(flagSet)
-    cmdserve.Command.AddCommand(Command)
-    cmdserve.AddRc("http", func(ctx context.Context, f fs.Fs, in rc.Params) (cmdserve.Handle, error) {
-        // Read VFS Opts
-        var vfsOpt = vfscommon.Opt // set default opts
-        err := configstruct.SetAny(in, &vfsOpt)
-        if err != nil {
-            return nil, err
-        }
-        // Read Proxy Opts
-        var proxyOpt = proxy.Opt // set default opts
-        err = configstruct.SetAny(in, &proxyOpt)
-        if err != nil {
-            return nil, err
-        }
-        // Read opts
-        var opt = Opt // set default opts
-        err = configstruct.SetAny(in, &opt)
-        if err != nil {
-            return nil, err
-        }
-        // Create server
-        return newServer(ctx, f, &opt, &vfsOpt, &proxyOpt)
-    })
 }

 // Command definition for cobra
@@ -117,7 +81,7 @@ control the stats printing.
     },
     Run: func(command *cobra.Command, args []string) {
         var f fs.Fs
-        if proxy.Opt.AuthProxy == "" {
+        if proxyflags.Opt.AuthProxy == "" {
             cmd.CheckArgs(1, 1, command, args)
             f = cmd.NewFsSrc(args)
         } else {
@@ -125,12 +89,14 @@ control the stats printing.
         }

         cmd.Run(false, true, command, func() error {
-            s, err := newServer(context.Background(), f, &Opt, &vfscommon.Opt, &proxy.Opt)
+            s, err := run(context.Background(), f, Opt)
             if err != nil {
                 fs.Fatal(nil, fmt.Sprint(err))
             }
+
             defer systemd.Notify()()
-            return s.Serve()
+            s.server.Wait()
+            return nil
         })
     },
 }
@@ -170,19 +136,19 @@ func (s *HTTP) auth(user, pass string) (value any, err error) {
     return VFS, err
 }

-func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Options, proxyOpt *proxy.Options) (s *HTTP, err error) {
+func run(ctx context.Context, f fs.Fs, opt Options) (s *HTTP, err error) {
     s = &HTTP{
         f:   f,
         ctx: ctx,
-        opt: *opt,
+        opt: opt,
     }

-    if proxyOpt.AuthProxy != "" {
-        s.proxy = proxy.New(ctx, proxyOpt, vfsOpt)
+    if proxyflags.Opt.AuthProxy != "" {
+        s.proxy = proxy.New(ctx, &proxyflags.Opt)
         // override auth
         s.opt.Auth.CustomAuthFn = s.auth
     } else {
-        s._vfs = vfs.New(f, vfsOpt)
+        s._vfs = vfs.New(f, &vfscommon.Opt)
     }

     s.server, err = libhttp.NewServer(ctx,
@@ -202,24 +168,9 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
     router.Get("/*", s.handler)
     router.Head("/*", s.handler)

-    return s, nil
-}
-
-// Serve HTTP until the server is shutdown
-func (s *HTTP) Serve() error {
     s.server.Serve()
-    s.server.Wait()
-    return nil
-}

-// Addr returns the first address of the server
-func (s *HTTP) Addr() net.Addr {
-    return s.server.Addr()
-}
-
-// Shutdown the server
-func (s *HTTP) Shutdown() error {
-    return s.server.Shutdown()
+    return s, nil
 }

 // handler reads incoming requests and dispatches them
@@ -12,13 +12,10 @@ import (
     "time"

     _ "github.com/rclone/rclone/backend/local"
-    "github.com/rclone/rclone/cmd/serve/proxy"
-    "github.com/rclone/rclone/cmd/serve/servetest"
+    "github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/filter"
-    "github.com/rclone/rclone/fs/rc"
     libhttp "github.com/rclone/rclone/lib/http"
-    "github.com/rclone/rclone/vfs/vfscommon"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
 )
@@ -42,16 +39,13 @@ func start(ctx context.Context, t *testing.T, f fs.Fs) (s *HTTP, testURL string)
         },
     }
     opts.HTTP.ListenAddr = []string{testBindAddress}
-    if proxy.Opt.AuthProxy == "" {
+    if proxyflags.Opt.AuthProxy == "" {
         opts.Auth.BasicUser = testUser
         opts.Auth.BasicPass = testPass
     }

-    s, err := newServer(ctx, f, &opts, &vfscommon.Opt, &proxy.Opt)
+    s, err := run(ctx, f, opts)
     require.NoError(t, err, "failed to start server")
-    go func() {
-        require.NoError(t, s.Serve())
-    }()

     urls := s.server.URLs()
     require.Len(t, urls, 1, "expected one URL")
@@ -116,9 +110,9 @@ func testGET(t *testing.T, useProxy bool) {
         cmd := "go run " + prog + " " + files

         // FIXME this is untidy setting a global variable!
-        proxy.Opt.AuthProxy = cmd
+        proxyflags.Opt.AuthProxy = cmd
         defer func() {
-            proxy.Opt.AuthProxy = ""
+            proxyflags.Opt.AuthProxy = ""
         }()

         f = nil
@@ -273,10 +267,3 @@ func TestGET(t *testing.T) {
 func TestAuthProxy(t *testing.T) {
     testGET(t, true)
 }
-
-func TestRc(t *testing.T) {
-    servetest.TestRc(t, rc.Params{
-        "type":           "http",
-        "vfs_cache_mode": "off",
-    })
-}
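Both sides of the testGET hunk set a package-level option for the duration of a test and restore it with a deferred reset — exactly what the in-source FIXME comment is complaining about. When a global cannot be avoided, a small helper built on t.Cleanup keeps the mutation from leaking between tests. A sketch under assumed names (Opt here merely stands in for a global like proxyflags.Opt):

    package example

    import "testing"

    // Opt stands in for a package-level options struct.
    var Opt struct{ AuthProxy string }

    // setAuthProxy mutates the global for one test and registers a
    // cleanup that restores the previous value.
    func setAuthProxy(t *testing.T, cmd string) {
        t.Helper()
        saved := Opt.AuthProxy
        Opt.AuthProxy = cmd
        t.Cleanup(func() { Opt.AuthProxy = saved })
    }

    func TestWithProxy(t *testing.T) {
        setAuthProxy(t, "go run ./auth_proxy_program.go")
        // ... exercise the server with the proxy configured ...
    }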
Some files were not shown because too many files have changed in this diff.