mirror of https://github.com/rclone/rclone.git synced 2025-12-06 00:03:32 +00:00

Compare commits


10 Commits

Author SHA1 Message Date
Nick Craig-Wood
ab60a77aba cluster: make workers write status and controller read the status
The controller will retry the batches if it loses contact with the
worker.
2025-10-16 15:50:52 +01:00
Nick Craig-Wood
09535a06f7 cluster: add docs 2025-10-16 15:48:32 +01:00
Nick Craig-Wood
173b720173 rcd: obey --cluster if set to run as a cluster server 2025-10-16 15:48:32 +01:00
Nick Craig-Wood
6660e6ec7c sync,move,copy: add --cluster support 2025-10-16 15:48:32 +01:00
Nick Craig-Wood
14c604335e cluster: implement --cluster and related flags FIXME WIP
Needs
- tests
2025-10-16 15:48:32 +01:00
Nick Craig-Wood
bfcb23b7b2 accounting: add AccountReadN for use in cluster 2025-10-16 15:48:32 +01:00
Nick Craig-Wood
46dbdb8cb7 fs: add NonDefaultRC for discovering options in use
This enables us to send rc messages with the config in use.
2025-10-16 15:48:32 +01:00
Nick Craig-Wood
17932fcc38 fs: move tests into correct files 2025-10-16 15:48:32 +01:00
Nick Craig-Wood
77faa787e1 rc: add NewJobFromBytes for reading jobs from non HTTP transactions 2025-10-16 15:48:32 +01:00
Nick Craig-Wood
0701dd55cd rc: add job/batch for sending batches of rc commands to run concurrently 2025-10-16 15:48:32 +01:00
166 changed files with 5396 additions and 7102 deletions

View File

@@ -291,7 +291,7 @@ jobs:
README.md
RELEASE.md
CODE_OF_CONDUCT.md
-docs/content/{authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md
+docs/content/{authors,bugs,changelog,cluster,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md
- name: Scan edits of autogenerated files
run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'

View File

@@ -183,7 +183,7 @@ jobs:
touch "/tmp/digests/${digest#sha256:}"
- name: Upload Image Digest
-uses: actions/upload-artifact@v5
+uses: actions/upload-artifact@v4
with:
name: digests-${{ env.PLATFORM }}
path: /tmp/digests/*
@@ -198,7 +198,7 @@ jobs:
steps:
- name: Download Image Digests
-uses: actions/download-artifact@v6
+uses: actions/download-artifact@v5
with:
path: /tmp/digests
pattern: digests-*

View File

@@ -19,11 +19,6 @@ linters:
- unconvert
# Configure checks. Mostly using defaults but with some commented exceptions.
settings:
govet:
enable-all: true
disable:
- fieldalignment
- shadow
staticcheck:
# With staticcheck there is only one setting, so to extend the implicit
# default value it must be explicitly included.

View File

@@ -621,7 +621,44 @@ in the web browser and the links (internal and external) all work.
## Adding a new s3 provider

[Please see the guide in the S3 backend directory](backend/s3/README.md).
It is quite easy to add a new S3 provider to rclone.

You'll need to modify the following files:

- `backend/s3/s3.go`
  - Add the provider to `providerOption` at the top of the file
  - Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
  - Exclude your provider from generic config questions (eg `region` and `endpoint`).
  - Add the provider to the `setQuirks` function - see the documentation there.
- `docs/content/s3.md`
  - Add the provider at the top of the page.
  - Add a section about the provider linked from there.
  - Make sure this is in alphabetical order in the `Providers` section.
  - Add a transcript of a trial `rclone config` session
    - Edit the transcript to remove things which might change in subsequent versions
  - **Do not** alter or add to the autogenerated parts of `s3.md`
  - **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
- `README.md` - this is the home page on GitHub
  - Add the provider and a link to the section you wrote in `docs/content/s3.md`
- `docs/content/_index.md` - this is the home page of rclone.org
  - Add the provider and a link to the section you wrote in `docs/content/s3.md`

When adding the provider, endpoints, quirks, docs etc. keep them in
alphabetical order by `Provider` name, but with `AWS` first and
`Other` last.

Once you've written the docs, run `make serve` and check they look OK
in the web browser and that the links (internal and external) all work.

Once you've written the code, test that `rclone config` works to your
satisfaction, and check that the integration tests pass with
`go test -v -remote NewS3Provider:`. You may need to adjust the quirks to
get them to pass. Some providers just can't pass the tests with control
characters in the names, so if these fail and the provider doesn't support
`urlEncodeListings` in the quirks then ignore them. Note that the
`SetTier` test may also fail on non-AWS providers.

For an example of adding an s3 provider see [eb3082a1](https://github.com/rclone/rclone/commit/eb3082a1ebdb76d5625f14cedec3f5154a5e7b10).
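
To make the checklist above concrete, here is a minimal sketch of the kind of
edits made in `backend/s3/s3.go`, assuming a hypothetical provider called
`FooBar`. The provider name, its endpoint and the quirk flags shown are
placeholders rather than real rclone code; take the exact fields and
variables from the current `providerOption` and `setQuirks` source.

```go
// Illustrative sketch only - "FooBar" and its endpoint are hypothetical.

// 1. Add the provider to the provider option examples, keeping the list
//    alphabetical with AWS first and Other last.
Examples: []fs.OptionExample{{
	Value: "AWS",
	Help:  "Amazon Web Services (AWS) S3",
}, {
	Value: "FooBar", // hypothetical new provider
	Help:  "FooBar Object Storage",
}, /* ... other providers ... */ {
	Value: "Other",
	Help:  "Any other S3 compatible provider",
}},

// 2. Add provider-specific config (eg an endpoint option) gated on the
//    provider name; the generic region/endpoint questions exclude the new
//    provider via their own Provider lists.
{
	Name:     "endpoint",
	Help:     "Endpoint for FooBar Object Storage.",
	Provider: "FooBar",
	Examples: []fs.OptionExample{{
		Value: "s3.foobar.example.com", // hypothetical endpoint
		Help:  "Global endpoint",
	}},
},

// 3. Describe the provider's behaviour in setQuirks - these flags are
//    examples of the sort of quirks set there, not a definitive list.
case "FooBar":
	virtualHostStyle = false
	urlEncodeListings = false
	useMultipartEtag = false
```

With edits along these lines in place, `rclone config` should offer the new
provider and only ask the gated questions when it is selected.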
## Writing a plugin

View File

@@ -34,7 +34,6 @@ directories to and from different cloud storage providers.
- China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
- Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
- Cubbit DS3 [:page_facing_up:](https://rclone.org/s3/#Cubbit)
- DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
- Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
- Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
@@ -107,7 +106,6 @@ directories to and from different cloud storage providers.
- Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
- SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
- Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
- Servercore Object Storage [:page_facing_up:](https://rclone.org/s3/#servercore)
- SFTP [:page_facing_up:](https://rclone.org/sftp/)
- SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
- Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
@@ -131,7 +129,6 @@ Please see [the full list of all storage providers and their features](https://r
These backends adapt or modify other storage providers
- Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
- Archive: read archive files [:page_facing_up:](https://rclone.org/archive/)
- Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
- Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
- Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)

View File

@@ -4,7 +4,6 @@ package all
import (
// Active file systems
_ "github.com/rclone/rclone/backend/alias"
_ "github.com/rclone/rclone/backend/archive"
_ "github.com/rclone/rclone/backend/azureblob"
_ "github.com/rclone/rclone/backend/azurefiles"
_ "github.com/rclone/rclone/backend/b2"

View File

@@ -1,679 +0,0 @@
//go:build !plan9
// Package archive implements a backend to access archive files in a remote
package archive
// FIXME factor common code between backends out - eg VFS initialization
// FIXME can we generalize the VFS handle caching and use it in zip backend
// Factor more stuff out if possible
// Odd stats which are probably coming from the VFS
// * tensorflow.sqfs: 0% /3.074Gi, 204.426Ki/s, 4h22m46s
// FIXME this will perform poorly for unpacking as the VFS Reader is bad
// at multiple streams - need cache mode setting?
import (
"context"
"errors"
"fmt"
"io"
"path"
"strings"
"sync"
"time"
// Import all the required archivers here
_ "github.com/rclone/rclone/backend/archive/squashfs"
_ "github.com/rclone/rclone/backend/archive/zip"
"github.com/rclone/rclone/backend/archive/archiver"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
)
// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "archive",
Description: "Read archives",
NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
Help: `Any metadata supported by the underlying remote is read and written.`,
},
Options: []fs.Option{{
Name: "remote",
Help: `Remote to wrap to read archives from.
Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
"myremote:bucket" or "myremote:".
If this is left empty, then the archive backend will use the root as
the remote.
This means that you can use :archive:remote:path and it will be
equivalent to setting remote="remote:path".
`,
Required: false,
}},
}
fs.Register(fsi)
}
// Options defines the configuration for this backend
type Options struct {
Remote string `config:"remote"`
}
// Fs represents an archive backend wrapping another remote
type Fs struct {
name string // name of this remote
features *fs.Features // optional features
opt Options // options for this Fs
root string // the path we are working on
f fs.Fs // remote we are wrapping
wrapper fs.Fs // fs that wraps us
mu sync.Mutex // protects the below
archives map[string]*archive // the archives we have, by path
}
// A single open archive
type archive struct {
archiver archiver.Archiver // archiver responsible
remote string // path to the archive
prefix string // prefix to add on to listings
root string // root of the archive to remove from listings
mu sync.Mutex // protects the following variables
f fs.Fs // the archive Fs, may be nil
}
// If remote is an archive then return it otherwise return nil
func findArchive(remote string) *archive {
// FIXME use something faster than linear search?
for _, archiver := range archiver.Archivers {
if strings.HasSuffix(remote, archiver.Extension) {
return &archive{
archiver: archiver,
remote: remote,
prefix: remote,
root: "",
}
}
}
return nil
}
// Find an archive buried in remote
func subArchive(remote string) *archive {
archive := findArchive(remote)
if archive != nil {
return archive
}
parent := path.Dir(remote)
if parent == "/" || parent == "." {
return nil
}
return subArchive(parent)
}
// If remote is an archive then return it otherwise return nil
func (f *Fs) findArchive(remote string) (archive *archive) {
archive = findArchive(remote)
if archive != nil {
f.mu.Lock()
f.archives[remote] = archive
f.mu.Unlock()
}
return archive
}
// Instantiate archive if it hasn't been instantiated yet
//
// This is done lazily so that we can list a directory full of
// archives without opening them all.
func (a *archive) init(ctx context.Context, f fs.Fs) (fs.Fs, error) {
a.mu.Lock()
defer a.mu.Unlock()
if a.f != nil {
return a.f, nil
}
newFs, err := a.archiver.New(ctx, f, a.remote, a.prefix, a.root)
if err != nil && err != fs.ErrorIsFile {
return nil, fmt.Errorf("failed to create archive %q: %w", a.remote, err)
}
a.f = newFs
return a.f, nil
}
// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) {
// defer log.Trace(nil, "name=%q, root=%q, m=%v", name, root, m)("f=%+v, err=%v", &outFs, &err)
// Parse config into Options struct
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
remote := opt.Remote
origRoot := root
// If remote is empty, use the root instead
if remote == "" {
remote = root
root = ""
}
isDirectory := strings.HasSuffix(remote, "/")
remote = strings.TrimRight(remote, "/")
if remote == "" {
remote = "/"
}
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point archive remote at itself - check the value of the upstreams setting")
}
_ = isDirectory
foundArchive := subArchive(remote)
if foundArchive != nil {
fs.Debugf(nil, "Found archiver for %q remote %q", foundArchive.archiver.Extension, foundArchive.remote)
// Archive path
foundArchive.root = strings.Trim(remote[len(foundArchive.remote):], "/")
// Path to the archive
archiveRemote := remote[:len(foundArchive.remote)]
// Remote is archive leaf name
foundArchive.remote = path.Base(archiveRemote)
foundArchive.prefix = ""
// Point remote to archive file
remote = archiveRemote
}
// Make sure to remove trailing . referring to the current dir
if path.Base(root) == "." {
root = strings.TrimSuffix(root, ".")
}
remotePath := fspath.JoinRootPath(remote, root)
wrappedFs, err := cache.Get(ctx, remotePath)
if err != fs.ErrorIsFile && err != nil {
return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
}
f := &Fs{
name: name,
//root: path.Join(remotePath, root),
root: origRoot,
opt: *opt,
f: wrappedFs,
archives: make(map[string]*archive),
}
cache.PinUntilFinalized(f.f, f)
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
f.features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,
ReadMimeType: true,
WriteMimeType: true,
CanHaveEmptyDirectories: true,
BucketBased: true,
SetTier: true,
GetTier: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
if foundArchive != nil {
fs.Debugf(f, "Root is an archive")
if err != fs.ErrorIsFile {
return nil, fmt.Errorf("expecting to find a file at %q", remote)
}
return foundArchive.init(ctx, f.f)
}
// Correct root if definitely pointing to a file
if err == fs.ErrorIsFile {
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
return f, err
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("archive root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.f.Rmdir(ctx, dir)
}
// Hashes returns the hash sets supported by the wrapped remote
func (f *Fs) Hashes() hash.Set {
return f.f.Hashes()
}
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return f.f.Mkdir(ctx, dir)
}
// Purge all files in the directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
do := f.f.Features().Purge
if do == nil {
return fs.ErrorCantPurge
}
return do(ctx, dir)
}
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
do := f.f.Features().Copy
if do == nil {
return nil, fs.ErrorCantCopy
}
// FIXME
// o, ok := src.(*Object)
// if !ok {
// return nil, fs.ErrorCantCopy
// }
return do(ctx, src, remote)
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
do := f.f.Features().Move
if do == nil {
return nil, fs.ErrorCantMove
}
// FIXME
// o, ok := src.(*Object)
// if !ok {
// return nil, fs.ErrorCantMove
// }
return do(ctx, src, remote)
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
do := f.f.Features().DirMove
if do == nil {
return fs.ErrorCantDirMove
}
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
return fs.ErrorCantDirMove
}
return do(ctx, srcFs.f, srcRemote, dstRemote)
}
// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), ch <-chan time.Duration) {
do := f.f.Features().ChangeNotify
if do == nil {
return
}
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
notifyFunc(path, entryType)
}
do(ctx, wrappedNotifyFunc, ch)
}
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
do := f.f.Features().DirCacheFlush
if do != nil {
do()
}
}
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
var o fs.Object
var err error
if stream {
o, err = f.f.Features().PutStream(ctx, in, src, options...)
} else {
o, err = f.f.Put(ctx, in, src, options...)
}
if err != nil {
return nil, err
}
return o, nil
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, false, options...)
default:
return nil, err
}
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
return f.put(ctx, in, src, true, options...)
default:
return nil, err
}
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.f.Features().About
if do == nil {
return nil, errors.New("not supported by underlying remote")
}
return do(ctx)
}
// Find the Fs for the directory
func (f *Fs) findFs(ctx context.Context, dir string) (subFs fs.Fs, err error) {
f.mu.Lock()
defer f.mu.Unlock()
subFs = f.f
// FIXME should do this with a better datastructure like a prefix tree
// FIXME want to find the longest first otherwise nesting won't work
dirSlash := dir + "/"
for archiverRemote, archive := range f.archives {
subRemote := archiverRemote + "/"
if strings.HasPrefix(dirSlash, subRemote) {
subFs, err = archive.init(ctx, f.f)
if err != nil {
return nil, err
}
break
}
}
return subFs, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)
subFs, err := f.findFs(ctx, dir)
if err != nil {
return nil, err
}
entries, err = subFs.List(ctx, dir)
if err != nil {
return nil, err
}
for i, entry := range entries {
// Can only unarchive files
if o, ok := entry.(fs.Object); ok {
remote := o.Remote()
archive := f.findArchive(remote)
if archive != nil {
// Overwrite entry with directory
entries[i] = fs.NewDir(remote, o.ModTime(ctx))
}
}
}
return entries, nil
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
dir := path.Dir(remote)
if dir == "/" || dir == "." {
dir = ""
}
subFs, err := f.findFs(ctx, dir)
if err != nil {
return nil, err
}
o, err := subFs.NewObject(ctx, remote)
if err != nil {
return nil, err
}
return o, nil
}
// Precision is the greatest precision of all the archivers
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
if do := f.f.Features().Shutdown; do != nil {
return do(ctx)
}
return nil
}
// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
do := f.f.Features().PublicLink
if do == nil {
return "", errors.New("PublicLink not supported")
}
return do(ctx, remote, expire, unlink)
}
// PutUnchecked in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// May create duplicates or return errors if src already
// exists.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.f.Features().PutUnchecked
if do == nil {
return nil, errors.New("can't PutUnchecked")
}
o, err := do(ctx, in, src, options...)
if err != nil {
return nil, err
}
return o, nil
}
// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if len(dirs) == 0 {
return nil
}
do := f.f.Features().MergeDirs
if do == nil {
return errors.New("MergeDirs not supported")
}
return do(ctx, dirs)
}
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
// otherwise cleaning up old versions of files.
func (f *Fs) CleanUp(ctx context.Context) error {
do := f.f.Features().CleanUp
if do == nil {
return errors.New("not supported by underlying remote")
}
return do(ctx)
}
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
do := f.f.Features().OpenWriterAt
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx, remote, size)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.f
}
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
do := f.f.Features().OpenChunkWriter
if do == nil {
return info, nil, fs.ErrorNotImplemented
}
return do(ctx, remote, src, options...)
}
// UserInfo returns info about the connected user
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
do := f.f.Features().UserInfo
if do == nil {
return nil, fs.ErrorNotImplemented
}
return do(ctx)
}
// Disconnect the current user
func (f *Fs) Disconnect(ctx context.Context) error {
do := f.f.Features().Disconnect
if do == nil {
return fs.ErrorNotImplemented
}
return do(ctx)
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.OpenWriterAter = (*Fs)(nil)
_ fs.OpenChunkWriter = (*Fs)(nil)
_ fs.UserInfoer = (*Fs)(nil)
_ fs.Disconnecter = (*Fs)(nil)
// FIXME _ fs.FullObject = (*Object)(nil)
)

View File

@@ -1,221 +0,0 @@
//go:build !plan9
package archive
import (
"bytes"
"context"
"fmt"
"os"
"os/exec"
"path"
"path/filepath"
"strconv"
"strings"
"testing"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// FIXME need to test Open with seek
// run - run a shell command
func run(t *testing.T, args ...string) {
cmd := exec.Command(args[0], args[1:]...)
fs.Debugf(nil, "run args = %v", args)
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf(`
----------------------------
Failed to run %v: %v
Command output was:
%s
----------------------------
`, args, err, out)
}
}
// check the dst and src are identical
func checkTree(ctx context.Context, name string, t *testing.T, dstArchive, src string, expectedCount int) {
t.Run(name, func(t *testing.T) {
fs.Debugf(nil, "check %q vs %q", dstArchive, src)
Farchive, err := cache.Get(ctx, dstArchive)
if err != fs.ErrorIsFile {
require.NoError(t, err)
}
Fsrc, err := cache.Get(ctx, src)
if err != fs.ErrorIsFile {
require.NoError(t, err)
}
var matches bytes.Buffer
opt := operations.CheckOpt{
Fdst: Farchive,
Fsrc: Fsrc,
Match: &matches,
}
for _, action := range []string{"Check", "Download"} {
t.Run(action, func(t *testing.T) {
matches.Reset()
if action == "Download" {
assert.NoError(t, operations.CheckDownload(ctx, &opt))
} else {
assert.NoError(t, operations.Check(ctx, &opt))
}
if expectedCount > 0 {
assert.Equal(t, expectedCount, strings.Count(matches.String(), "\n"))
}
})
}
t.Run("NewObject", func(t *testing.T) {
// Check we can run NewObject on all files and read them
assert.NoError(t, operations.ListFn(ctx, Fsrc, func(srcObj fs.Object) {
if t.Failed() {
return
}
remote := srcObj.Remote()
archiveObj, err := Farchive.NewObject(ctx, remote)
require.NoError(t, err, remote)
assert.Equal(t, remote, archiveObj.Remote(), remote)
// Test that the contents are the same
archiveBuf := fstests.ReadObject(ctx, t, archiveObj, -1)
srcBuf := fstests.ReadObject(ctx, t, srcObj, -1)
assert.Equal(t, srcBuf, archiveBuf)
if len(srcBuf) < 81 {
return
}
// Tests that Open works with SeekOption
assert.Equal(t, srcBuf[50:], fstests.ReadObject(ctx, t, archiveObj, -1, &fs.SeekOption{Offset: 50}), "contents differ after seek")
// Tests that Open works with RangeOption
for _, test := range []struct {
ro fs.RangeOption
wantStart, wantEnd int
}{
{fs.RangeOption{Start: 5, End: 15}, 5, 16},
{fs.RangeOption{Start: 80, End: -1}, 80, len(srcBuf)},
{fs.RangeOption{Start: 81, End: 100000}, 81, len(srcBuf)},
{fs.RangeOption{Start: -1, End: 20}, len(srcBuf) - 20, len(srcBuf)}, // if start is omitted this means get the final bytes
// {fs.RangeOption{Start: -1, End: -1}, 0, len(srcBuf)}, - this seems to work but the RFC doesn't define it
} {
got := fstests.ReadObject(ctx, t, archiveObj, -1, &test.ro)
foundAt := strings.Index(srcBuf, got)
help := fmt.Sprintf("%#v failed want [%d:%d] got [%d:%d]", test.ro, test.wantStart, test.wantEnd, foundAt, foundAt+len(got))
assert.Equal(t, srcBuf[test.wantStart:test.wantEnd], got, help)
}
// Test that the modtimes are correct
fstest.AssertTimeEqualWithPrecision(t, remote, srcObj.ModTime(ctx), archiveObj.ModTime(ctx), Farchive.Precision())
// Test that the sizes are correct
assert.Equal(t, srcObj.Size(), archiveObj.Size())
// Test that Strings are OK
assert.Equal(t, srcObj.String(), archiveObj.String())
}))
})
// t.Logf("Fdst ------------- %v", Fdst)
// operations.List(ctx, Fdst, os.Stdout)
// t.Logf("Fsrc ------------- %v", Fsrc)
// operations.List(ctx, Fsrc, os.Stdout)
})
}
// test creating and reading back some archives
//
// Note that this uses rclone and zip as external binaries.
func testArchive(t *testing.T, archiveName string, archiveFn func(t *testing.T, output, input string)) {
ctx := context.Background()
checkFiles := 1000
// create random test input files
inputRoot := t.TempDir()
input := filepath.Join(inputRoot, archiveName)
require.NoError(t, os.Mkdir(input, 0777))
run(t, "rclone", "test", "makefiles", "--files", strconv.Itoa(checkFiles), "--ascii", input)
// Create the archive
output := t.TempDir()
zipFile := path.Join(output, archiveName)
archiveFn(t, zipFile, input)
// Check the archive itself
checkTree(ctx, "Archive", t, ":archive:"+zipFile, input, checkFiles)
// Now check a subdirectory
fis, err := os.ReadDir(input)
require.NoError(t, err)
subDir := "NOT FOUND"
aFile := "NOT FOUND"
for _, fi := range fis {
if fi.IsDir() {
subDir = fi.Name()
} else {
aFile = fi.Name()
}
}
checkTree(ctx, "SubDir", t, ":archive:"+zipFile+"/"+subDir, filepath.Join(input, subDir), 0)
// Now check a single file
fiCtx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ "+aFile))
require.NoError(t, fi.AddRule("- *"))
checkTree(fiCtx, "SingleFile", t, ":archive:"+zipFile+"/"+aFile, filepath.Join(input, aFile), 0)
// Now check the level above
checkTree(ctx, "Root", t, ":archive:"+output, inputRoot, checkFiles)
// run(t, "cp", "-a", inputRoot, output, "/tmp/test-"+archiveName)
}
// skipIfNoExe skips the test if the named executable is not installed
func skipIfNoExe(t *testing.T, exeName string) {
_, err := exec.LookPath(exeName)
if err != nil {
t.Skipf("%s executable not installed", exeName)
}
}
// Test creating and reading back some archives
//
// Note that this uses rclone and zip as external binaries.
func TestArchiveZip(t *testing.T) {
fstest.Initialise()
skipIfNoExe(t, "zip")
skipIfNoExe(t, "rclone")
testArchive(t, "test.zip", func(t *testing.T, output, input string) {
oldcwd, err := os.Getwd()
require.NoError(t, err)
require.NoError(t, os.Chdir(input))
defer func() {
require.NoError(t, os.Chdir(oldcwd))
}()
run(t, "zip", "-9r", output, ".")
})
}
// Test creating and reading back some archives
//
// Note that this uses rclone and squashfs as external binaries.
func TestArchiveSquashfs(t *testing.T) {
fstest.Initialise()
skipIfNoExe(t, "mksquashfs")
skipIfNoExe(t, "rclone")
testArchive(t, "test.sqfs", func(t *testing.T, output, input string) {
run(t, "mksquashfs", input, output)
})
}

View File

@@ -1,67 +0,0 @@
//go:build !plan9
// Test Archive filesystem interface
package archive_test
import (
"testing"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/memory"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
var (
unimplementableFsMethods = []string{"ListR", "ListP", "MkdirMetadata", "DirSetModTime"}
// In these tests we receive objects from the underlying remote which don't implement these methods
unimplementableObjectMethods = []string{"GetTier", "ID", "Metadata", "MimeType", "SetTier", "UnWrap", "SetMetadata"}
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}
func TestLocal(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
remote := t.TempDir()
name := "TestArchiveLocal"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "archive"},
{Name: name, Key: "remote", Value: remote},
},
QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}
func TestMemory(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
remote := ":memory:"
name := "TestArchiveMemory"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "archive"},
{Name: name, Key: "remote", Value: remote},
},
QuickTestOK: true,
UnimplementableFsMethods: unimplementableFsMethods,
UnimplementableObjectMethods: unimplementableObjectMethods,
})
}

View File

@@ -1,7 +0,0 @@
// Build for archive for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9
// Package archive implements a backend to access archive files in a remote
package archive

View File

@@ -1,24 +0,0 @@
// Package archiver registers all the archivers
package archiver
import (
"context"
"github.com/rclone/rclone/fs"
)
// Archiver describes an archive package
type Archiver struct {
// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefixed with prefix and rooted at root
New func(ctx context.Context, f fs.Fs, remote, prefix, root string) (fs.Fs, error)
Extension string
}
// Archivers is a slice of all registered archivers
var Archivers []Archiver
// Register adds the archivers provided to the list of known archivers
func Register(as ...Archiver) {
Archivers = append(Archivers, as...)
}

View File

@@ -1,233 +0,0 @@
// Package base is a base archive Fs
package base
import (
"context"
"errors"
"fmt"
"io"
"path"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/vfs"
)
// Fs represents a wrapped fs.Fs
type Fs struct {
f fs.Fs
wrapper fs.Fs
name string
features *fs.Features // optional features
vfs *vfs.VFS
node vfs.Node // archive object
remote string // remote of the archive object
prefix string // position for objects
prefixSlash string // position for objects with a slash on
root string // position to read from within the archive
}
var errNotImplemented = errors.New("internal error: method not implemented in archiver")
// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefixed with prefix and rooted at root
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (*Fs, error) {
// FIXME vfs cache?
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
fs.Debugf(nil, "New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
VFS := vfs.New(wrappedFs, nil)
node, err := VFS.Stat(remote)
if err != nil {
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
}
f := &Fs{
f: wrappedFs,
name: path.Join(fs.ConfigString(wrappedFs), remote),
vfs: VFS,
node: node,
remote: remote,
root: root,
prefix: prefix,
prefixSlash: prefix + "/",
}
// FIXME
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
//
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
f.features = (&fs.Features{
CaseInsensitive: false,
DuplicateFiles: false,
ReadMimeType: false, // MimeTypes not supported with gzip
WriteMimeType: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// String returns a description of the FS
func (f *Fs) String() string {
return f.name
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return nil, errNotImplemented
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
return nil, errNotImplemented
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
return nil, vfs.EROFS
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.f
}
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// Object describes an object to be read from the raw zip file
type Object struct {
f *Fs
remote string
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.f
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return -1
}
// ModTime returns the modification time of the object
//
// This base implementation has no metadata to read so it returns the current time.
func (o *Object) ModTime(ctx context.Context) time.Time {
return time.Now()
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return vfs.EROFS
}
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
return true
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
return nil, errNotImplemented
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return vfs.EROFS
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return vfs.EROFS
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)

View File

@@ -1,165 +0,0 @@
package squashfs
// Could just be using bare object Open with RangeRequest which
// would transfer the minimum amount of data but may be slower.
import (
"errors"
"fmt"
"io/fs"
"os"
"sync"
"github.com/diskfs/go-diskfs/backend"
"github.com/rclone/rclone/vfs"
)
// Cache file handles for accessing the file
type cache struct {
node vfs.Node
fhsMu sync.Mutex
fhs []cacheHandle
}
// A cached file handle
type cacheHandle struct {
offset int64
fh vfs.Handle
}
// Make a new cache
func newCache(node vfs.Node) *cache {
return &cache{
node: node,
}
}
// Get a vfs.Handle from the pool or open one
//
// This tries to find an open file handle which doesn't require seeking.
func (c *cache) open(off int64) (fh vfs.Handle, err error) {
c.fhsMu.Lock()
defer c.fhsMu.Unlock()
if len(c.fhs) > 0 {
// Look for exact match first
for i, cfh := range c.fhs {
if cfh.offset == off {
// fs.Debugf(nil, "CACHE MATCH")
c.fhs = append(c.fhs[:i], c.fhs[i+1:]...)
return cfh.fh, nil
}
}
// fs.Debugf(nil, "CACHE MISS")
// Just take the first one if not found
cfh := c.fhs[0]
c.fhs = c.fhs[1:]
return cfh.fh, nil
}
fh, err = c.node.Open(os.O_RDONLY)
if err != nil {
return nil, fmt.Errorf("failed to open squashfs archive: %w", err)
}
return fh, nil
}
// Close a vfs.Handle or return it to the pool
//
// off should be the offset the file handle would read from without seeking
func (c *cache) close(fh vfs.Handle, off int64) {
c.fhsMu.Lock()
defer c.fhsMu.Unlock()
c.fhs = append(c.fhs, cacheHandle{
offset: off,
fh: fh,
})
}
// ReadAt reads len(p) bytes into p starting at offset off in the underlying
// input source. It returns the number of bytes read (0 <= n <= len(p)) and any
// error encountered.
//
// When ReadAt returns n < len(p), it returns a non-nil error explaining why
// more bytes were not returned. In this respect, ReadAt is stricter than Read.
//
// Even if ReadAt returns n < len(p), it may use all of p as scratch
// space during the call. If some data is available but not len(p) bytes,
// ReadAt blocks until either all the data is available or an error occurs.
// In this respect ReadAt is different from Read.
//
// If the n = len(p) bytes returned by ReadAt are at the end of the input
// source, ReadAt may return either err == EOF or err == nil.
//
// If ReadAt is reading from an input source with a seek offset, ReadAt should
// not affect nor be affected by the underlying seek offset.
//
// Clients of ReadAt can execute parallel ReadAt calls on the same input
// source.
//
// Implementations must not retain p.
func (c *cache) ReadAt(p []byte, off int64) (n int, err error) {
fh, err := c.open(off)
if err != nil {
return n, err
}
defer func() {
c.close(fh, off+int64(len(p)))
}()
// fs.Debugf(nil, "ReadAt(p[%d], off=%d, fh=%p)", len(p), off, fh)
return fh.ReadAt(p, off)
}
var errCacheNotImplemented = errors.New("internal error: squashfs cache doesn't implement method")
// WriteAt method dummy stub to satisfy interface
func (c *cache) WriteAt(p []byte, off int64) (n int, err error) {
return 0, errCacheNotImplemented
}
// Seek method dummy stub to satisfy interface
func (c *cache) Seek(offset int64, whence int) (int64, error) {
return 0, errCacheNotImplemented
}
// Read method dummy stub to satisfy interface
func (c *cache) Read(p []byte) (n int, err error) {
return 0, errCacheNotImplemented
}
func (c *cache) Stat() (fs.FileInfo, error) {
return nil, errCacheNotImplemented
}
// Close the file
func (c *cache) Close() (err error) {
c.fhsMu.Lock()
defer c.fhsMu.Unlock()
// Close any open file handles
for i := range c.fhs {
fh := &c.fhs[i]
newErr := fh.fh.Close()
if err == nil {
err = newErr
}
}
c.fhs = nil
return err
}
// Sys returns OS-specific file for ioctl calls via fd
func (c *cache) Sys() (*os.File, error) {
return nil, errCacheNotImplemented
}
// Writable returns file for read-write operations
func (c *cache) Writable() (backend.WritableFile, error) {
return nil, errCacheNotImplemented
}
// check interfaces
var _ backend.Storage = (*cache)(nil)

View File

@@ -1,446 +0,0 @@
// Package squashfs implements a squashfs archiver for the archive backend
package squashfs
import (
"context"
"fmt"
"io"
"path"
"strings"
"time"
"github.com/diskfs/go-diskfs/filesystem/squashfs"
"github.com/rclone/rclone/backend/archive/archiver"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
)
func init() {
archiver.Register(archiver.Archiver{
New: New,
Extension: ".sqfs",
})
}
// Fs represents a wrapped fs.Fs
type Fs struct {
f fs.Fs
wrapper fs.Fs
name string
features *fs.Features // optional features
vfs *vfs.VFS
sqfs *squashfs.FileSystem // interface to the squashfs
c *cache
node vfs.Node // squashfs file object - set if reading
remote string // remote of the squashfs file object
prefix string // position for objects
prefixSlash string // position for objects with a slash on
root string // position to read from within the archive
}
// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefixed with prefix and rooted at root
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
// FIXME vfs cache?
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
fs.Debugf(nil, "Squashfs: New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
vfsOpt := vfscommon.Opt
vfsOpt.ReadWait = 0
VFS := vfs.New(wrappedFs, &vfsOpt)
node, err := VFS.Stat(remote)
if err != nil {
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
}
c := newCache(node)
// FIXME blocksize
sqfs, err := squashfs.Read(c, node.Size(), 0, 1024*1024)
if err != nil {
return nil, fmt.Errorf("failed to read squashfs: %w", err)
}
f := &Fs{
f: wrappedFs,
name: path.Join(fs.ConfigString(wrappedFs), remote),
vfs: VFS,
node: node,
sqfs: sqfs,
c: c,
remote: remote,
root: strings.Trim(root, "/"),
prefix: prefix,
prefixSlash: prefix + "/",
}
if prefix == "" {
f.prefixSlash = ""
}
singleObject := false
// Find the directory the root points to
if f.root != "" && !strings.HasSuffix(root, "/") {
native, err := f.toNative("")
if err == nil {
native = strings.TrimRight(native, "/")
_, err := f.newObjectNative(native)
if err == nil {
// It pointed to a file - return fs.ErrorIsFile below and use the directory above as the root
singleObject = true
f.root = path.Dir(f.root)
if f.root == "." || f.root == "/" {
f.root = ""
}
}
}
}
// FIXME
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
//
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
f.features = (&fs.Features{
CaseInsensitive: false,
DuplicateFiles: false,
ReadMimeType: false, // MimeTypes not supported with squashfs
WriteMimeType: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
if singleObject {
return f, fs.ErrorIsFile
}
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("Squashfs %q", f.name)
}
// This turns a remote into a native path in the squashfs starting with a /
func (f *Fs) toNative(remote string) (string, error) {
native := strings.Trim(remote, "/")
if f.prefix == "" {
native = "/" + native
} else if native == f.prefix {
native = "/"
} else if !strings.HasPrefix(native, f.prefixSlash) {
return "", fmt.Errorf("internal error: %q doesn't start with prefix %q", native, f.prefixSlash)
} else {
native = native[len(f.prefix):]
}
if f.root != "" {
native = "/" + f.root + native
}
return native, nil
}
// Turn a (nativeDir, leaf) into a remote
func (f *Fs) fromNative(nativeDir string, leaf string) string {
// fs.Debugf(nil, "nativeDir = %q, leaf = %q, root=%q", nativeDir, leaf, f.root)
dir := nativeDir
if f.root != "" {
dir = strings.TrimPrefix(dir, "/"+f.root)
}
remote := f.prefixSlash + strings.Trim(path.Join(dir, leaf), "/")
// fs.Debugf(nil, "dir = %q, remote=%q", dir, remote)
return remote
}
// Convert a FileInfo into an Object from native dir
func (f *Fs) objectFromFileInfo(nativeDir string, item squashfs.FileStat) *Object {
return &Object{
fs: f,
remote: f.fromNative(nativeDir, item.Name()),
size: item.Size(),
modTime: item.ModTime(),
item: item,
}
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
nativeDir, err := f.toNative(dir)
if err != nil {
return nil, err
}
items, err := f.sqfs.ReadDir(nativeDir)
if err != nil {
return nil, fmt.Errorf("read squashfs: couldn't read directory: %w", err)
}
entries = make(fs.DirEntries, 0, len(items))
for _, fi := range items {
item, ok := fi.(squashfs.FileStat)
if !ok {
return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi)
}
// fs.Debugf(item.Name(), "entry = %#v", item)
var entry fs.DirEntry
if err != nil {
return nil, fmt.Errorf("error reading item %q: %q", item.Name(), err)
}
if item.IsDir() {
var remote = f.fromNative(nativeDir, item.Name())
entry = fs.NewDir(remote, item.ModTime())
} else {
if item.Mode().IsRegular() {
entry = f.objectFromFileInfo(nativeDir, item)
} else {
fs.Debugf(item.Name(), "FIXME Not regular file - skipping")
continue
}
}
entries = append(entries, entry)
}
// fs.Debugf(f, "dir=%q, entries=%v", dir, entries)
return entries, nil
}
// newObjectNative finds the object at the native path passed in
func (f *Fs) newObjectNative(nativePath string) (o fs.Object, err error) {
// get the path and filename
dir, leaf := path.Split(nativePath)
dir = strings.TrimRight(dir, "/")
leaf = strings.Trim(leaf, "/")
// FIXME need to detect directory not found
fis, err := f.sqfs.ReadDir(dir)
if err != nil {
return nil, fs.ErrorObjectNotFound
}
for _, fi := range fis {
if fi.Name() == leaf {
if fi.IsDir() {
return nil, fs.ErrorNotAFile
}
item, ok := fi.(squashfs.FileStat)
if !ok {
return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi)
}
o = f.objectFromFileInfo(dir, item)
break
}
}
if o == nil {
return nil, fs.ErrorObjectNotFound
}
return o, nil
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err)
nativePath, err := f.toNative(remote)
if err != nil {
return nil, err
}
return f.newObjectNative(nativePath)
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
return nil, vfs.EROFS
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.f
}
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// Object describes an object to be read from the raw squashfs file
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
item squashfs.FileStat
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Turn a squashfs path into a full path for the parent Fs
// func (o *Object) path(remote string) string {
// return path.Join(o.fs.prefix, remote)
// }
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return o.size
}
// ModTime returns the modification time of the object
//
// This is read from the file's metadata in the squashfs archive.
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return vfs.EROFS
}
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
return true
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
remote, err := o.fs.toNative(o.remote)
if err != nil {
return nil, err
}
fs.Debugf(o, "Opening %q", remote)
//fh, err := o.fs.sqfs.OpenFile(remote, os.O_RDONLY)
fh, err := o.item.Open()
if err != nil {
return nil, err
}
// discard data from start as necessary
if offset > 0 {
_, err = fh.Seek(offset, io.SeekStart)
if err != nil {
return nil, err
}
}
// If limited then don't return everything
if limit >= 0 {
fs.Debugf(nil, "limit=%d, offset=%d, options=%v", limit, offset, options)
return readers.NewLimitedReadCloser(fh, limit), nil
}
return fh, nil
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return vfs.EROFS
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return vfs.EROFS
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)

View File

@@ -1,385 +0,0 @@
// Package zip implements a zip archiver for the archive backend
package zip
import (
"archive/zip"
"context"
"errors"
"fmt"
"io"
"os"
"path"
"strings"
"time"
"github.com/rclone/rclone/backend/archive/archiver"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfscommon"
)
func init() {
archiver.Register(archiver.Archiver{
New: New,
Extension: ".zip",
})
}
// Fs represents a wrapped fs.Fs
type Fs struct {
f fs.Fs
wrapper fs.Fs
name string
features *fs.Features // optional features
vfs *vfs.VFS
node vfs.Node // zip file object - set if reading
remote string // remote of the zip file object
prefix string // position for objects
prefixSlash string // position for objects with a slash on
root string // position to read from within the archive
dt dirtree.DirTree // read from zipfile
}
// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefixed with prefix and rooted at root
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
// FIXME vfs cache?
// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
fs.Debugf(nil, "Zip: New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
vfsOpt := vfscommon.Opt
vfsOpt.ReadWait = 0
VFS := vfs.New(wrappedFs, &vfsOpt)
node, err := VFS.Stat(remote)
if err != nil {
return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
}
f := &Fs{
f: wrappedFs,
name: path.Join(fs.ConfigString(wrappedFs), remote),
vfs: VFS,
node: node,
remote: remote,
root: root,
prefix: prefix,
prefixSlash: prefix + "/",
}
// Read the contents of the zip file
singleObject, err := f.readZip()
if err != nil {
return nil, fmt.Errorf("failed to open zip file: %w", err)
}
// FIXME
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
//
// FIXME some of these need to be forced on - CanHaveEmptyDirectories
f.features = (&fs.Features{
CaseInsensitive: false,
DuplicateFiles: false,
ReadMimeType: false, // MimeTypes not supported with zip
WriteMimeType: false,
BucketBased: false,
CanHaveEmptyDirectories: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
if singleObject {
return f, fs.ErrorIsFile
}
return f, nil
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// String returns a description of the FS
func (f *Fs) String() string {
return fmt.Sprintf("Zip %q", f.name)
}
// readZip the zip file into f
//
// Returns singleObject=true if f.root points to a file
func (f *Fs) readZip() (singleObject bool, err error) {
if f.node == nil {
return singleObject, fs.ErrorDirNotFound
}
size := f.node.Size()
if size < 0 {
return singleObject, errors.New("can't read from zip file with unknown size")
}
r, err := f.node.Open(os.O_RDONLY)
if err != nil {
return singleObject, fmt.Errorf("failed to open zip file: %w", err)
}
zr, err := zip.NewReader(r, size)
if err != nil {
return singleObject, fmt.Errorf("failed to read zip file: %w", err)
}
dt := dirtree.New()
for _, file := range zr.File {
remote := strings.Trim(path.Clean(file.Name), "/")
if remote == "." {
remote = ""
}
remote = path.Join(f.prefix, remote)
if f.root != "" {
// Ignore all files outside the root
if !strings.HasPrefix(remote, f.root) {
continue
}
if remote == f.root {
remote = ""
} else {
remote = strings.TrimPrefix(remote, f.root+"/")
}
}
if strings.HasSuffix(file.Name, "/") {
dir := fs.NewDir(remote, file.Modified)
dt.AddDir(dir)
} else {
if remote == "" {
remote = path.Base(f.root)
singleObject = true
dt = dirtree.New()
}
o := &Object{
f: f,
remote: remote,
fh: &file.FileHeader,
file: file,
}
dt.Add(o)
if singleObject {
break
}
}
}
dt.CheckParents("")
dt.Sort()
f.dt = dt
//fs.Debugf(nil, "dt = %v", dt)
return singleObject, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
// _, err = f.strip(dir)
// if err != nil {
// return nil, err
// }
entries, ok := f.dt[dir]
if !ok {
return nil, fs.ErrorDirNotFound
}
fs.Debugf(f, "dir=%q, entries=%v", dir, entries)
return entries, nil
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err)
if f.dt == nil {
return nil, fs.ErrorObjectNotFound
}
_, entry := f.dt.Find(remote)
if entry == nil {
return nil, fs.ErrorObjectNotFound
}
o, ok := entry.(*Object)
if !ok {
return nil, fs.ErrorNotAFile
}
return o, nil
}
// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
return time.Second
}
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return vfs.EROFS
}
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
return nil, vfs.EROFS
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.CRC32)
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.f
}
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
return f.wrapper
}
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
f.wrapper = wrapper
}
// Object describes an object to be read from the raw zip file
type Object struct {
f *Fs
remote string
fh *zip.FileHeader
file *zip.File
}
// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
return o.f
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Remote()
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Size returns the size of the file
func (o *Object) Size() int64 {
return int64(o.fh.UncompressedSize64)
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.fh.Modified
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return vfs.EROFS
}
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
return true
}
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
if ht == hash.CRC32 {
// FIXME return empty CRC if writing
if o.f.dt == nil {
return "", nil
}
return fmt.Sprintf("%08x", o.fh.CRC32), nil
}
return "", hash.ErrUnsupported
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
rc, err = o.file.Open()
if err != nil {
return nil, err
}
// discard data from start as necessary
if offset > 0 {
_, err = io.CopyN(io.Discard, rc, offset)
if err != nil {
return nil, err
}
}
// If limited then don't return everything
if limit >= 0 {
return readers.NewLimitedReadCloser(rc, limit), nil
}
return rc, nil
}
// Update in to the object with the modTime given of the given size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return vfs.EROFS
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return vfs.EROFS
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)


@@ -56,7 +56,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/readers"
@@ -844,32 +843,15 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
//
// This should return ErrDirNotFound if the directory isn't found.
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
var entries fs.DirEntries
subDirClient := f.dirClient(dir)
// Checking whether directory exists
_, err := subDirClient.GetProperties(ctx, nil)
if fileerror.HasCode(err, fileerror.ParentNotFound, fileerror.ResourceNotFound) {
return fs.ErrorDirNotFound
return entries, fs.ErrorDirNotFound
} else if err != nil {
return err
return entries, err
}
opt := &directory.ListFilesAndDirectoriesOptions{
@@ -881,7 +863,7 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
for pager.More() {
resp, err := pager.NextPage(ctx)
if err != nil {
return err
return entries, err
}
for _, directory := range resp.Segment.Directories {
// Name *string `xml:"Name"`
@@ -907,10 +889,7 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
if directory.Properties.ContentLength != nil {
entry.SetSize(*directory.Properties.ContentLength)
}
err = list.Add(entry)
if err != nil {
return err
}
entries = append(entries, entry)
}
for _, file := range resp.Segment.Files {
leaf := f.opt.Enc.ToStandardPath(*file.Name)
@@ -924,13 +903,10 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
if file.Properties.LastWriteTime != nil {
entry.modTime = *file.Properties.LastWriteTime
}
err = list.Add(entry)
if err != nil {
return err
}
entries = append(entries, entry)
}
}
return list.Flush()
return entries, nil
}
// ------------------------------------------------------------
@@ -1474,7 +1450,6 @@ var (
_ fs.DirMover = &Fs{}
_ fs.Copier = &Fs{}
_ fs.OpenWriterAter = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)


@@ -37,7 +37,6 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
@@ -706,27 +705,9 @@ OUTER:
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
return nil, err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, true, func(info *api.Item) bool {
@@ -736,22 +717,14 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
f.dirCache.Put(remote, info.ID)
d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
// FIXME more info from dir?
err = list.Add(d)
if err != nil {
iErr = err
return true
}
entries = append(entries, d)
} else if info.Type == api.ItemTypeFile {
o, err := f.newObjectWithInfo(ctx, remote, info)
if err != nil {
iErr = err
return true
}
err = list.Add(o)
if err != nil {
iErr = err
return true
}
entries = append(entries, o)
}
// Cache some metadata for this Item to help us process events later
@@ -767,12 +740,12 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
return false
})
if err != nil {
return err
return nil, err
}
if iErr != nil {
return iErr
return nil, iErr
}
return list.Flush()
return entries, nil
}
// Creates from the parameters passed in a half finished Object which
@@ -1768,7 +1741,6 @@ var (
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.ListPer = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)


@@ -1,4 +1,5 @@
//go:build !plan9 && !js
// +build !plan9,!js
package cache


@@ -1965,28 +1965,9 @@ func (f *Fs) findImportFormat(ctx context.Context, mimeType string) string {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
entriesAdded := 0
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
return nil, err
}
directoryID = actualID(directoryID)
@@ -1998,30 +1979,25 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
return true
}
if entry != nil {
err = list.Add(entry)
if err != nil {
iErr = err
return true
}
entriesAdded++
entries = append(entries, entry)
}
return false
})
if err != nil {
return err
return nil, err
}
if iErr != nil {
return iErr
return nil, iErr
}
// If listing the root of a teamdrive and got no entries,
// double check we have access
if f.isTeamDrive && entriesAdded == 0 && f.root == "" && dir == "" {
if f.isTeamDrive && len(entries) == 0 && f.root == "" && dir == "" {
err = f.teamDriveOK(ctx)
if err != nil {
return err
return nil, err
}
}
return list.Flush()
return entries, nil
}
// listREntry is a task to be executed by a listRRunner
@@ -4641,7 +4617,6 @@ var (
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.ListPer = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.MkdirMetadataer = (*Fs)(nil)


@@ -47,7 +47,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/lib/batcher"
"github.com/rclone/rclone/lib/encoder"
@@ -835,7 +834,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// listSharedFolders lists all available shared folders mounted and not mounted
// we'll need the id later so we have to return them in original format
func (f *Fs) listSharedFolders(ctx context.Context, callback func(fs.DirEntry) error) (err error) {
func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err error) {
started := false
var res *sharing.ListFoldersResult
for {
@@ -848,7 +847,7 @@ func (f *Fs) listSharedFolders(ctx context.Context, callback func(fs.DirEntry) e
return shouldRetry(ctx, err)
})
if err != nil {
return err
return nil, err
}
started = true
} else {
@@ -860,15 +859,15 @@ func (f *Fs) listSharedFolders(ctx context.Context, callback func(fs.DirEntry) e
return shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("list continue: %w", err)
return nil, fmt.Errorf("list continue: %w", err)
}
}
for _, entry := range res.Entries {
leaf := f.opt.Enc.ToStandardName(entry.Name)
d := fs.NewDir(leaf, time.Time{}).SetID(entry.SharedFolderId)
err = callback(d)
entries = append(entries, d)
if err != nil {
return err
return nil, err
}
}
if res.Cursor == "" {
@@ -876,26 +875,22 @@ func (f *Fs) listSharedFolders(ctx context.Context, callback func(fs.DirEntry) e
}
}
return nil
return entries, nil
}
// findSharedFolder finds the id for a given shared folder name
// somewhat annoyingly there is no endpoint to query a shared folder by its name
// so our only option is to iterate over all shared folders
func (f *Fs) findSharedFolder(ctx context.Context, name string) (id string, err error) {
errFoundFile := errors.New("found file")
err = f.listSharedFolders(ctx, func(entry fs.DirEntry) error {
if entry.(*fs.Dir).Remote() == name {
id = entry.(*fs.Dir).ID()
return errFoundFile
}
return nil
})
if errors.Is(err, errFoundFile) {
return id, nil
} else if err != nil {
entries, err := f.listSharedFolders(ctx)
if err != nil {
return "", err
}
for _, entry := range entries {
if entry.(*fs.Dir).Remote() == name {
return entry.(*fs.Dir).ID(), nil
}
}
return "", fs.ErrorDirNotFound
}
@@ -913,7 +908,7 @@ func (f *Fs) mountSharedFolder(ctx context.Context, id string) error {
// listReceivedFiles lists shared files the user has access to (note this means individual
// files, not files contained in shared folders)
func (f *Fs) listReceivedFiles(ctx context.Context, callback func(fs.DirEntry) error) (err error) {
func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err error) {
started := false
var res *sharing.ListFilesResult
for {
@@ -926,7 +921,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context, callback func(fs.DirEntry) e
return shouldRetry(ctx, err)
})
if err != nil {
return err
return nil, err
}
started = true
} else {
@@ -938,7 +933,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context, callback func(fs.DirEntry) e
return shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("list continue: %w", err)
return nil, fmt.Errorf("list continue: %w", err)
}
}
for _, entry := range res.Entries {
@@ -951,34 +946,27 @@ func (f *Fs) listReceivedFiles(ctx context.Context, callback func(fs.DirEntry) e
modTime: *entry.TimeInvited,
}
if err != nil {
return err
}
err = callback(o)
if err != nil {
return err
return nil, err
}
entries = append(entries, o)
}
if res.Cursor == "" {
break
}
}
return nil
return entries, nil
}
func (f *Fs) findSharedFile(ctx context.Context, name string) (o *Object, err error) {
errFoundFile := errors.New("found file")
err = f.listReceivedFiles(ctx, func(entry fs.DirEntry) error {
if entry.(*Object).remote == name {
o = entry.(*Object)
return errFoundFile
}
return nil
})
if errors.Is(err, errFoundFile) {
return o, nil
} else if err != nil {
files, err := f.listReceivedFiles(ctx)
if err != nil {
return nil, err
}
for _, entry := range files {
if entry.(*Object).remote == name {
return entry.(*Object), nil
}
}
return nil, fs.ErrorObjectNotFound
}
@@ -992,37 +980,11 @@ func (f *Fs) findSharedFile(ctx context.Context, name string) (o *Object, err er
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
list := list.NewHelper(callback)
if f.opt.SharedFiles {
err := f.listReceivedFiles(ctx, list.Add)
if err != nil {
return err
}
return list.Flush()
return f.listReceivedFiles(ctx)
}
if f.opt.SharedFolders {
err := f.listSharedFolders(ctx, list.Add)
if err != nil {
return err
}
return list.Flush()
return f.listSharedFolders(ctx)
}
root := f.slashRoot
@@ -1052,7 +1014,7 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (
err = fs.ErrorDirNotFound
}
}
return err
return nil, err
}
started = true
} else {
@@ -1064,7 +1026,7 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (
return shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("list continue: %w", err)
return nil, fmt.Errorf("list continue: %w", err)
}
}
for _, entry := range res.Entries {
@@ -1089,20 +1051,14 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (
remote := path.Join(dir, leaf)
if folderInfo != nil {
d := fs.NewDir(remote, time.Time{}).SetID(folderInfo.Id)
err = list.Add(d)
if err != nil {
return err
}
entries = append(entries, d)
} else if fileInfo != nil {
o, err := f.newObjectWithInfo(ctx, remote, fileInfo)
if err != nil {
return err
return nil, err
}
if o.(*Object).exportType.listable() {
err = list.Add(o)
if err != nil {
return err
}
entries = append(entries, o)
}
}
}
@@ -1110,7 +1066,7 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (
break
}
}
return list.Flush()
return entries, nil
}
// Put the object
@@ -2131,7 +2087,6 @@ var (
_ fs.Mover = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.ListPer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Shutdowner = &Fs{}
_ fs.Object = (*Object)(nil)


@@ -456,7 +456,9 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
}
}()
baseDialer := fshttp.NewDialer(ctx)
if f.opt.SocksProxy != "" || f.proxyURL != nil {
if f.opt.SocksProxy != "" {
conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
} else if f.proxyURL != nil {
// We need to make the onward connection to f.opt.Host. However the FTP
// library sets the host to the proxy IP after using EPSV or PASV so we need
// to correct that here.
@@ -466,11 +468,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
return nil, err
}
dialAddress := net.JoinHostPort(f.opt.Host, dialPort)
if f.opt.SocksProxy != "" {
conn, err = proxy.SOCKS5Dial(network, dialAddress, f.opt.SocksProxy, baseDialer)
} else {
conn, err = proxy.HTTPConnectDial(network, dialAddress, f.proxyURL, baseDialer)
}
conn, err = proxy.HTTPConnectDial(network, dialAddress, f.proxyURL, baseDialer)
} else {
conn, err = baseDialer.Dial(network, address)
}


@@ -1134,15 +1134,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
remote: remote,
}
// Set the storage class for the destination object if configured
var dstObject *storage.Object
if f.opt.StorageClass != "" {
dstObject = &storage.Object{
StorageClass: f.opt.StorageClass,
}
}
rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, dstObject)
rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, nil)
if !f.opt.BucketPolicyOnly {
rewriteRequest.DestinationPredefinedAcl(f.opt.ObjectACL)
}
@@ -1430,10 +1422,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
ContentType: fs.MimeType(ctx, src),
Metadata: metadataFromModTime(modTime),
}
// Set the storage class from config if configured
if o.fs.opt.StorageClass != "" {
object.StorageClass = o.fs.opt.StorageClass
}
// Apply upload options
for _, option := range options {
key, value := option.Header()


@@ -497,6 +497,9 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
}
f.dirCache.FlushDir(dir)
if err != nil {
return err
}
return nil
}


@@ -115,17 +115,6 @@ points, as you explicitly acknowledge that they should be skipped.`,
NoPrefix: true,
Advanced: true,
},
{
Name: "skip_specials",
Help: `Don't warn about skipped pipes, sockets and device objects.
This flag disables warning messages on skipped pipes, sockets and
device objects, as you explicitly acknowledge that they should be
skipped.`,
Default: false,
NoPrefix: true,
Advanced: true,
},
{
Name: "zero_size_links",
Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
@@ -339,7 +328,6 @@ type Options struct {
FollowSymlinks bool `config:"copy_links"`
TranslateSymlinks bool `config:"links"`
SkipSymlinks bool `config:"skip_links"`
SkipSpecials bool `config:"skip_specials"`
UTFNorm bool `config:"unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
@@ -1258,9 +1246,7 @@ func (o *Object) Storable() bool {
}
return false
} else if mode&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
if !o.fs.opt.SkipSpecials {
fs.Logf(o, "Can't transfer non file/directory")
}
fs.Logf(o, "Can't transfer non file/directory")
return false
} else if mode&os.ModeDir != 0 {
// fs.Debugf(o, "Skipping directory")


@@ -1,4 +1,4 @@
//go:build dragonfly || plan9 || js || aix
//go:build dragonfly || plan9 || js
package local


@@ -1377,27 +1377,9 @@ func (f *Fs) itemToDirEntry(ctx context.Context, dir string, info *api.Item) (en
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
return nil, err
}
err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) error {
entry, err := f.itemToDirEntry(ctx, dir, info)
@@ -1407,16 +1389,13 @@ func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) e
if entry == nil {
return nil
}
err = list.Add(entry)
if err != nil {
return err
}
entries = append(entries, entry)
return nil
})
if err != nil {
return err
return nil, err
}
return list.Flush()
return entries, nil
}
// ListR lists the objects and directories of the Fs starting
@@ -3044,7 +3023,6 @@ var (
_ fs.PublicLinker = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.ListPer = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = &Object{}


@@ -629,31 +629,11 @@ func (f *Fs) listHelper(ctx context.Context, dir string, recursive bool, callbac
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
list := list.NewHelper(callback)
err = f.listHelper(ctx, dir, false, func(o fs.DirEntry) error {
return list.Add(o)
entries = append(entries, o)
return nil
})
if err != nil {
return err
}
return list.Flush()
return entries, err
}
// ListR lists the objects and directories of the Fs starting
@@ -1397,8 +1377,6 @@ var (
_ fs.DirMover = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.ListPer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.Object = (*Object)(nil)


@@ -1,194 +0,0 @@
## Adding a new s3 provider
It is quite easy to add a new S3 provider to rclone.
You'll then need to add the following (optional tags are in [] and
do not get displayed in rclone config if empty):
The process is as follows: Create yaml -> add docs -> run tests ->
adjust yaml until tests pass.
All tags can be found in the `Provider` struct in `backend/s3/providers.go`.
Looking through a few of the yaml files as examples should make things
clear; `AWS.yaml` has the most config and is a good one to copy and paste from.
### YAML
In `backend/s3/provider/YourProvider.yaml`
- name
- description
- More like the full name, often "YourProvider + Object Storage"
- [Region]
- Any regions your provider supports or the defaults (use `region: {}` for this)
- Example from AWS.yaml:
```yaml
region:
us-east-1: |-
The default endpoint - a good choice if you are unsure.
US Region, Northern Virginia, or Pacific Northwest.
Leave location constraint empty.
```
- The defaults (as seen in Rclone.yaml):
```yaml
region:
"": |-
Use this if unsure.
Will use v4 signatures and an empty region.
other-v2-signature: |-
Use this only if v4 signatures don't work.
E.g. pre Jewel/v10 CEPH.
```
- [Endpoint]
- Any endpoints your provider supports
- Example from Mega.yaml
```yaml
endpoint:
s3.eu-central-1.s4.mega.io: Mega S4 eu-central-1 (Amsterdam)
```
- [Location Constraint]
- The Location Constraint of your remote, often the same as the region.
- Example from AWS.yaml
```yaml
location_constraint:
"": Empty for US Region, Northern Virginia, or Pacific Northwest
us-east-2: US East (Ohio) Region
```
- [ACL]
- Identical across *most* providers. Select the default with `acl: {}`
- Example from AWS.yaml
```yaml
acl:
private: |-
Owner gets FULL_CONTROL.
No one else has access rights (default).
public-read: |-
Owner gets FULL_CONTROL.
The AllUsers group gets READ access.
public-read-write: |-
Owner gets FULL_CONTROL.
The AllUsers group gets READ and WRITE access.
Granting this on a bucket is generally not recommended.
authenticated-read: |-
Owner gets FULL_CONTROL.
The AuthenticatedUsers group gets READ access.
bucket-owner-read: |-
Object owner gets FULL_CONTROL.
Bucket owner gets READ access.
If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
bucket-owner-full-control: |-
Both the object owner and the bucket owner get FULL_CONTROL over the object.
If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
```
- [Storage Class]
- Identical across *most* providers.
- Defaults from AWS.yaml
```yaml
storage_class:
"": Default
STANDARD: Standard storage class
REDUCED_REDUNDANCY: Reduced redundancy storage class
STANDARD_IA: Standard Infrequent Access storage class
ONEZONE_IA: One Zone Infrequent Access storage class
GLACIER: Glacier Flexible Retrieval storage class
DEEP_ARCHIVE: Glacier Deep Archive storage class
INTELLIGENT_TIERING: Intelligent-Tiering storage class
GLACIER_IR: Glacier Instant Retrieval storage class
```
- [Server Side Encryption]
- Not common, identical across *most* providers.
- Defaults from AWS.yaml
```yaml
server_side_encryption:
"": None
AES256: AES256
aws:kms: aws:kms
```
- [Advanced Options]
- All advanced options are Boolean - if true the configurator asks about that option, if false or omitted it doesn't:
```go
BucketACL bool `yaml:"bucket_acl,omitempty"`
DirectoryBucket bool `yaml:"directory_bucket,omitempty"`
LeavePartsOnError bool `yaml:"leave_parts_on_error,omitempty"`
RequesterPays bool `yaml:"requester_pays,omitempty"`
SSECustomerAlgorithm bool `yaml:"sse_customer_algorithm,omitempty"`
SSECustomerKey bool `yaml:"sse_customer_key,omitempty"`
SSECustomerKeyBase64 bool `yaml:"sse_customer_key_base64,omitempty"`
SSECustomerKeyMd5 bool `yaml:"sse_customer_key_md5,omitempty"`
SSEKmsKeyID bool `yaml:"sse_kms_key_id,omitempty"`
STSEndpoint bool `yaml:"sts_endpoint,omitempty"`
UseAccelerateEndpoint bool `yaml:"use_accelerate_endpoint,omitempty"`
```
- Example from AWS.yaml:
```yaml
bucket_acl: true
directory_bucket: true
leave_parts_on_error: true
requester_pays: true
sse_customer_algorithm: true
sse_customer_key: true
sse_customer_key_base64: true
sse_customer_key_md5: true
sse_kms_key_id: true
sts_endpoint: true
use_accelerate_endpoint: true
```
- Quirks
- Quirks are discovered through documentation and running the tests as seen below.
- Most quirks are *bool so as to have 3 values: `true`, `false` and `don't care`.
```go
type Quirks struct {
ListVersion *int `yaml:"list_version,omitempty"` // 1 or 2
ForcePathStyle *bool `yaml:"force_path_style,omitempty"` // true = path-style
ListURLEncode *bool `yaml:"list_url_encode,omitempty"`
UseMultipartEtag *bool `yaml:"use_multipart_etag,omitempty"`
UseAlreadyExists *bool `yaml:"use_already_exists,omitempty"`
UseAcceptEncodingGzip *bool `yaml:"use_accept_encoding_gzip,omitempty"`
MightGzip *bool `yaml:"might_gzip,omitempty"`
UseMultipartUploads *bool `yaml:"use_multipart_uploads,omitempty"`
UseUnsignedPayload *bool `yaml:"use_unsigned_payload,omitempty"`
UseXID *bool `yaml:"use_x_id,omitempty"`
SignAcceptEncoding *bool `yaml:"sign_accept_encoding,omitempty"`
CopyCutoff *int64 `yaml:"copy_cutoff,omitempty"`
MaxUploadParts *int `yaml:"max_upload_parts,omitempty"`
MinChunkSize *int64 `yaml:"min_chunk_size,omitempty"`
}
```
- Example from AWS.yaml
```yaml
quirks:
might_gzip: false # Never auto gzips objects
use_unsigned_payload: false # AWS has trailer support
```
Note that if you omit a section, e.g. `region`, then the user won't be
asked that question, and if you add an empty section, e.g. `region: {}`,
then the defaults from `Other.yaml` will be used.
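To tie the individual fields together, here is a minimal sketch of a complete provider file. The provider name, endpoint and quirk values are purely illustrative (there is no `Example.yaml` in the repo):
```yaml
name: Example
description: Example Object Storage
# region is omitted entirely, so the user is never asked a region question
endpoint:
  s3.example.com: Example Object Storage default endpoint
location_constraint: {} # empty section - the defaults from Other.yaml are used
acl: {}
storage_class:
  STANDARD: Standard storage class
bucket_acl: true
quirks:
  force_path_style: true
  use_already_exists: false
```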
### DOCS
- `docs/content/s3.md`
- Add the provider at the top of the page.
- Add a section about the provider linked from there.
- Make sure this is in alphabetical order in the `Providers` section.
- Add a transcript of a trial `rclone config` session
- Edit the transcript to remove things which might change in subsequent versions
- **Do not** alter or add to the autogenerated parts of `s3.md`
- Rule of thumb: don't edit anything not mentioned above.
- **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
- This will make autogenerated changes!
- `README.md` - this is the home page in github
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
- `docs/content/_index.md` - this is the home page of rclone.org
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
- Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.
### TESTS
Once you've written the code, test `rclone config` works to your
satisfaction and looks correct, and check the integration tests work
`go test -v -remote NewS3Provider:`. You may need to adjust the quirks
to get them to pass. Some providers just can't pass the tests with
control characters in the names, so if those fail and the provider
doesn't support URL-encoded listings (`list_url_encode` in the quirks) then ignore them.
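As an illustration only (the values are hypothetical, not a recommendation for any particular provider), a provider whose listings are not URL encoded and whose multipart ETags don't match AWS might end up with quirks like:
```yaml
quirks:
  list_url_encode: false    # listings are not URL encoded
  use_multipart_etag: false # multipart ETags differ from AWS
```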


@@ -1,139 +0,0 @@
name: AWS
description: Amazon Web Services (AWS) S3
region:
us-east-1: |-
The default endpoint - a good choice if you are unsure.
US Region, Northern Virginia, or Pacific Northwest.
Leave location constraint empty.
us-east-2: |-
US East (Ohio) Region.
Needs location constraint us-east-2.
us-west-1: |-
US West (Northern California) Region.
Needs location constraint us-west-1.
us-west-2: |-
US West (Oregon) Region.
Needs location constraint us-west-2.
ca-central-1: |-
Canada (Central) Region.
Needs location constraint ca-central-1.
eu-west-1: |-
EU (Ireland) Region.
Needs location constraint EU or eu-west-1.
eu-west-2: |-
EU (London) Region.
Needs location constraint eu-west-2.
eu-west-3: |-
EU (Paris) Region.
Needs location constraint eu-west-3.
eu-north-1: |-
EU (Stockholm) Region.
Needs location constraint eu-north-1.
eu-south-1: |-
EU (Milan) Region.
Needs location constraint eu-south-1.
eu-central-1: |-
EU (Frankfurt) Region.
Needs location constraint eu-central-1.
ap-southeast-1: |-
Asia Pacific (Singapore) Region.
Needs location constraint ap-southeast-1.
ap-southeast-2: |-
Asia Pacific (Sydney) Region.
Needs location constraint ap-southeast-2.
ap-northeast-1: |-
Asia Pacific (Tokyo) Region.
Needs location constraint ap-northeast-1.
ap-northeast-2: |-
Asia Pacific (Seoul).
Needs location constraint ap-northeast-2.
ap-northeast-3: |-
Asia Pacific (Osaka-Local).
Needs location constraint ap-northeast-3.
ap-south-1: |-
Asia Pacific (Mumbai).
Needs location constraint ap-south-1.
ap-east-1: |-
Asia Pacific (Hong Kong) Region.
Needs location constraint ap-east-1.
sa-east-1: |-
South America (Sao Paulo) Region.
Needs location constraint sa-east-1.
il-central-1: |-
Israel (Tel Aviv) Region.
Needs location constraint il-central-1.
me-south-1: |-
Middle East (Bahrain) Region.
Needs location constraint me-south-1.
af-south-1: |-
Africa (Cape Town) Region.
Needs location constraint af-south-1.
cn-north-1: |-
China (Beijing) Region.
Needs location constraint cn-north-1.
cn-northwest-1: |-
China (Ningxia) Region.
Needs location constraint cn-northwest-1.
us-gov-east-1: |-
AWS GovCloud (US-East) Region.
Needs location constraint us-gov-east-1.
us-gov-west-1: |-
AWS GovCloud (US) Region.
Needs location constraint us-gov-west-1.
endpoint: {}
location_constraint:
'': Empty for US Region, Northern Virginia, or Pacific Northwest
us-east-2: US East (Ohio) Region
us-west-1: US West (Northern California) Region
us-west-2: US West (Oregon) Region
ca-central-1: Canada (Central) Region
eu-west-1: EU (Ireland) Region
eu-west-2: EU (London) Region
eu-west-3: EU (Paris) Region
eu-north-1: EU (Stockholm) Region
eu-south-1: EU (Milan) Region
EU: EU Region
ap-southeast-1: Asia Pacific (Singapore) Region
ap-southeast-2: Asia Pacific (Sydney) Region
ap-northeast-1: Asia Pacific (Tokyo) Region
ap-northeast-2: Asia Pacific (Seoul) Region
ap-northeast-3: Asia Pacific (Osaka-Local) Region
ap-south-1: Asia Pacific (Mumbai) Region
ap-east-1: Asia Pacific (Hong Kong) Region
sa-east-1: South America (Sao Paulo) Region
il-central-1: Israel (Tel Aviv) Region
me-south-1: Middle East (Bahrain) Region
af-south-1: Africa (Cape Town) Region
cn-north-1: China (Beijing) Region
cn-northwest-1: China (Ningxia) Region
us-gov-east-1: AWS GovCloud (US-East) Region
us-gov-west-1: AWS GovCloud (US) Region
acl: {}
storage_class:
'': Default
STANDARD: Standard storage class
REDUCED_REDUNDANCY: Reduced redundancy storage class
STANDARD_IA: Standard Infrequent Access storage class
ONEZONE_IA: One Zone Infrequent Access storage class
GLACIER: Glacier Flexible Retrieval storage class
DEEP_ARCHIVE: Glacier Deep Archive storage class
INTELLIGENT_TIERING: Intelligent-Tiering storage class
GLACIER_IR: Glacier Instant Retrieval storage class
server_side_encryption:
'': None
AES256: AES256
aws:kms: aws:kms
bucket_acl: true
directory_bucket: true
leave_parts_on_error: true
requester_pays: true
sse_customer_algorithm: true
sse_customer_key: true
sse_customer_key_base64: true
sse_customer_key_md5: true
sse_kms_key_id: true
sts_endpoint: true
use_accelerate_endpoint: true
quirks:
might_gzip: false # Never auto gzips objects
use_unsigned_payload: false # AWS has trailer support which means it adds checksums in the trailer without seeking


@@ -1,37 +0,0 @@
name: Alibaba
description: Alibaba Cloud Object Storage System (OSS) formerly Aliyun
endpoint:
oss-accelerate.aliyuncs.com: Global Accelerate
oss-accelerate-overseas.aliyuncs.com: Global Accelerate (outside mainland China)
oss-cn-hangzhou.aliyuncs.com: East China 1 (Hangzhou)
oss-cn-shanghai.aliyuncs.com: East China 2 (Shanghai)
oss-cn-qingdao.aliyuncs.com: North China 1 (Qingdao)
oss-cn-beijing.aliyuncs.com: North China 2 (Beijing)
oss-cn-zhangjiakou.aliyuncs.com: North China 3 (Zhangjiakou)
oss-cn-huhehaote.aliyuncs.com: North China 5 (Hohhot)
oss-cn-wulanchabu.aliyuncs.com: North China 6 (Ulanqab)
oss-cn-shenzhen.aliyuncs.com: South China 1 (Shenzhen)
oss-cn-heyuan.aliyuncs.com: South China 2 (Heyuan)
oss-cn-guangzhou.aliyuncs.com: South China 3 (Guangzhou)
oss-cn-chengdu.aliyuncs.com: West China 1 (Chengdu)
oss-cn-hongkong.aliyuncs.com: Hong Kong (Hong Kong)
oss-us-west-1.aliyuncs.com: US West 1 (Silicon Valley)
oss-us-east-1.aliyuncs.com: US East 1 (Virginia)
oss-ap-southeast-1.aliyuncs.com: Southeast Asia Southeast 1 (Singapore)
oss-ap-southeast-2.aliyuncs.com: Asia Pacific Southeast 2 (Sydney)
oss-ap-southeast-3.aliyuncs.com: Southeast Asia Southeast 3 (Kuala Lumpur)
oss-ap-southeast-5.aliyuncs.com: Asia Pacific Southeast 5 (Jakarta)
oss-ap-northeast-1.aliyuncs.com: Asia Pacific Northeast 1 (Japan)
oss-ap-south-1.aliyuncs.com: Asia Pacific South 1 (Mumbai)
oss-eu-central-1.aliyuncs.com: Central Europe 1 (Frankfurt)
oss-eu-west-1.aliyuncs.com: West Europe (London)
oss-me-east-1.aliyuncs.com: Middle East 1 (Dubai)
acl: {}
storage_class:
'': Default
STANDARD: Standard storage class
GLACIER: Archive storage mode
STANDARD_IA: Infrequent access storage mode
bucket_acl: true
quirks:
use_multipart_etag: false # multipart ETags differ from AWS


@@ -1,19 +0,0 @@
name: ArvanCloud
description: Arvan Cloud Object Storage (AOS)
endpoint:
s3.ir-thr-at1.arvanstorage.ir: |-
The default endpoint - a good choice if you are unsure.
Tehran Iran (Simin)
s3.ir-tbz-sh1.arvanstorage.ir: Tabriz Iran (Shahriar)
location_constraint:
ir-thr-at1: Tehran Iran (Simin)
ir-tbz-sh1: Tabriz Iran (Shahriar)
acl: {}
storage_class:
STANDARD: Standard storage class
bucket_acl: true
quirks:
list_version: 1
force_path_style: true
list_url_encode: false
use_already_exists: false


@@ -1,20 +0,0 @@
name: Ceph
description: Ceph Object Storage
region: {}
endpoint: {}
location_constraint: {}
acl: {}
server_side_encryption:
'': None
AES256: AES256
aws:kms: aws:kms
bucket_acl: true
sse_customer_algorithm: true
sse_customer_key: true
sse_customer_key_base64: true
sse_customer_key_md5: true
sse_kms_key_id: true
quirks:
list_version: 1
force_path_style: true
list_url_encode: false


@@ -1,98 +0,0 @@
name: ChinaMobile
description: China Mobile Ecloud Elastic Object Storage (EOS)
endpoint:
eos-wuxi-1.cmecloud.cn: |-
The default endpoint - a good choice if you are unsure.
East China (Suzhou)
eos-jinan-1.cmecloud.cn: East China (Jinan)
eos-ningbo-1.cmecloud.cn: East China (Hangzhou)
eos-shanghai-1.cmecloud.cn: East China (Shanghai-1)
eos-zhengzhou-1.cmecloud.cn: Central China (Zhengzhou)
eos-hunan-1.cmecloud.cn: Central China (Changsha-1)
eos-zhuzhou-1.cmecloud.cn: Central China (Changsha-2)
eos-guangzhou-1.cmecloud.cn: South China (Guangzhou-2)
eos-dongguan-1.cmecloud.cn: South China (Guangzhou-3)
eos-beijing-1.cmecloud.cn: North China (Beijing-1)
eos-beijing-2.cmecloud.cn: North China (Beijing-2)
eos-beijing-4.cmecloud.cn: North China (Beijing-3)
eos-huhehaote-1.cmecloud.cn: North China (Huhehaote)
eos-chengdu-1.cmecloud.cn: Southwest China (Chengdu)
eos-chongqing-1.cmecloud.cn: Southwest China (Chongqing)
eos-guiyang-1.cmecloud.cn: Southwest China (Guiyang)
eos-xian-1.cmecloud.cn: Northwest China (Xian)
eos-yunnan.cmecloud.cn: Yunnan China (Kunming)
eos-yunnan-2.cmecloud.cn: Yunnan China (Kunming-2)
eos-tianjin-1.cmecloud.cn: Tianjin China (Tianjin)
eos-jilin-1.cmecloud.cn: Jilin China (Changchun)
eos-hubei-1.cmecloud.cn: Hubei China (Xiangyan)
eos-jiangxi-1.cmecloud.cn: Jiangxi China (Nanchang)
eos-gansu-1.cmecloud.cn: Gansu China (Lanzhou)
eos-shanxi-1.cmecloud.cn: Shanxi China (Taiyuan)
eos-liaoning-1.cmecloud.cn: Liaoning China (Shenyang)
eos-hebei-1.cmecloud.cn: Hebei China (Shijiazhuang)
eos-fujian-1.cmecloud.cn: Fujian China (Xiamen)
eos-guangxi-1.cmecloud.cn: Guangxi China (Nanning)
eos-anhui-1.cmecloud.cn: Anhui China (Huainan)
location_constraint:
wuxi1: East China (Suzhou)
jinan1: East China (Jinan)
ningbo1: East China (Hangzhou)
shanghai1: East China (Shanghai-1)
zhengzhou1: Central China (Zhengzhou)
hunan1: Central China (Changsha-1)
zhuzhou1: Central China (Changsha-2)
guangzhou1: South China (Guangzhou-2)
dongguan1: South China (Guangzhou-3)
beijing1: North China (Beijing-1)
beijing2: North China (Beijing-2)
beijing4: North China (Beijing-3)
huhehaote1: North China (Huhehaote)
chengdu1: Southwest China (Chengdu)
chongqing1: Southwest China (Chongqing)
guiyang1: Southwest China (Guiyang)
xian1: Northwest China (Xian)
yunnan: Yunnan China (Kunming)
yunnan2: Yunnan China (Kunming-2)
tianjin1: Tianjin China (Tianjin)
jilin1: Jilin China (Changchun)
hubei1: Hubei China (Xiangyan)
jiangxi1: Jiangxi China (Nanchang)
gansu1: Gansu China (Lanzhou)
shanxi1: Shanxi China (Taiyuan)
liaoning1: Liaoning China (Shenyang)
hebei1: Hebei China (Shijiazhuang)
fujian1: Fujian China (Xiamen)
guangxi1: Guangxi China (Nanning)
anhui1: Anhui China (Huainan)
acl:
private: |-
Owner gets FULL_CONTROL.
No one else has access rights (default).
public-read: |-
Owner gets FULL_CONTROL.
The AllUsers group gets READ access.
public-read-write: |-
Owner gets FULL_CONTROL.
The AllUsers group gets READ and WRITE access.
Granting this on a bucket is generally not recommended.
authenticated-read: |-
Owner gets FULL_CONTROL.
The AuthenticatedUsers group gets READ access.
storage_class:
'': Default
STANDARD: Standard storage class
GLACIER: Archive storage mode
STANDARD_IA: Infrequent access storage mode
server_side_encryption:
'': None
AES256: AES256
bucket_acl: true
sse_customer_algorithm: true
sse_customer_key: true
sse_customer_key_base64: true
sse_customer_key_md5: true
quirks:
list_version: 1
force_path_style: true
list_url_encode: false
use_already_exists: false


@@ -1,8 +0,0 @@
name: Cloudflare
description: Cloudflare R2 Storage
region:
auto: R2 buckets are automatically distributed across Cloudflare's data centers for low latency.
endpoint: {}
quirks:
force_path_style: true
use_multipart_etag: false # multipart ETags are random


@@ -1,10 +0,0 @@
name: Cubbit
description: Cubbit DS3 Object Storage
region:
eu-west-1: Europe West
endpoint:
s3.cubbit.eu: Cubbit DS3 Object Storage endpoint
acl: {}
bucket_acl: true
quirks:
use_multipart_etag: false


@@ -1,20 +0,0 @@
name: DigitalOcean
description: DigitalOcean Spaces
region: {}
endpoint:
syd1.digitaloceanspaces.com: DigitalOcean Spaces Sydney 1
sfo3.digitaloceanspaces.com: DigitalOcean Spaces San Francisco 3
sfo2.digitaloceanspaces.com: DigitalOcean Spaces San Francisco 2
fra1.digitaloceanspaces.com: DigitalOcean Spaces Frankfurt 1
nyc3.digitaloceanspaces.com: DigitalOcean Spaces New York 3
ams3.digitaloceanspaces.com: DigitalOcean Spaces Amsterdam 3
sgp1.digitaloceanspaces.com: DigitalOcean Spaces Singapore 1
lon1.digitaloceanspaces.com: DigitalOcean Spaces London 1
tor1.digitaloceanspaces.com: DigitalOcean Spaces Toronto 1
blr1.digitaloceanspaces.com: DigitalOcean Spaces Bangalore 1
location_constraint: {}
acl: {}
bucket_acl: true
quirks:
list_url_encode: false
use_already_exists: false


@@ -1,11 +0,0 @@
name: Dreamhost
description: Dreamhost DreamObjects
region: {}
endpoint:
objects-us-east-1.dream.io: Dream Objects endpoint
location_constraint: {}
acl: {}
bucket_acl: true
quirks:
list_url_encode: false
use_already_exists: false


@@ -1,9 +0,0 @@
name: Exaba
description: Exaba Object Storage
region: {}
endpoint: {}
location_constraint: {}
acl: {}
bucket_acl: true
quirks:
force_path_style: true


@@ -1,21 +0,0 @@
name: FileLu
description: FileLu S5 (S3-Compatible Object Storage)
region:
global: Global
us-east: North America (US-East)
eu-central: Europe (EU-Central)
ap-southeast: Asia Pacific (AP-Southeast)
me-central: Middle East (ME-Central)
endpoint:
s5lu.com: Global FileLu S5 endpoint
us.s5lu.com: North America (US-East) region endpoint
eu.s5lu.com: Europe (EU-Central) region endpoint
ap.s5lu.com: Asia Pacific (AP-Southeast) region endpoint
me.s5lu.com: Middle East (ME-Central) region endpoint
acl: {}
bucket_acl: true
quirks:
list_version: 1
force_path_style: true
list_url_encode: false
use_multipart_etag: false


@@ -1,6 +0,0 @@
name: FlashBlade
description: Pure Storage FlashBlade Object Storage
endpoint: {}
quirks:
might_gzip: false # never auto-gzip
force_path_style: true # supports vhost but defaults to path-style


@@ -1,20 +0,0 @@
name: GCS
description: Google Cloud Storage
region: {}
endpoint:
https://storage.googleapis.com: Google Cloud Storage endpoint
location_constraint: {}
acl: {}
bucket_acl: true
quirks:
# Google breaks the request Signature by mutating the accept-encoding HTTP header
# https://github.com/rclone/rclone/issues/6670
use_accept_encoding_gzip: false
sign_accept_encoding: false
use_already_exists: true # returns BucketNameUnavailable instead of BucketAlreadyExists but good enough!
# GCS doesn't like the x-id URL parameter the SDKv2 inserts
use_x_id: false
# GCS S3 doesn't support multi-part server side copy:
# See: https://issuetracker.google.com/issues/323465186
# So make cutoff very large which it does seem to support
copy_cutoff: 9223372036854775807


@@ -1,15 +0,0 @@
name: Hetzner
description: Hetzner Object Storage
region:
hel1: Helsinki
fsn1: Falkenstein
nbg1: Nuremberg
endpoint:
hel1.your-objectstorage.com: Helsinki
fsn1.your-objectstorage.com: Falkenstein
nbg1.your-objectstorage.com: Nuremberg
location_constraint: {}
acl: {}
bucket_acl: true
quirks:
use_already_exists: false


@@ -1,41 +0,0 @@
name: HuaweiOBS
description: Huawei Object Storage Service
region:
af-south-1: AF-Johannesburg
ap-southeast-2: AP-Bangkok
ap-southeast-3: AP-Singapore
cn-east-3: CN East-Shanghai1
cn-east-2: CN East-Shanghai2
cn-north-1: CN North-Beijing1
cn-north-4: CN North-Beijing4
cn-south-1: CN South-Guangzhou
ap-southeast-1: CN-Hong Kong
sa-argentina-1: LA-Buenos Aires1
sa-peru-1: LA-Lima1
na-mexico-1: LA-Mexico City1
sa-chile-1: LA-Santiago2
sa-brazil-1: LA-Sao Paulo1
ru-northwest-2: RU-Moscow2
endpoint:
obs.af-south-1.myhuaweicloud.com: AF-Johannesburg
obs.ap-southeast-2.myhuaweicloud.com: AP-Bangkok
obs.ap-southeast-3.myhuaweicloud.com: AP-Singapore
obs.cn-east-3.myhuaweicloud.com: CN East-Shanghai1
obs.cn-east-2.myhuaweicloud.com: CN East-Shanghai2
obs.cn-north-1.myhuaweicloud.com: CN North-Beijing1
obs.cn-north-4.myhuaweicloud.com: CN North-Beijing4
obs.cn-south-1.myhuaweicloud.com: CN South-Guangzhou
obs.ap-southeast-1.myhuaweicloud.com: CN-Hong Kong
obs.sa-argentina-1.myhuaweicloud.com: LA-Buenos Aires1
obs.sa-peru-1.myhuaweicloud.com: LA-Lima1
obs.na-mexico-1.myhuaweicloud.com: LA-Mexico City1
obs.sa-chile-1.myhuaweicloud.com: LA-Santiago2
obs.sa-brazil-1.myhuaweicloud.com: LA-Sao Paulo1
obs.ru-northwest-2.myhuaweicloud.com: RU-Moscow2
acl: {}
bucket_acl: true
quirks:
# Huawei OBS PFS does not support listObjectsV2, and if urlEncodeListing is turned on the marker will not work and the listing will keep returning the same page forever.
list_url_encode: false
list_version: 1
use_already_exists: false


@@ -1,126 +0,0 @@
name: IBMCOS
description: IBM COS S3
region: {}
endpoint:
s3.us.cloud-object-storage.appdomain.cloud: US Cross Region Endpoint
s3.dal.us.cloud-object-storage.appdomain.cloud: US Cross Region Dallas Endpoint
s3.wdc.us.cloud-object-storage.appdomain.cloud: US Cross Region Washington DC Endpoint
s3.sjc.us.cloud-object-storage.appdomain.cloud: US Cross Region San Jose Endpoint
s3.private.us.cloud-object-storage.appdomain.cloud: US Cross Region Private Endpoint
s3.private.dal.us.cloud-object-storage.appdomain.cloud: US Cross Region Dallas Private Endpoint
s3.private.wdc.us.cloud-object-storage.appdomain.cloud: US Cross Region Washington DC Private Endpoint
s3.private.sjc.us.cloud-object-storage.appdomain.cloud: US Cross Region San Jose Private Endpoint
s3.us-east.cloud-object-storage.appdomain.cloud: US Region East Endpoint
s3.private.us-east.cloud-object-storage.appdomain.cloud: US Region East Private Endpoint
s3.us-south.cloud-object-storage.appdomain.cloud: US Region South Endpoint
s3.private.us-south.cloud-object-storage.appdomain.cloud: US Region South Private Endpoint
s3.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Endpoint
s3.fra.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Frankfurt Endpoint
s3.mil.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Milan Endpoint
s3.ams.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Amsterdam Endpoint
s3.private.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Private Endpoint
s3.private.fra.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Frankfurt Private Endpoint
s3.private.mil.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Milan Private Endpoint
s3.private.ams.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Amsterdam Private Endpoint
s3.eu-gb.cloud-object-storage.appdomain.cloud: Great Britain Endpoint
s3.private.eu-gb.cloud-object-storage.appdomain.cloud: Great Britain Private Endpoint
s3.eu-de.cloud-object-storage.appdomain.cloud: EU Region DE Endpoint
s3.private.eu-de.cloud-object-storage.appdomain.cloud: EU Region DE Private Endpoint
s3.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Endpoint
s3.tok.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Tokyo Endpoint
s3.hkg.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Hong Kong Endpoint
s3.seo.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Seoul Endpoint
s3.private.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Private Endpoint
s3.private.tok.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Tokyo Private Endpoint
s3.private.hkg.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Hong Kong Private Endpoint
s3.private.seo.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Seoul Private Endpoint
s3.jp-tok.cloud-object-storage.appdomain.cloud: APAC Region Japan Endpoint
s3.private.jp-tok.cloud-object-storage.appdomain.cloud: APAC Region Japan Private Endpoint
s3.au-syd.cloud-object-storage.appdomain.cloud: APAC Region Australia Endpoint
s3.private.au-syd.cloud-object-storage.appdomain.cloud: APAC Region Australia Private Endpoint
s3.ams03.cloud-object-storage.appdomain.cloud: Amsterdam Single Site Endpoint
s3.private.ams03.cloud-object-storage.appdomain.cloud: Amsterdam Single Site Private Endpoint
s3.che01.cloud-object-storage.appdomain.cloud: Chennai Single Site Endpoint
s3.private.che01.cloud-object-storage.appdomain.cloud: Chennai Single Site Private Endpoint
s3.mel01.cloud-object-storage.appdomain.cloud: Melbourne Single Site Endpoint
s3.private.mel01.cloud-object-storage.appdomain.cloud: Melbourne Single Site Private Endpoint
s3.osl01.cloud-object-storage.appdomain.cloud: Oslo Single Site Endpoint
s3.private.osl01.cloud-object-storage.appdomain.cloud: Oslo Single Site Private Endpoint
s3.tor01.cloud-object-storage.appdomain.cloud: Toronto Single Site Endpoint
s3.private.tor01.cloud-object-storage.appdomain.cloud: Toronto Single Site Private Endpoint
s3.seo01.cloud-object-storage.appdomain.cloud: Seoul Single Site Endpoint
s3.private.seo01.cloud-object-storage.appdomain.cloud: Seoul Single Site Private Endpoint
s3.mon01.cloud-object-storage.appdomain.cloud: Montreal Single Site Endpoint
s3.private.mon01.cloud-object-storage.appdomain.cloud: Montreal Single Site Private Endpoint
s3.mex01.cloud-object-storage.appdomain.cloud: Mexico Single Site Endpoint
s3.private.mex01.cloud-object-storage.appdomain.cloud: Mexico Single Site Private Endpoint
s3.sjc04.cloud-object-storage.appdomain.cloud: San Jose Single Site Endpoint
s3.private.sjc04.cloud-object-storage.appdomain.cloud: San Jose Single Site Private Endpoint
s3.mil01.cloud-object-storage.appdomain.cloud: Milan Single Site Endpoint
s3.private.mil01.cloud-object-storage.appdomain.cloud: Milan Single Site Private Endpoint
s3.hkg02.cloud-object-storage.appdomain.cloud: Hong Kong Single Site Endpoint
s3.private.hkg02.cloud-object-storage.appdomain.cloud: Hong Kong Single Site Private Endpoint
s3.par01.cloud-object-storage.appdomain.cloud: Paris Single Site Endpoint
s3.private.par01.cloud-object-storage.appdomain.cloud: Paris Single Site Private Endpoint
s3.sng01.cloud-object-storage.appdomain.cloud: Singapore Single Site Endpoint
s3.private.sng01.cloud-object-storage.appdomain.cloud: Singapore Single Site Private Endpoint
location_constraint:
us-standard: US Cross Region Standard
us-vault: US Cross Region Vault
us-cold: US Cross Region Cold
us-flex: US Cross Region Flex
us-east-standard: US East Region Standard
us-east-vault: US East Region Vault
us-east-cold: US East Region Cold
us-east-flex: US East Region Flex
us-south-standard: US South Region Standard
us-south-vault: US South Region Vault
us-south-cold: US South Region Cold
us-south-flex: US South Region Flex
eu-standard: EU Cross Region Standard
eu-vault: EU Cross Region Vault
eu-cold: EU Cross Region Cold
eu-flex: EU Cross Region Flex
eu-gb-standard: Great Britain Standard
eu-gb-vault: Great Britain Vault
eu-gb-cold: Great Britain Cold
eu-gb-flex: Great Britain Flex
ap-standard: APAC Standard
ap-vault: APAC Vault
ap-cold: APAC Cold
ap-flex: APAC Flex
mel01-standard: Melbourne Standard
mel01-vault: Melbourne Vault
mel01-cold: Melbourne Cold
mel01-flex: Melbourne Flex
tor01-standard: Toronto Standard
tor01-vault: Toronto Vault
tor01-cold: Toronto Cold
tor01-flex: Toronto Flex
acl:
private: |-
Owner gets FULL_CONTROL.
No one else has access rights (default).
This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS.
public-read: |-
Owner gets FULL_CONTROL.
The AllUsers group gets READ access.
This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS.
public-read-write: |-
Owner gets FULL_CONTROL.
The AllUsers group gets READ and WRITE access.
This acl is available on IBM Cloud (Infra), On-Premise IBM COS.
authenticated-read: |-
Owner gets FULL_CONTROL.
The AuthenticatedUsers group gets READ access.
Not supported on Buckets.
This acl is available on IBM Cloud (Infra) and On-Premise IBM COS.
ibm_api_key: true
ibm_resource_instance_id: true
bucket_acl: true
quirks:
list_version: 1
force_path_style: true
list_url_encode: false
use_multipart_etag: false
use_already_exists: false # returns BucketAlreadyExists


@@ -1,7 +0,0 @@
name: IDrive
description: IDrive e2
acl: {}
bucket_acl: true
quirks:
force_path_style: true
use_already_exists: false


@@ -1,17 +0,0 @@
name: IONOS
description: IONOS Cloud
region:
de: Frankfurt, Germany
eu-central-2: Berlin, Germany
eu-south-2: Logrono, Spain
endpoint:
s3-eu-central-1.ionoscloud.com: Frankfurt, Germany
s3-eu-central-2.ionoscloud.com: Berlin, Germany
s3-eu-south-2.ionoscloud.com: Logrono, Spain
acl: {}
bucket_acl: true
quirks:
# listObjectsV2 supported - https://api.ionos.com/docs/s3/#Basic-Operations-get-Bucket-list-type-2
force_path_style: true
list_url_encode: false
use_already_exists: false


@@ -1,10 +0,0 @@
name: Intercolo
description: Intercolo Object Storage
region:
de-fra: Frankfurt, Germany
endpoint:
de-fra.i3storage.com: Frankfurt, Germany
acl: {}
bucket_acl: true
quirks:
use_unsigned_payload: false # has trailer support


@@ -1,11 +0,0 @@
name: Leviia
description: Leviia Object Storage
region: {}
endpoint:
s3.leviia.com: |-
The default endpoint
Leviia
acl: {}
bucket_acl: true
quirks:
use_already_exists: false


@@ -1,15 +0,0 @@
name: Liara
description: Liara Object Storage
endpoint:
storage.iran.liara.space: |-
The default endpoint
Iran
acl: {}
storage_class:
STANDARD: Standard storage class
bucket_acl: true
quirks:
force_path_style: true
list_url_encode: false
use_multipart_etag: false # multipart ETags differ from AWS
use_already_exists: false


@@ -1,26 +0,0 @@
name: Linode
description: Linode Object Storage
endpoint:
nl-ams-1.linodeobjects.com: Amsterdam (Netherlands), nl-ams-1
us-southeast-1.linodeobjects.com: Atlanta, GA (USA), us-southeast-1
in-maa-1.linodeobjects.com: Chennai (India), in-maa-1
us-ord-1.linodeobjects.com: Chicago, IL (USA), us-ord-1
eu-central-1.linodeobjects.com: Frankfurt (Germany), eu-central-1
id-cgk-1.linodeobjects.com: Jakarta (Indonesia), id-cgk-1
gb-lon-1.linodeobjects.com: London 2 (Great Britain), gb-lon-1
us-lax-1.linodeobjects.com: Los Angeles, CA (USA), us-lax-1
es-mad-1.linodeobjects.com: Madrid (Spain), es-mad-1
au-mel-1.linodeobjects.com: Melbourne (Australia), au-mel-1
us-mia-1.linodeobjects.com: Miami, FL (USA), us-mia-1
it-mil-1.linodeobjects.com: Milan (Italy), it-mil-1
us-east-1.linodeobjects.com: Newark, NJ (USA), us-east-1
jp-osa-1.linodeobjects.com: Osaka (Japan), jp-osa-1
fr-par-1.linodeobjects.com: Paris (France), fr-par-1
br-gru-1.linodeobjects.com: São Paulo (Brazil), br-gru-1
us-sea-1.linodeobjects.com: Seattle, WA (USA), us-sea-1
ap-south-1.linodeobjects.com: Singapore, ap-south-1
sg-sin-1.linodeobjects.com: Singapore 2, sg-sin-1
se-sto-1.linodeobjects.com: Stockholm (Sweden), se-sto-1
us-iad-1.linodeobjects.com: Washington, DC, (USA), us-iad-1
acl: {}
bucket_acl: true


@@ -1,12 +0,0 @@
name: LyveCloud
description: Seagate Lyve Cloud
region: {}
endpoint:
's3.us-west-1.{account_name}.lyve.seagate.com': US West 1 - California
's3.eu-west-1.{account_name}.lyve.seagate.com': EU West 1 - Ireland
location_constraint: {}
acl: {}
bucket_acl: true
quirks:
use_multipart_etag: false # multipart ETags differ from AWS
use_already_exists: false


@@ -1,16 +0,0 @@
name: Magalu
description: Magalu Object Storage
endpoint:
br-se1.magaluobjects.com: São Paulo, SP (BR), br-se1
br-ne1.magaluobjects.com: Fortaleza, CE (BR), br-ne1
acl: {}
storage_class:
STANDARD: Standard storage class
GLACIER_IR: Glacier Instant Retrieval storage class
bucket_acl: true
quirks:
list_version: 1
force_path_style: true
list_url_encode: false
use_multipart_etag: false
use_already_exists: false


@@ -1,15 +0,0 @@
name: Mega
description: MEGA S4 Object Storage
endpoint:
s3.eu-central-1.s4.mega.io: Mega S4 eu-central-1 (Amsterdam)
s3.eu-central-2.s4.mega.io: Mega S4 eu-central-2 (Bettembourg)
s3.ca-central-1.s4.mega.io: Mega S4 ca-central-1 (Montreal)
s3.ca-west-1.s4.mega.io: Mega S4 ca-west-1 (Vancouver)
bucket_acl: true
quirks:
list_version: 2
force_path_style: true
list_url_encode: true
use_multipart_etag: false
use_already_exists: false
copy_cutoff: 9223372036854775807


@@ -1,18 +0,0 @@
name: Minio
description: Minio Object Storage
region: {}
endpoint: {}
location_constraint: {}
acl: {}
server_side_encryption:
'': None
AES256: AES256
aws:kms: aws:kms
bucket_acl: true
sse_customer_algorithm: true
sse_customer_key: true
sse_customer_key_base64: true
sse_customer_key_md5: true
sse_kms_key_id: true
quirks:
force_path_style: true


@@ -1,12 +0,0 @@
name: Netease
description: Netease Object Storage (NOS)
region: {}
endpoint: {}
location_constraint: {}
acl: {}
bucket_acl: true
quirks:
list_version: 1
list_url_encode: false
use_multipart_etag: false
use_already_exists: false


@@ -1,36 +0,0 @@
name: OVHcloud
description: OVHcloud Object Storage
region:
gra: Gravelines, France
rbx: Roubaix, France
sbg: Strasbourg, France
eu-west-par: Paris, France (3AZ)
de: Frankfurt, Germany
uk: London, United Kingdom
waw: Warsaw, Poland
bhs: Beauharnois, Canada
ca-east-tor: Toronto, Canada
sgp: Singapore
ap-southeast-syd: Sydney, Australia
ap-south-mum: Mumbai, India
us-east-va: Vint Hill, Virginia, USA
us-west-or: Hillsboro, Oregon, USA
rbx-archive: Roubaix, France (Cold Archive)
endpoint:
s3.gra.io.cloud.ovh.net: OVHcloud Gravelines, France
s3.rbx.io.cloud.ovh.net: OVHcloud Roubaix, France
s3.sbg.io.cloud.ovh.net: OVHcloud Strasbourg, France
s3.eu-west-par.io.cloud.ovh.net: OVHcloud Paris, France (3AZ)
s3.de.io.cloud.ovh.net: OVHcloud Frankfurt, Germany
s3.uk.io.cloud.ovh.net: OVHcloud London, United Kingdom
s3.waw.io.cloud.ovh.net: OVHcloud Warsaw, Poland
s3.bhs.io.cloud.ovh.net: OVHcloud Beauharnois, Canada
s3.ca-east-tor.io.cloud.ovh.net: OVHcloud Toronto, Canada
s3.sgp.io.cloud.ovh.net: OVHcloud Singapore
s3.ap-southeast-syd.io.cloud.ovh.net: OVHcloud Sydney, Australia
s3.ap-south-mum.io.cloud.ovh.net: OVHcloud Mumbai, India
s3.us-east-va.io.cloud.ovh.us: OVHcloud Vint Hill, Virginia, USA
s3.us-west-or.io.cloud.ovh.us: OVHcloud Hillsboro, Oregon, USA
s3.rbx-archive.io.cloud.ovh.net: OVHcloud Roubaix, France (Cold Archive)
acl: {}
bucket_acl: true


@@ -1,39 +0,0 @@
name: Other
description: Any other S3 compatible provider
region:
'': |-
Use this if unsure.
Will use v4 signatures and an empty region.
other-v2-signature: |-
Use this only if v4 signatures don't work.
E.g. pre Jewel/v10 CEPH.
endpoint: {}
location_constraint: {}
acl:
private: |-
Owner gets FULL_CONTROL.
No one else has access rights (default).
public-read: |-
Owner gets FULL_CONTROL.
The AllUsers group gets READ access.
public-read-write: |-
Owner gets FULL_CONTROL.
The AllUsers group gets READ and WRITE access.
Granting this on a bucket is generally not recommended.
authenticated-read: |-
Owner gets FULL_CONTROL.
The AuthenticatedUsers group gets READ access.
bucket-owner-read: |-
Object owner gets FULL_CONTROL.
Bucket owner gets READ access.
If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
bucket-owner-full-control: |-
Both the object owner and the bucket owner get FULL_CONTROL over the object.
If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
bucket_acl: true
quirks:
list_version: 1
force_path_style: true
list_url_encode: false
use_multipart_etag: false
use_already_exists: false


@@ -1,18 +0,0 @@
name: Outscale
description: OUTSCALE Object Storage (OOS)
region:
eu-west-2: Paris, France
us-east-2: New Jersey, USA
us-west-1: California, USA
cloudgouv-eu-west-1: SecNumCloud, Paris, France
ap-northeast-1: Tokyo, Japan
endpoint:
oos.eu-west-2.outscale.com: Outscale EU West 2 (Paris)
oos.us-east-2.outscale.com: Outscale US east 2 (New Jersey)
oos.us-west-1.outscale.com: Outscale US West 1 (California)
oos.cloudgouv-eu-west-1.outscale.com: Outscale SecNumCloud (Paris)
oos.ap-northeast-1.outscale.com: Outscale AP Northeast 1 (Japan)
acl: {}
bucket_acl: true
quirks:
force_path_style: true


@@ -1,19 +0,0 @@
name: Petabox
description: Petabox Object Storage
region:
us-east-1: US East (N. Virginia)
eu-central-1: Europe (Frankfurt)
ap-southeast-1: Asia Pacific (Singapore)
me-south-1: Middle East (Bahrain)
sa-east-1: South America (São Paulo)
endpoint:
s3.petabox.io: US East (N. Virginia)
s3.us-east-1.petabox.io: US East (N. Virginia)
s3.eu-central-1.petabox.io: Europe (Frankfurt)
s3.ap-southeast-1.petabox.io: Asia Pacific (Singapore)
s3.me-south-1.petabox.io: Middle East (Bahrain)
s3.sa-east-1.petabox.io: South America (São Paulo)
acl: {}
bucket_acl: true
quirks:
use_already_exists: false


@@ -1,53 +0,0 @@
name: Qiniu
description: Qiniu Object Storage (Kodo)
region:
cn-east-1: |-
The default endpoint - a good choice if you are unsure.
East China Region 1.
Needs location constraint cn-east-1.
cn-east-2: |-
East China Region 2.
Needs location constraint cn-east-2.
cn-north-1: |-
North China Region 1.
Needs location constraint cn-north-1.
cn-south-1: |-
South China Region 1.
Needs location constraint cn-south-1.
us-north-1: |-
North America Region.
Needs location constraint us-north-1.
ap-southeast-1: |-
Southeast Asia Region 1.
Needs location constraint ap-southeast-1.
ap-northeast-1: |-
Northeast Asia Region 1.
Needs location constraint ap-northeast-1.
endpoint:
s3-cn-east-1.qiniucs.com: East China Endpoint 1
s3-cn-east-2.qiniucs.com: East China Endpoint 2
s3-cn-north-1.qiniucs.com: North China Endpoint 1
s3-cn-south-1.qiniucs.com: South China Endpoint 1
s3-us-north-1.qiniucs.com: North America Endpoint 1
s3-ap-southeast-1.qiniucs.com: Southeast Asia Endpoint 1
s3-ap-northeast-1.qiniucs.com: Northeast Asia Endpoint 1
location_constraint:
cn-east-1: East China Region 1
cn-east-2: East China Region 2
cn-north-1: North China Region 1
cn-south-1: South China Region 1
us-north-1: North America Region 1
ap-southeast-1: Southeast Asia Region 1
ap-northeast-1: Northeast Asia Region 1
acl: {}
storage_class:
STANDARD: Standard storage class
LINE: Infrequent access storage mode
GLACIER: Archive storage mode
DEEP_ARCHIVE: Deep archive storage mode
bucket_acl: true
quirks:
use_multipart_etag: false
list_url_encode: false
force_path_style: true
use_already_exists: false


@@ -1,15 +0,0 @@
name: Rabata
description: Rabata Cloud Storage
region:
us-east-1: US East (N. Virginia)
eu-west-1: EU (Ireland)
eu-west-2: EU (London)
endpoint:
s3.us-east-1.rabata.io: US East (N. Virginia)
s3.eu-west-1.rabata.io: EU West (Ireland)
s3.eu-west-2.rabata.io: EU West (London)
location_constraint:
us-east-1: US East (N. Virginia)
eu-west-1: EU (Ireland)
eu-west-2: EU (London)
# server side copy not supported


@@ -1,67 +0,0 @@
name: RackCorp
description: RackCorp Object Storage
region:
global: Global CDN (All locations) Region
au: Australia (All states)
au-nsw: NSW (Australia) Region
au-qld: QLD (Australia) Region
au-vic: VIC (Australia) Region
au-wa: Perth (Australia) Region
ph: Manila (Philippines) Region
th: Bangkok (Thailand) Region
hk: HK (Hong Kong) Region
mn: Ulaanbaatar (Mongolia) Region
kg: Bishkek (Kyrgyzstan) Region
id: Jakarta (Indonesia) Region
jp: Tokyo (Japan) Region
sg: SG (Singapore) Region
de: Frankfurt (Germany) Region
us: USA (AnyCast) Region
us-east-1: New York (USA) Region
us-west-1: Fremont (USA) Region
nz: Auckland (New Zealand) Region
endpoint:
s3.rackcorp.com: Global (AnyCast) Endpoint
au.s3.rackcorp.com: Australia (Anycast) Endpoint
au-nsw.s3.rackcorp.com: Sydney (Australia) Endpoint
au-qld.s3.rackcorp.com: Brisbane (Australia) Endpoint
au-vic.s3.rackcorp.com: Melbourne (Australia) Endpoint
au-wa.s3.rackcorp.com: Perth (Australia) Endpoint
ph.s3.rackcorp.com: Manila (Philippines) Endpoint
th.s3.rackcorp.com: Bangkok (Thailand) Endpoint
hk.s3.rackcorp.com: HK (Hong Kong) Endpoint
mn.s3.rackcorp.com: Ulaanbaatar (Mongolia) Endpoint
kg.s3.rackcorp.com: Bishkek (Kyrgyzstan) Endpoint
id.s3.rackcorp.com: Jakarta (Indonesia) Endpoint
jp.s3.rackcorp.com: Tokyo (Japan) Endpoint
sg.s3.rackcorp.com: SG (Singapore) Endpoint
de.s3.rackcorp.com: Frankfurt (Germany) Endpoint
us.s3.rackcorp.com: USA (AnyCast) Endpoint
us-east-1.s3.rackcorp.com: New York (USA) Endpoint
us-west-1.s3.rackcorp.com: Fremont (USA) Endpoint
nz.s3.rackcorp.com: Auckland (New Zealand) Endpoint
location_constraint:
global: Global CDN Region
au: Australia (All locations)
au-nsw: NSW (Australia) Region
au-qld: QLD (Australia) Region
au-vic: VIC (Australia) Region
au-wa: Perth (Australia) Region
ph: Manila (Philippines) Region
th: Bangkok (Thailand) Region
hk: HK (Hong Kong) Region
mn: Ulaanbaatar (Mongolia) Region
kg: Bishkek (Kyrgyzstan) Region
id: Jakarta (Indonesia) Region
jp: Tokyo (Japan) Region
sg: SG (Singapore) Region
de: Frankfurt (Germany) Region
us: USA (AnyCast) Region
us-east-1: New York (USA) Region
us-west-1: Fremont (USA) Region
nz: Auckland (New Zealand) Region
acl: {}
bucket_acl: true
quirks:
use_multipart_etag: false
use_already_exists: false


@@ -1,11 +0,0 @@
name: Rclone
description: Rclone S3 Server
endpoint: {}
quirks:
force_path_style: true
use_multipart_etag: false
use_already_exists: false
# rclone serve doesn't support multi-part server side copy:
# See: https://github.com/rclone/rclone/issues/7454
# So make cutoff very large which it does support
copy_cutoff: 9223372036854775807


@@ -1,28 +0,0 @@
name: Scaleway
description: Scaleway Object Storage
region:
nl-ams: Amsterdam, The Netherlands
fr-par: Paris, France
pl-waw: Warsaw, Poland
endpoint:
s3.nl-ams.scw.cloud: Amsterdam Endpoint
s3.fr-par.scw.cloud: Paris Endpoint
s3.pl-waw.scw.cloud: Warsaw Endpoint
acl: {}
storage_class:
'': Default.
STANDARD: |-
The Standard class for any upload.
Suitable for on-demand content like streaming or CDN.
Available in all regions.
GLACIER: |-
Archived storage.
Prices are lower, but it needs to be restored first to be accessed.
Available in FR-PAR and NL-AMS regions.
ONEZONE_IA: |-
One Zone - Infrequent Access.
A good choice for storing secondary backup copies or easily re-creatable data.
Available in the FR-PAR region only.
bucket_acl: true
quirks:
max_upload_parts: 1000


@@ -1,14 +0,0 @@
name: SeaweedFS
description: SeaweedFS S3
region: {}
endpoint:
localhost:8333: SeaweedFS S3 localhost
location_constraint: {}
acl: {}
bucket_acl: true
quirks:
list_version: 1
force_path_style: true
list_url_encode: false
use_multipart_etag: false
use_already_exists: false


@@ -1,8 +0,0 @@
name: Selectel
description: Selectel Object Storage
region:
ru-1: St. Petersburg
endpoint:
s3.ru-1.storage.selcloud.ru: Saint Petersburg
quirks:
list_url_encode: false


@@ -1,17 +0,0 @@
name: Servercore
description: Servercore Object Storage
region:
ru-1: St. Petersburg
gis-1: Moscow
ru-7: Moscow
uz-2: Tashkent, Uzbekistan
kz-1: Almaty, Kazakhstan
endpoint:
s3.ru-1.storage.selcloud.ru: Saint Petersburg
s3.gis-1.storage.selcloud.ru: Moscow
s3.ru-7.storage.selcloud.ru: Moscow
s3.uz-2.srvstorage.uz: Tashkent, Uzbekistan
s3.kz-1.srvstorage.kz: Almaty, Kazakhstan
bucket_acl: true
quirks:
list_url_encode: false


@@ -1,5 +0,0 @@
name: SpectraLogic
description: Spectra Logic Black Pearl
endpoint: {}
quirks:
force_path_style: true # path-style required


@@ -1,14 +0,0 @@
name: StackPath
description: StackPath Object Storage
region: {}
endpoint:
s3.us-east-2.stackpathstorage.com: US East Endpoint
s3.us-west-1.stackpathstorage.com: US West Endpoint
s3.eu-central-1.stackpathstorage.com: EU Endpoint
acl: {}
bucket_acl: true
quirks:
list_version: 1
force_path_style: true
list_url_encode: false
use_already_exists: false


@@ -1,11 +0,0 @@
name: Storj
description: Storj (S3 Compatible Gateway)
endpoint:
gateway.storjshare.io: Global Hosted Gateway
quirks:
use_already_exists: false # returns BucketAlreadyExists
# Storj doesn't support multi-part server side copy:
# https://github.com/storj/roadmap/issues/40
# So make cutoff very large which it does support
copy_cutoff: 9223372036854775807
min_chunk_size: 67108864


@@ -1,18 +0,0 @@
name: Synology
description: Synology C2 Object Storage
region:
eu-001: Europe Region 1
eu-002: Europe Region 2
us-001: US Region 1
us-002: US Region 2
tw-001: Asia (Taiwan)
endpoint:
eu-001.s3.synologyc2.net: EU Endpoint 1
eu-002.s3.synologyc2.net: EU Endpoint 2
us-001.s3.synologyc2.net: US Endpoint 1
us-002.s3.synologyc2.net: US Endpoint 2
tw-001.s3.synologyc2.net: TW Endpoint 1
location_constraint: {}
quirks:
use_multipart_etag: false
use_already_exists: false


@@ -1,52 +0,0 @@
name: TencentCOS
description: Tencent Cloud Object Storage (COS)
endpoint:
cos.ap-beijing.myqcloud.com: Beijing Region
cos.ap-nanjing.myqcloud.com: Nanjing Region
cos.ap-shanghai.myqcloud.com: Shanghai Region
cos.ap-guangzhou.myqcloud.com: Guangzhou Region
cos.ap-chengdu.myqcloud.com: Chengdu Region
cos.ap-chongqing.myqcloud.com: Chongqing Region
cos.ap-hongkong.myqcloud.com: Hong Kong (China) Region
cos.ap-singapore.myqcloud.com: Singapore Region
cos.ap-mumbai.myqcloud.com: Mumbai Region
cos.ap-seoul.myqcloud.com: Seoul Region
cos.ap-bangkok.myqcloud.com: Bangkok Region
cos.ap-tokyo.myqcloud.com: Tokyo Region
cos.na-siliconvalley.myqcloud.com: Silicon Valley Region
cos.na-ashburn.myqcloud.com: Virginia Region
cos.na-toronto.myqcloud.com: Toronto Region
cos.eu-frankfurt.myqcloud.com: Frankfurt Region
cos.eu-moscow.myqcloud.com: Moscow Region
cos.accelerate.myqcloud.com: Use Tencent COS Accelerate Endpoint
acl:
default: |-
Owner gets FULL_CONTROL.
No one else has access rights (default).
public-read: |-
Owner gets FULL_CONTROL.
The AllUsers group gets READ access.
public-read-write: |-
Owner gets FULL_CONTROL.
The AllUsers group gets READ and WRITE access.
Granting this on a bucket is generally not recommended.
authenticated-read: |-
Owner gets FULL_CONTROL.
The AuthenticatedUsers group gets READ access.
bucket-owner-read: |-
Object owner gets FULL_CONTROL.
Bucket owner gets READ access.
If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
bucket-owner-full-control: |-
Both the object owner and the bucket owner get FULL_CONTROL over the object.
If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
storage_class:
'': Default
STANDARD: Standard storage class
ARCHIVE: Archive storage mode
STANDARD_IA: Infrequent access storage mode
bucket_acl: true
quirks:
list_version: 1
use_multipart_etag: false
use_already_exists: false


@@ -1,21 +0,0 @@
name: Wasabi
description: Wasabi Object Storage
region: {}
endpoint:
s3.wasabisys.com: Wasabi US East 1 (N. Virginia)
s3.us-east-2.wasabisys.com: Wasabi US East 2 (N. Virginia)
s3.us-central-1.wasabisys.com: Wasabi US Central 1 (Texas)
s3.us-west-1.wasabisys.com: Wasabi US West 1 (Oregon)
s3.ca-central-1.wasabisys.com: Wasabi CA Central 1 (Toronto)
s3.eu-central-1.wasabisys.com: Wasabi EU Central 1 (Amsterdam)
s3.eu-central-2.wasabisys.com: Wasabi EU Central 2 (Frankfurt)
s3.eu-west-1.wasabisys.com: Wasabi EU West 1 (London)
s3.eu-west-2.wasabisys.com: Wasabi EU West 2 (Paris)
s3.eu-south-1.wasabisys.com: Wasabi EU South 1 (Milan)
s3.ap-northeast-1.wasabisys.com: Wasabi AP Northeast 1 (Tokyo) endpoint
s3.ap-northeast-2.wasabisys.com: Wasabi AP Northeast 2 (Osaka) endpoint
s3.ap-southeast-1.wasabisys.com: Wasabi AP Southeast 1 (Singapore)
s3.ap-southeast-2.wasabisys.com: Wasabi AP Southeast 2 (Sydney)
location_constraint: {}
acl: {}
bucket_acl: true


@@ -1,14 +0,0 @@
name: Zata
description: Zata (S3 compatible Gateway)
region:
us-east-1: Indore, Madhya Pradesh, India
endpoint:
idr01.zata.ai: South Asia Endpoint
location_constraint: {}
acl: {}
bucket_acl: true
quirks:
use_multipart_etag: false
might_gzip: false
use_unsigned_payload: false
use_already_exists: false


@@ -1,236 +0,0 @@
package s3
import (
"embed"
stdfs "io/fs"
"os"
"sort"
"strings"
"github.com/rclone/rclone/fs"
orderedmap "github.com/wk8/go-ordered-map/v2"
"gopkg.in/yaml.v3"
)
// YamlMap is converted to YAML in the correct order
type YamlMap = *orderedmap.OrderedMap[string, string]
// NewYamlMap creates a new ordered map
var NewYamlMap = orderedmap.New[string, string]
// Quirks defines all the S3 provider quirks
type Quirks struct {
ListVersion *int `yaml:"list_version,omitempty"` // 1 or 2
ForcePathStyle *bool `yaml:"force_path_style,omitempty"` // true = path-style
ListURLEncode *bool `yaml:"list_url_encode,omitempty"`
UseMultipartEtag *bool `yaml:"use_multipart_etag,omitempty"`
UseAlreadyExists *bool `yaml:"use_already_exists,omitempty"`
UseAcceptEncodingGzip *bool `yaml:"use_accept_encoding_gzip,omitempty"`
MightGzip *bool `yaml:"might_gzip,omitempty"`
UseMultipartUploads *bool `yaml:"use_multipart_uploads,omitempty"`
UseUnsignedPayload *bool `yaml:"use_unsigned_payload,omitempty"`
UseXID *bool `yaml:"use_x_id,omitempty"`
SignAcceptEncoding *bool `yaml:"sign_accept_encoding,omitempty"`
CopyCutoff *int64 `yaml:"copy_cutoff,omitempty"`
MaxUploadParts *int `yaml:"max_upload_parts,omitempty"`
MinChunkSize *int64 `yaml:"min_chunk_size,omitempty"`
}
// Provider defines the configurable data in each provider.yaml
type Provider struct {
Name string `yaml:"name,omitempty"`
Description string `yaml:"description,omitempty"`
Region YamlMap `yaml:"region,omitempty"`
Endpoint YamlMap `yaml:"endpoint,omitempty"`
LocationConstraint YamlMap `yaml:"location_constraint,omitempty"`
ACL YamlMap `yaml:"acl,omitempty"`
StorageClass YamlMap `yaml:"storage_class,omitempty"`
ServerSideEncryption YamlMap `yaml:"server_side_encryption,omitempty"`
// other
IBMApiKey bool `yaml:"ibm_api_key,omitempty"`
IBMResourceInstanceID bool `yaml:"ibm_resource_instance_id,omitempty"`
// advanced
BucketACL bool `yaml:"bucket_acl,omitempty"`
DirectoryBucket bool `yaml:"directory_bucket,omitempty"`
LeavePartsOnError bool `yaml:"leave_parts_on_error,omitempty"`
RequesterPays bool `yaml:"requester_pays,omitempty"`
SSECustomerAlgorithm bool `yaml:"sse_customer_algorithm,omitempty"`
SSECustomerKey bool `yaml:"sse_customer_key,omitempty"`
SSECustomerKeyBase64 bool `yaml:"sse_customer_key_base64,omitempty"`
SSECustomerKeyMd5 bool `yaml:"sse_customer_key_md5,omitempty"`
SSEKmsKeyID bool `yaml:"sse_kms_key_id,omitempty"`
STSEndpoint bool `yaml:"sts_endpoint,omitempty"`
UseAccelerateEndpoint bool `yaml:"use_accelerate_endpoint,omitempty"`
Quirks Quirks `yaml:"quirks,omitempty"`
}
//go:embed provider/*.yaml
var providerFS embed.FS
// addProvidersToInfo adds provider information to the fs.RegInfo
func addProvidersToInfo(info *fs.RegInfo) *fs.RegInfo {
providerMap := loadProviders()
providerList := constructProviders(info.Options, providerMap)
info.Description += strings.TrimSuffix(providerList, ", ")
return info
}
// loadProvider loads a single provider
//
// It returns nil if the provider could not be found, except for "Other", whose absence is a fatal error.
func loadProvider(name string) *Provider {
data, err := stdfs.ReadFile(providerFS, "provider/"+name+".yaml")
if err != nil {
if os.IsNotExist(err) && name != "Other" {
return nil
}
fs.Fatalf(nil, "internal error: failed to load provider %q: %v", name, err)
}
var p Provider
err = yaml.Unmarshal(data, &p)
if err != nil {
fs.Fatalf(nil, "internal error: failed to unmarshal provider %q: %v", name, err)
}
return &p
}
// loadProviders loads provider definitions from embedded YAML files
func loadProviders() map[string]*Provider {
providers, err := stdfs.ReadDir(providerFS, "provider")
if err != nil {
fs.Fatalf(nil, "internal error: failed to read embedded providers: %v", err)
}
providerMap := make(map[string]*Provider, len(providers))
for _, provider := range providers {
name, _ := strings.CutSuffix(provider.Name(), ".yaml")
p := loadProvider(name)
providerMap[p.Name] = p
}
return providerMap
}
// constructProviders populates fs.Options with provider-specific examples and information
func constructProviders(options fs.Options, providerMap map[string]*Provider) string {
// Defaults for map options set to {}
defaults := providerMap["Other"]
// sort providers: AWS first, Other last, rest alphabetically
providers := make([]*Provider, 0, len(providerMap))
for _, p := range providerMap {
providers = append(providers, p)
}
sort.Slice(providers, func(i, j int) bool {
if providers[i].Name == "AWS" {
return true
}
if providers[j].Name == "AWS" {
return false
}
if providers[i].Name == "Other" {
return false
}
if providers[j].Name == "Other" {
return true
}
return strings.ToLower(providers[i].Name) < strings.ToLower(providers[j].Name)
})
addProvider := func(sp *string, name string) {
if *sp != "" {
*sp += ","
}
*sp += name
}
addBool := func(opt *fs.Option, p *Provider, flag bool) {
if flag {
addProvider(&opt.Provider, p.Name)
}
}
addExample := func(opt *fs.Option, p *Provider, examples, defaultExamples YamlMap) {
if examples == nil {
return
}
if examples.Len() == 0 {
examples = defaultExamples
}
addProvider(&opt.Provider, p.Name)
OUTER:
for pair := examples.Oldest(); pair != nil; pair = pair.Next() {
// Find an existing example to add to if possible
for i, example := range opt.Examples {
if example.Value == pair.Key && example.Help == pair.Value {
addProvider(&opt.Examples[i].Provider, p.Name)
continue OUTER
}
}
// Otherwise add a new one
opt.Examples = append(opt.Examples, fs.OptionExample{
Value: pair.Key,
Help: pair.Value,
Provider: p.Name,
})
}
}
var providerList strings.Builder
for _, p := range providers {
for i := range options {
opt := &options[i]
switch opt.Name {
case "provider":
opt.Examples = append(opt.Examples, fs.OptionExample{
Value: p.Name,
Help: p.Description,
})
providerList.WriteString(p.Name + ", ")
case "region":
addExample(opt, p, p.Region, defaults.Region)
case "endpoint":
addExample(opt, p, p.Endpoint, defaults.Endpoint)
case "location_constraint":
addExample(opt, p, p.LocationConstraint, defaults.LocationConstraint)
case "acl":
addExample(opt, p, p.ACL, defaults.ACL)
case "storage_class":
addExample(opt, p, p.StorageClass, defaults.StorageClass)
case "server_side_encryption":
addExample(opt, p, p.ServerSideEncryption, defaults.ServerSideEncryption)
case "bucket_acl":
addBool(opt, p, p.BucketACL)
case "requester_pays":
addBool(opt, p, p.RequesterPays)
case "sse_customer_algorithm":
addBool(opt, p, p.SSECustomerAlgorithm)
case "sse_kms_key_id":
addBool(opt, p, p.SSEKmsKeyID)
case "sse_customer_key":
addBool(opt, p, p.SSECustomerKey)
case "sse_customer_key_base64":
addBool(opt, p, p.SSECustomerKeyBase64)
case "sse_customer_key_md5":
addBool(opt, p, p.SSECustomerKeyMd5)
case "directory_bucket":
addBool(opt, p, p.DirectoryBucket)
case "ibm_api_key":
addBool(opt, p, p.IBMApiKey)
case "ibm_resource_instance_id":
addBool(opt, p, p.IBMResourceInstanceID)
case "leave_parts_on_error":
addBool(opt, p, p.LeavePartsOnError)
case "sts_endpoint":
addBool(opt, p, p.STSEndpoint)
case "use_accelerate_endpoint":
addBool(opt, p, p.UseAccelerateEndpoint)
}
}
}
return strings.TrimSuffix(providerList.String(), ", ")
}
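As a rough illustration of how this loader is meant to be used — a sketch only, not part of the change set above, with an invented test name and assertions — a unit test in the same package could load the embedded definitions and check the fallback behaviour around the "Other" provider:

package s3

import "testing"

// TestProviderLoadingSketch is illustrative: it exercises the loader
// above by reading every embedded provider definition and checking the
// role "Other" plays as the source of default examples.
func TestProviderLoadingSketch(t *testing.T) {
	providers := loadProviders()

	// "Other" must always be embedded - constructProviders uses it to
	// supply default examples for any map option a provider sets to {}.
	other, ok := providers["Other"]
	if !ok {
		t.Fatal(`expected the embedded "Other" provider to be present`)
	}
	if other.Region == nil || other.Region.Len() == 0 {
		t.Error(`expected "Other" to carry default region examples`)
	}

	// Unknown providers return nil rather than an error.
	if p := loadProvider("NoSuchProvider"); p != nil {
		t.Errorf("expected nil for an unknown provider, got %+v", p)
	}
}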

File diff suppressed because it is too large


@@ -62,14 +62,14 @@ func TestAWSDualStackOption(t *testing.T) {
// test enabled
ctx, opt, client := SetupS3Test(t)
opt.UseDualStack = true
s3Conn, _, err := s3Connection(ctx, opt, client)
s3Conn, err := s3Connection(ctx, opt, client)
require.NoError(t, err)
assert.Equal(t, aws.DualStackEndpointStateEnabled, s3Conn.Options().EndpointOptions.UseDualStackEndpoint)
}
{
// test default case
ctx, opt, client := SetupS3Test(t)
s3Conn, _, err := s3Connection(ctx, opt, client)
s3Conn, err := s3Connection(ctx, opt, client)
require.NoError(t, err)
assert.Equal(t, aws.DualStackEndpointStateDisabled, s3Conn.Options().EndpointOptions.UseDualStackEndpoint)
}


@@ -10,7 +10,6 @@ import (
"os/exec"
"slices"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
@@ -51,9 +50,6 @@ func (s *sshClientExternal) Close() error {
func (s *sshClientExternal) NewSession() (sshSession, error) {
session := s.f.newSSHSessionExternal()
if s.session == nil {
// Store the first session so Wait() and Close() can use it
s.session = session
} else {
fs.Debugf(s.f, "ssh external: creating additional session")
}
return session, nil
@@ -80,8 +76,6 @@ type sshSessionExternal struct {
cancel func()
startCalled bool
runningSFTP bool
waitOnce sync.Once // ensure Wait() is only called once
waitErr error // result of the Wait() call
}
func (f *Fs) newSSHSessionExternal() *sshSessionExternal {
@@ -181,17 +175,16 @@ func (s *sshSessionExternal) exited() bool {
// Wait for the command to exit
func (s *sshSessionExternal) Wait() error {
// Use sync.Once to ensure we only wait for the process once.
// This is safe even if Wait() is called from multiple goroutines.
s.waitOnce.Do(func() {
s.waitErr = s.cmd.Wait()
if s.waitErr == nil {
fs.Debugf(s.f, "ssh external: command exited OK")
} else {
fs.Debugf(s.f, "ssh external: command exited with error: %v", s.waitErr)
}
})
return s.waitErr
if s.exited() {
return nil
}
err := s.cmd.Wait()
if err == nil {
fs.Debugf(s.f, "ssh external: command exited OK")
} else {
fs.Debugf(s.f, "ssh external: command exited with error: %v", err)
}
return err
}
// Run runs cmd on the remote host. Typically, the remote


@@ -1,84 +0,0 @@
//go:build !plan9
package sftp
import (
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/assert"
)
// TestSSHExternalWaitMultipleCalls verifies that calling Wait() multiple times
// doesn't cause zombie processes
func TestSSHExternalWaitMultipleCalls(t *testing.T) {
// Create a minimal Fs object for testing
opt := &Options{
SSH: fs.SpaceSepList{"echo", "test"},
}
f := &Fs{
opt: *opt,
}
// Create a new SSH session
session := f.newSSHSessionExternal()
// Start a simple command that exits quickly
err := session.Start("exit 0")
assert.NoError(t, err)
// Give the command time to complete
time.Sleep(100 * time.Millisecond)
// Call Wait() multiple times - this should not cause issues
err1 := session.Wait()
err2 := session.Wait()
err3 := session.Wait()
// All calls should return the same result (no error in this case)
assert.NoError(t, err1)
assert.NoError(t, err2)
assert.NoError(t, err3)
// Verify the process has exited
assert.True(t, session.exited())
}
// TestSSHExternalCloseMultipleCalls verifies that calling Close() multiple times
// followed by Wait() calls doesn't cause zombie processes
func TestSSHExternalCloseMultipleCalls(t *testing.T) {
// Create a minimal Fs object for testing
opt := &Options{
SSH: fs.SpaceSepList{"sleep", "10"},
}
f := &Fs{
opt: *opt,
}
// Create a new SSH session
session := f.newSSHSessionExternal()
// Start a long-running command
err := session.Start("sleep 10")
if err != nil {
t.Skip("Cannot start sleep command:", err)
}
// Close should cancel and wait for the process
_ = session.Close()
// Additional Wait() calls should return the same error
err2 := session.Wait()
err3 := session.Wait()
// All should complete without panicking
// err1 could be nil or an error depending on how the process was killed
// err2 and err3 should be the same
assert.Equal(t, err2, err3, "Subsequent Wait() calls should return same result")
// Verify the process has exited
assert.True(t, session.exited())
}


@@ -1,6 +1,7 @@
// Code generated by vfsgen; DO NOT EDIT.
//go:build !dev
// +build !dev
package sharefile


@@ -943,20 +943,6 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
used = container.Bytes
objects = container.Count
total = container.QuotaBytes
if f.opt.UseSegmentsContainer.Value {
err = f.pacer.Call(func() (bool, error) {
segmentsContainer := f.rootContainer + segmentsContainerSuffix
container, _, err = f.c.Container(ctx, segmentsContainer)
return shouldRetry(ctx, err)
})
if err != nil && err != swift.ContainerNotFound {
return nil, fmt.Errorf("container info failed: %w", err)
}
if err == nil {
used += container.Bytes
}
}
} else {
var containers []swift.Container
err = f.pacer.Call(func() (bool, error) {


@@ -56,11 +56,6 @@ func (f *Fs) testNoChunk(t *testing.T) {
uploadHash := hash.NewMultiHasher()
in := io.TeeReader(buf, uploadHash)
// Track how much space is used before we put our object.
usage, err := f.About(ctx)
require.NoError(t, err)
usedBeforePut := *usage.Used
file.Size = -1
obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
obj, err := f.Features().PutStream(ctx, in, obji)
@@ -75,13 +70,6 @@ func (f *Fs) testNoChunk(t *testing.T) {
require.NoError(t, err)
file.Check(t, obj, f.Precision())
// Check how much space is used after the upload, should match the amount we
// uploaded..
usage, err = f.About(ctx)
require.NoError(t, err)
expectedUsed := usedBeforePut + obj.Size()
require.EqualValues(t, expectedUsed, *usage.Used)
// Delete the object
assert.NoError(t, obj.Remove(ctx))
}
@@ -117,24 +105,12 @@ func (f *Fs) testWithChunk(t *testing.T) {
uploadHash := hash.NewMultiHasher()
in := io.TeeReader(buf, uploadHash)
// Track how much space is used before we put our object.
ctx := context.TODO()
usage, err := f.About(ctx)
require.NoError(t, err)
usedBeforePut := *usage.Used
file.Size = -1
obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
ctx := context.TODO()
obj, err := f.Features().PutStream(ctx, in, obji)
require.NoError(t, err)
require.NotEmpty(t, obj)
// Check how much space is used after the upload, should match the amount we
// uploaded..
usage, err = f.About(ctx)
require.NoError(t, err)
expectedUsed := usedBeforePut + obj.Size()
require.EqualValues(t, expectedUsed, *usage.Used)
}
func (f *Fs) testWithChunkFail(t *testing.T) {
@@ -207,14 +183,9 @@ func (f *Fs) testCopyLargeObject(t *testing.T) {
uploadHash := hash.NewMultiHasher()
in := io.TeeReader(buf, uploadHash)
// Track how much space is used before we put our object.
ctx := context.TODO()
usage, err := f.About(ctx)
require.NoError(t, err)
usedBeforePut := *usage.Used
file.Size = -1
obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
ctx := context.TODO()
obj, err := f.Features().PutStream(ctx, in, obji)
require.NoError(t, err)
require.NotEmpty(t, obj)
@@ -223,13 +194,6 @@ func (f *Fs) testCopyLargeObject(t *testing.T) {
require.NoError(t, err)
require.NotEmpty(t, objTarget)
require.Equal(t, obj.Size(), objTarget.Size())
// Check how much space is used after the upload, should match the amount we
// uploaded *and* the copy.
usage, err = f.About(ctx)
require.NoError(t, err)
expectedUsed := usedBeforePut + obj.Size() + objTarget.Size()
require.EqualValues(t, expectedUsed, *usage.Used)
}
func (f *Fs) testPolicyDiscovery(t *testing.T) {


@@ -104,19 +104,6 @@ type File struct {
} `json:"processing"`
}
// FolderSize represents the API object describing the sizes of a files and subfolders of a folder.
type FolderSize struct {
FilesSize int64 `json:"files_size"`
FilesCount int64 `json:"files_count"`
FoldersCount int64 `json:"folders_count"`
}
// FolderSizes describes the subfolder sizes of a single folder.
type FolderSizes struct {
Direct FolderSize `json:"direct"`
Recursive FolderSize `json:"recursive"`
}
// CreateFolderRequest represents the JSON API object
// that's sent to the create folder API endpoint.
type CreateFolderRequest struct {
@@ -139,9 +126,6 @@ type ListFilesResponse struct {
Items []File `json:"items"`
}
// FolderSizesResponse represents the response from the folder-sizes endpoint.
type FolderSizesResponse map[string]FolderSizes
// DeleteFoldersRequest represents the JSON API object
// that's sent to the delete folders API endpoint.
type DeleteFoldersRequest struct {


@@ -97,8 +97,7 @@ any root slug set.`,
Advanced: true,
Default: encoder.Display | encoder.EncodeInvalidUtf8 | encoder.EncodeBackSlash,
},
},
})
}})
}
// Fs represents a remote uloz.to storage
@@ -144,6 +143,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.rest.SetHeader("X-Auth-Token", f.opt.AppToken)
auth, err := f.authenticate(ctx)
if err != nil {
return f, err
}
@@ -178,20 +178,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return f, err
}
// About implements the Abouter interface for Uloz.to.
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
used, err := f.getUsedSize(ctx)
if err != nil {
return nil, err
}
usage := fs.Usage{
Used: &used,
}
return &usage, nil
}
// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
// Decode error response
@@ -267,6 +253,7 @@ func (f *Fs) authenticate(ctx context.Context) (response *api.AuthenticateRespon
httpResp, err := f.rest.CallJSON(ctx, &opts, &authRequest, &response)
return f.shouldRetry(ctx, httpResp, err, false)
})
if err != nil {
return nil, err
}
@@ -276,32 +263,6 @@ func (f *Fs) authenticate(ctx context.Context) (response *api.AuthenticateRespon
return response, nil
}
func (f *Fs) getUsedSize(ctx context.Context) (int64, error) {
rootID, err := f.dirCache.RootID(ctx, false)
if err != nil {
return 0, err
}
opts := rest.Opts{
Method: "GET",
Path: fmt.Sprintf("/v6/user/%s/folder/%s/folder-sizes", f.opt.Username, rootID),
Parameters: url.Values{
"recursive": []string{"true"},
},
}
folderSizes := api.FolderSizesResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, nil, &folderSizes)
return f.shouldRetry(ctx, resp, err, true)
})
if err != nil {
return 0, err
}
return folderSizes[rootID].Recursive.FilesSize, nil
}
// UploadSession represents a single Uloz.to upload session.
//
// Uloz.to supports uploading multiple files at once and committing them atomically. This functionality isn't being used
@@ -349,6 +310,7 @@ func (session *UploadSession) renewUploadSession(ctx context.Context) error {
httpResp, err := session.Filesystem.rest.CallJSON(ctx, &opts, &createUploadURLReq, &response)
return session.Filesystem.shouldRetry(ctx, httpResp, err, true)
})
if err != nil {
return err
}
@@ -362,12 +324,14 @@ func (session *UploadSession) renewUploadSession(ctx context.Context) error {
func (f *Fs) uploadUnchecked(ctx context.Context, name, parentSlug string, info fs.ObjectInfo, payload io.Reader) (fs.Object, error) {
session, err := f.createUploadSession(ctx)
if err != nil {
return nil, err
}
hashes := hash.NewHashSet(hash.MD5, hash.SHA256)
hasher, err := hash.NewMultiHasherTypes(hashes)
if err != nil {
return nil, err
}
@@ -396,6 +360,7 @@ func (f *Fs) uploadUnchecked(ctx context.Context, name, parentSlug string, info
httpResp, err := f.cdn.CallJSON(ctx, &opts, nil, &uploadResponse)
return f.shouldRetry(ctx, httpResp, err, true)
})
if err != nil {
return nil, err
}
@@ -421,6 +386,7 @@ func (f *Fs) uploadUnchecked(ctx context.Context, name, parentSlug string, info
}
encodedMetadata, err := metadata.encode()
if err != nil {
return nil, err
}
@@ -446,6 +412,7 @@ func (f *Fs) uploadUnchecked(ctx context.Context, name, parentSlug string, info
httpResp, err := session.Filesystem.rest.CallJSON(ctx, &opts, &updateReq, &updateResponse)
return f.shouldRetry(ctx, httpResp, err, true)
})
if err != nil {
return nil, err
}
@@ -471,6 +438,7 @@ func (f *Fs) uploadUnchecked(ctx context.Context, name, parentSlug string, info
httpResp, err := session.Filesystem.rest.CallJSON(ctx, &opts, &commitRequest, &commitResponse)
return f.shouldRetry(ctx, httpResp, err, true)
})
if err != nil {
return nil, err
}
@@ -500,6 +468,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// Uloz.to allows to have multiple files of the same name in the same folder.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
filename, folderSlug, err := f.dirCache.FindPath(ctx, src.Remote(), true)
if err != nil {
return nil, err
}
@@ -515,6 +484,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
func (f *Fs) isDirEmpty(ctx context.Context, slug string) (empty bool, err error) {
folders, err := f.fetchListFolderPage(ctx, slug, "", 1, 0)
if err != nil {
return false, err
}
@@ -524,6 +494,7 @@ func (f *Fs) isDirEmpty(ctx context.Context, slug string) (empty bool, err error
}
files, err := f.fetchListFilePage(ctx, slug, "", 1, 0)
if err != nil {
return false, err
}
@@ -538,11 +509,13 @@ func (f *Fs) isDirEmpty(ctx context.Context, slug string) (empty bool, err error
// Rmdir implements the mandatory method fs.Fs.Rmdir.
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
slug, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
}
empty, err := f.isDirEmpty(ctx, slug)
if err != nil {
return err
}
@@ -561,6 +534,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
httpResp, err := f.rest.CallJSON(ctx, &opts, req, nil)
return f.shouldRetry(ctx, httpResp, err, true)
})
if err != nil {
return err
}
@@ -584,6 +558,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
filename, folderSlug, err := f.dirCache.FindPath(ctx, remote, true)
if err != nil {
return nil, err
}
@@ -625,6 +600,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
httpResp, err := f.rest.CallJSON(ctx, &opts, &req, nil)
return f.shouldRetry(ctx, httpResp, err, true)
})
if err != nil {
return err
}
@@ -765,6 +741,7 @@ func (o *Object) updateFileProperties(ctx context.Context, req any) (err error)
httpResp, err := o.fs.rest.CallJSON(ctx, &opts, &req, &resp)
return o.fs.shouldRetry(ctx, httpResp, err, true)
})
if err != nil {
return err
}
@@ -893,6 +870,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
remote: o.Remote(),
}
newo, err := o.fs.PutUnchecked(ctx, in, info, options...)
if err != nil {
return err
}
@@ -936,6 +914,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// The time the object was last modified on the server - a handwavy guess, but we don't have any better
return o.remoteFsMtime
}
// Fs implements the mandatory method fs.Object.Fs
@@ -1074,6 +1053,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Fi
}
files, err := f.listFiles(ctx, folderSlug, filename)
if err != nil {
return nil, err
}
@@ -1085,6 +1065,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Fi
}
folders, err := f.listFolders(ctx, folderSlug, filename)
if err != nil {
return nil, err
}
@@ -1155,8 +1136,8 @@ func (f *Fs) fetchListFolderPage(
folderSlug string,
searchQuery string,
limit int,
offset int,
) (folders []api.Folder, err error) {
offset int) (folders []api.Folder, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/v9/user/" + f.opt.Username + "/folder/" + folderSlug + "/folder-list",
@@ -1179,6 +1160,7 @@ func (f *Fs) fetchListFolderPage(
httpResp, err := f.rest.CallJSON(ctx, &opts, nil, &respBody)
return f.shouldRetry(ctx, httpResp, err, true)
})
if err != nil {
return nil, err
}
@@ -1193,8 +1175,8 @@ func (f *Fs) fetchListFolderPage(
func (f *Fs) listFolders(
ctx context.Context,
folderSlug string,
searchQuery string,
) (folders []api.Folder, err error) {
searchQuery string) (folders []api.Folder, err error) {
targetPageSize := f.opt.ListPageSize
lastPageSize := targetPageSize
offset := 0
@@ -1222,8 +1204,8 @@ func (f *Fs) fetchListFilePage(
folderSlug string,
searchQuery string,
limit int,
offset int,
) (folders []api.File, err error) {
offset int) (folders []api.File, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/v8/user/" + f.opt.Username + "/folder/" + folderSlug + "/file-list",
@@ -1245,6 +1227,7 @@ func (f *Fs) fetchListFilePage(
httpResp, err := f.rest.CallJSON(ctx, &opts, nil, &respBody)
return f.shouldRetry(ctx, httpResp, err, true)
})
if err != nil {
return nil, fmt.Errorf("couldn't list files: %w", err)
}
@@ -1259,8 +1242,8 @@ func (f *Fs) fetchListFilePage(
func (f *Fs) listFiles(
ctx context.Context,
folderSlug string,
searchQuery string,
) (folders []api.File, err error) {
searchQuery string) (folders []api.File, err error) {
targetPageSize := f.opt.ListPageSize
lastPageSize := targetPageSize
offset := 0


@@ -25,9 +25,6 @@ import (
"sync"
"time"
"github.com/Azure/go-ntlmssp"
"golang.org/x/sync/singleflight"
"github.com/rclone/rclone/backend/webdav/api"
"github.com/rclone/rclone/backend/webdav/odrvcookie"
"github.com/rclone/rclone/fs"
@@ -38,10 +35,11 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
ntlmssp "github.com/Azure/go-ntlmssp"
)
const (
@@ -194,7 +192,7 @@ type Options struct {
User string `config:"user"`
Pass string `config:"pass"`
BearerToken string `config:"bearer_token"`
BearerTokenCommand fs.SpaceSepList `config:"bearer_token_command"`
BearerTokenCommand string `config:"bearer_token_command"`
Enc encoder.MultiEncoder `config:"encoding"`
Headers fs.CommaSepList `config:"headers"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
@@ -228,7 +226,6 @@ type Fs struct {
ntlmAuthMu sync.Mutex // mutex to serialize NTLM auth roundtrips
chunksUploadURL string // upload URL for nextcloud chunked
canChunk bool // set if nextcloud and nextcloud_chunk_size is set
authSingleflight *singleflight.Group
}
// Object describes a webdav object
@@ -285,7 +282,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
return false, err
}
// If we have a bearer token command and it has expired then refresh it
if len(f.opt.BearerTokenCommand) != 0 && resp != nil && resp.StatusCode == 401 {
if f.opt.BearerTokenCommand != "" && resp != nil && resp.StatusCode == 401 {
fs.Debugf(f, "Bearer token expired: %v", err)
authErr := f.fetchAndSetBearerToken()
if authErr != nil {
@@ -479,14 +476,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
f := &Fs{
name: name,
root: root,
opt: *opt,
endpoint: u,
endpointURL: u.String(),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
precision: fs.ModTimeNotSupported,
authSingleflight: new(singleflight.Group),
name: name,
root: root,
opt: *opt,
endpoint: u,
endpointURL: u.String(),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
precision: fs.ModTimeNotSupported,
}
var client *http.Client
@@ -519,7 +515,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.srv.SetUserPass(opt.User, opt.Pass)
} else if opt.BearerToken != "" {
f.setBearerToken(opt.BearerToken)
} else if len(f.opt.BearerTokenCommand) != 0 {
} else if f.opt.BearerTokenCommand != "" {
err = f.fetchAndSetBearerToken()
if err != nil {
return nil, err
@@ -566,11 +562,12 @@ func (f *Fs) setBearerToken(token string) {
}
// fetch the bearer token using the command
func (f *Fs) fetchBearerToken(cmd fs.SpaceSepList) (string, error) {
func (f *Fs) fetchBearerToken(cmd string) (string, error) {
var (
args = strings.Split(cmd, " ")
stdout bytes.Buffer
stderr bytes.Buffer
c = exec.Command(cmd[0], cmd[1:]...)
c = exec.Command(args[0], args[1:]...)
)
c.Stdout = &stdout
c.Stderr = &stderr
@@ -610,18 +607,15 @@ func (f *Fs) findHeader(headers fs.CommaSepList, find string) bool {
// fetch the bearer token and set it if successful
func (f *Fs) fetchAndSetBearerToken() error {
_, err, _ := f.authSingleflight.Do("bearerToken", func() (interface{}, error) {
if len(f.opt.BearerTokenCommand) == 0 {
return nil, nil
}
token, err := f.fetchBearerToken(f.opt.BearerTokenCommand)
if err != nil {
return nil, err
}
f.setBearerToken(token)
return nil, nil
})
return err
if f.opt.BearerTokenCommand == "" {
return nil
}
token, err := f.fetchBearerToken(f.opt.BearerTokenCommand)
if err != nil {
return err
}
f.setBearerToken(token)
return nil
}
// The WebDAV url can optionally be suffixed with a path. This suffix needs to be ignored for determining the temporary upload directory of chunks.
@@ -888,56 +882,30 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
var iErr error
_, err := f.listAll(ctx, dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool {
_, err = f.listAll(ctx, dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool {
if isDir {
d := fs.NewDir(remote, time.Time(info.Modified))
// .SetID(info.ID)
// FIXME more info from dir? can set size, items?
err := list.Add(d)
if err != nil {
iErr = err
return true
}
entries = append(entries, d)
} else {
o, err := f.newObjectWithInfo(ctx, remote, info)
if err != nil {
iErr = err
return true
}
err = list.Add(o)
if err != nil {
iErr = err
return true
}
entries = append(entries, o)
}
return false
})
if err != nil {
return err
return nil, err
}
if iErr != nil {
return iErr
return nil, iErr
}
return list.Flush()
return entries, nil
}
// Creates from the parameters passed in a half finished Object which
@@ -1660,7 +1628,6 @@ var (
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.ListPer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
)


@@ -514,12 +514,11 @@ func (f *Fs) mkDirs(ctx context.Context, path string) (err error) {
if apiErr.ErrorName != "DiskPathPointsToExistentDirectoryError" {
// 2 if it fails then create all directories in the path from root.
dirs := strings.Split(dirString, "/") //path separator
var mkdirpath strings.Builder
mkdirpath.WriteString("/") //path separator /
var mkdirpath = "/" //path separator /
for _, element := range dirs {
if element != "" {
mkdirpath.WriteString(element + "/") //path separator /
_ = f.CreateDir(ctx, mkdirpath.String()) // ignore errors while creating dirs
mkdirpath += element + "/" //path separator /
_ = f.CreateDir(ctx, mkdirpath) // ignore errors while creating dirs
}
}
}


@@ -74,7 +74,6 @@ var osarches = []string{
"plan9/amd64",
"solaris/amd64",
// "js/wasm", // Rclone is too big for js/wasm until https://github.com/golang/go/issues/64856 is fixed
"aix/ppc64",
}
// Special environment flags for a given arch


@@ -32,7 +32,6 @@ docs = [
"fichier.md",
"alias.md",
"s3.md",
"archive.md",
"b2.md",
"box.md",
"cache.md",


@@ -14,4 +14,4 @@ if [ -z "$globs" ]; then
exit 1
fi
docker run --rm -v $PWD:/workdir --user $(id -u):$(id -g) davidanson/markdownlint-cli2 $globs
docker run -v $PWD:/workdir --user $(id -u):$(id -g) davidanson/markdownlint-cli2 $globs


@@ -13,6 +13,7 @@
// https://github.com/quasilyte/go-ruleguard?tab=readme-ov-file#troubleshooting
//go:build ruleguard
// +build ruleguard
// Package gorules implementing custom linting rules using ruleguard
package gorules


@@ -5,10 +5,6 @@ import (
// Active commands
_ "github.com/rclone/rclone/cmd"
_ "github.com/rclone/rclone/cmd/about"
_ "github.com/rclone/rclone/cmd/archive"
_ "github.com/rclone/rclone/cmd/archive/create"
_ "github.com/rclone/rclone/cmd/archive/extract"
_ "github.com/rclone/rclone/cmd/archive/list"
_ "github.com/rclone/rclone/cmd/authorize"
_ "github.com/rclone/rclone/cmd/backend"
_ "github.com/rclone/rclone/cmd/bisync"


@@ -1,40 +0,0 @@
//go:build !plan9
// Package archive implements 'rclone archive'.
package archive
import (
"errors"
"github.com/rclone/rclone/cmd"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(Command)
}
// Command - archive command
var Command = &cobra.Command{
Use: "archive <action> [opts] <source> [<destination>]",
Short: `Perform an action on an archive.`,
Long: `Perform an action on an archive. Requires the use of a
subcommand to specify the action, e.g.
rclone archive list remote:file.zip
Each subcommand has its own options which you can see in their help.
See [rclone archive create](/commands/rclone_archive_create/) for the
archive formats supported.
`,
Annotations: map[string]string{
"versionIntroduced": "v1.72",
},
RunE: func(command *cobra.Command, args []string) error {
if len(args) == 0 {
return errors.New("archive requires an action, e.g. 'rclone archive list remote:'")
}
return errors.New("unknown action")
},
}


@@ -1,188 +0,0 @@
package archive_test
import (
"context"
"strings"
"testing"
"github.com/mholt/archives"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/memory"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/rclone/rclone/cmd/archive/create"
"github.com/rclone/rclone/cmd/archive/extract"
"github.com/rclone/rclone/cmd/archive/list"
)
var (
t1 = fstest.Time("2017-02-03T04:05:06.499999999Z")
)
// TestMain drives the tests
func TestMain(m *testing.M) {
fstest.TestMain(m)
}
func TestCheckValidDestination(t *testing.T) {
var err error
ctx := context.Background()
r := fstest.NewRun(t)
// create file
r.WriteObject(ctx, "file1.txt", "111", t1)
// test checkValidDestination when file exists
err = create.CheckValidDestination(ctx, r.Fremote, "file1.txt")
require.NoError(t, err)
// test checkValidDestination when file does not exist
err = create.CheckValidDestination(ctx, r.Fremote, "file2.txt")
require.NoError(t, err)
// test checkValidDestination when dest is a directory
if r.Fremote.Features().CanHaveEmptyDirectories {
err = create.CheckValidDestination(ctx, r.Fremote, "")
require.ErrorIs(t, err, fs.ErrorIsDir)
}
// test checkValidDestination when dest does not exists
err = create.CheckValidDestination(ctx, r.Fremote, "dir/file.txt")
require.NoError(t, err)
}
// test archiving to the remote
func testArchiveRemote(t *testing.T, fromLocal bool, subDir string, extension string) {
var err error
ctx := context.Background()
r := fstest.NewRun(t)
var src, dst fs.Fs
var f1, f2, f3 fstest.Item
// create files to archive on src
if fromLocal {
// create files to archive on local
src = r.Flocal
dst = r.Fremote
f1 = r.WriteFile("file1.txt", "content 1", t1)
f2 = r.WriteFile("dir1/sub1.txt", "sub content 1", t1)
f3 = r.WriteFile("dir2/sub2a.txt", "sub content 2a", t1)
} else {
// create files to archive on remote
src = r.Fremote
dst = r.Flocal
f1 = r.WriteObject(ctx, "file1.txt", "content 1", t1)
f2 = r.WriteObject(ctx, "dir1/sub1.txt", "sub content 1", t1)
f3 = r.WriteObject(ctx, "dir2/sub2a.txt", "sub content 2a", t1)
}
fstest.CheckItems(t, src, f1, f2, f3)
// create archive on dst
archiveName := "test." + extension
err = create.ArchiveCreate(ctx, dst, archiveName, src, "", "")
require.NoError(t, err)
// list archive on dst
expected := map[string]int64{
"file1.txt": 9,
"dir1/": 0,
"dir1/sub1.txt": 13,
"dir2/": 0,
"dir2/sub2a.txt": 14,
}
listFile := func(ctx context.Context, f archives.FileInfo) error {
name := f.NameInArchive
gotSize := f.Size()
if f.IsDir() && !strings.HasSuffix(name, "/") {
name += "/"
gotSize = 0
}
wantSize, found := expected[name]
assert.True(t, found, name)
assert.Equal(t, wantSize, gotSize)
delete(expected, name)
return nil
}
err = list.ArchiveList(ctx, dst, archiveName, listFile)
require.NoError(t, err)
assert.Equal(t, 0, len(expected), expected)
// clear the src
require.NoError(t, operations.Purge(ctx, src, ""))
require.NoError(t, src.Mkdir(ctx, ""))
fstest.CheckItems(t, src)
// extract dst archive back to src
err = extract.ArchiveExtract(ctx, src, subDir, dst, archiveName)
require.NoError(t, err)
// check files on src are restored from the archive on dst
items := []fstest.Item{f1, f2, f3}
if subDir != "" {
for i := range items {
item := &items[i]
item.Path = subDir + "/" + item.Path
}
}
fstest.CheckListingWithPrecision(t, src, items, nil, fs.ModTimeNotSupported)
}
func testArchive(t *testing.T) {
var extensions = []string{
"zip",
"tar",
"tar.gz",
"tar.bz2",
"tar.lz",
"tar.lz4",
"tar.xz",
"tar.zst",
"tar.br",
"tar.sz",
"tar.mz",
}
for _, extension := range extensions {
t.Run(extension, func(t *testing.T) {
for _, subDir := range []string{"", "subdir"} {
name := subDir
if name == "" {
name = "root"
}
t.Run(name, func(t *testing.T) {
t.Run("local", func(t *testing.T) {
testArchiveRemote(t, true, name, extension)
})
t.Run("remote", func(t *testing.T) {
testArchiveRemote(t, false, name, extension)
})
})
}
})
}
}
func TestIntegration(t *testing.T) {
testArchive(t)
}
func TestMemory(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
// Reset -remote to point to :memory:
oldFstestRemoteName := fstest.RemoteName
remoteName := ":memory:"
fstest.RemoteName = &remoteName
defer func() {
fstest.RemoteName = oldFstestRemoteName
}()
fstest.ResetRun()
testArchive(t)
}

@@ -1,7 +0,0 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9
// Package archive implements 'rclone archive'.
package archive

@@ -1,388 +0,0 @@
//go:build !plan9
// Package create implements 'rclone archive create'.
package create
import (
"context"
"errors"
"fmt"
"io"
"os"
"path"
"sort"
"strings"
"time"
"github.com/mholt/archives"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/archive"
"github.com/rclone/rclone/cmd/archive/files"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/spf13/cobra"
)
var (
fullPath = false
prefix = ""
format = ""
)
func init() {
flagSet := Command.Flags()
flags.BoolVarP(flagSet, &fullPath, "full-path", "", fullPath, "Set prefix for files in archive to source path", "")
flags.StringVarP(flagSet, &prefix, "prefix", "", prefix, "Set prefix for files in archive to the given value", "")
flags.StringVarP(flagSet, &format, "format", "", format, "Create the archive in this format, or guess it from the extension", "")
archive.Command.AddCommand(Command)
}
// Command - create
var Command = &cobra.Command{
Use: "create [flags] <source> [<destination>]",
Short: `Archive source file(s) to destination.`,
// Warning! "!" will be replaced by backticks below
Long: strings.ReplaceAll(`
Creates an archive from the files in source:path and saves the archive to
dest:path. If dest:path is missing, it will write to the console.
The valid formats for the !--format! flag are listed below. If
!--format! is not set rclone will guess it from the extension of dest:path.
| Format | Extensions |
|:-------|:-----------|
| zip | .zip |
| tar | .tar |
| tar.gz | .tar.gz, .tgz, .taz |
| tar.bz2| .tar.bz2, .tb2, .tbz, .tbz2, .tz2 |
| tar.lz | .tar.lz |
| tar.lz4| .tar.lz4 |
| tar.xz | .tar.xz, .txz |
| tar.zst| .tar.zst, .tzst |
| tar.br | .tar.br |
| tar.sz | .tar.sz |
| tar.mz | .tar.mz |
The !--prefix! and !--full-path! flags control the prefix for the files
in the archive.
If the flag !--full-path! is set then the files will have the full source
path as the prefix.
If the flag !--prefix=<value>! is set then the files will have
!<value>! as prefix. It's possible to create invalid file names with
!--prefix=<value>! so use with caution. Flag !--prefix! has
priority over !--full-path!.
Given a directory !/sourcedir! with the following:
file1.txt
dir1/file2.txt
Running the command !rclone archive create /sourcedir /dest.tar.gz!
will make an archive with the contents:
file1.txt
dir1/
dir1/file2.txt
Running the command !rclone archive create --full-path /sourcedir /dest.tar.gz!
will make an archive with the contents:
sourcedir/file1.txt
sourcedir/dir1/
sourcedir/dir1/file2.txt
Running the command !rclone archive create --prefix=my_new_path /sourcedir /dest.tar.gz!
will make an archive with the contents:
my_new_path/file1.txt
my_new_path/dir1/
my_new_path/dir1/file2.txt
`, "!", "`"),
Annotations: map[string]string{
"versionIntroduced": "v1.72",
},
RunE: func(command *cobra.Command, args []string) error {
var src, dst fs.Fs
var dstFile string
if len(args) == 1 { // source only, archive to stdout
src = cmd.NewFsSrc(args)
} else if len(args) == 2 {
src = cmd.NewFsSrc(args)
dst, dstFile = cmd.NewFsDstFile(args[1:2])
} else {
cmd.CheckArgs(1, 2, command, args)
}
cmd.Run(false, false, command, func() error {
fmt.Printf("dst=%v, dstFile=%q, src=%v, format=%q, prefix=%q\n", dst, dstFile, src, format, prefix)
if prefix != "" {
return ArchiveCreate(context.Background(), dst, dstFile, src, format, prefix)
} else if fullPath {
return ArchiveCreate(context.Background(), dst, dstFile, src, format, src.Root())
}
return ArchiveCreate(context.Background(), dst, dstFile, src, format, "")
})
return nil
},
}
// Globals
var (
archiveFormats = map[string]archives.CompressedArchive{
"zip": archives.CompressedArchive{
Archival: archives.Zip{ContinueOnError: true},
},
"tar": archives.CompressedArchive{
Archival: archives.Tar{ContinueOnError: true},
},
"tar.gz": archives.CompressedArchive{
Compression: archives.Gz{},
Archival: archives.Tar{ContinueOnError: true},
},
"tar.bz2": archives.CompressedArchive{
Compression: archives.Bz2{},
Archival: archives.Tar{ContinueOnError: true},
},
"tar.lz": archives.CompressedArchive{
Compression: archives.Lzip{},
Archival: archives.Tar{ContinueOnError: true},
},
"tar.lz4": archives.CompressedArchive{
Compression: archives.Lz4{},
Archival: archives.Tar{ContinueOnError: true},
},
"tar.xz": archives.CompressedArchive{
Compression: archives.Xz{},
Archival: archives.Tar{ContinueOnError: true},
},
"tar.zst": archives.CompressedArchive{
Compression: archives.Zstd{},
Archival: archives.Tar{ContinueOnError: true},
},
"tar.br": archives.CompressedArchive{
Compression: archives.Brotli{},
Archival: archives.Tar{ContinueOnError: true},
},
"tar.sz": archives.CompressedArchive{
Compression: archives.Sz{},
Archival: archives.Tar{ContinueOnError: true},
},
"tar.mz": archives.CompressedArchive{
Compression: archives.MinLZ{},
Archival: archives.Tar{ContinueOnError: true},
},
}
archiveExtensions = map[string]string{
// zip
"*.zip": "zip",
// tar
"*.tar": "tar",
// tar.gz
"*.tar.gz": "tar.gz",
"*.tgz": "tar.gz",
"*.taz": "tar.gz",
// tar.bz2
"*.tar.bz2": "tar.bz2",
"*.tb2": "tar.bz2",
"*.tbz": "tar.bz2",
"*.tbz2": "tar.bz2",
"*.tz2": "tar.bz2",
// tar.lz
"*.tar.lz": "tar.lz",
// tar.lz4
"*.tar.lz4": "tar.lz4",
// tar.xz
"*.tar.xz": "tar.xz",
"*.txz": "tar.xz",
// tar.zst
"*.tar.zst": "tar.zst",
"*.tzst": "tar.zst",
// tar.br
"*.tar.br": "tar.br",
// tar.sz
"*.tar.sz": "tar.sz",
// tar.mz
"*.tar.mz": "tar.mz",
}
)
// sorted FileInfo list
type archivesFileInfoList []archives.FileInfo
func (a archivesFileInfoList) Len() int {
return len(a)
}
func (a archivesFileInfoList) Less(i, j int) bool {
if a[i].FileInfo.IsDir() == a[j].FileInfo.IsDir() {
// both are same type, order by name
return strings.Compare(a[i].NameInArchive, a[j].NameInArchive) < 0
} else if a[i].FileInfo.IsDir() {
return strings.Compare(strings.TrimSuffix(a[i].NameInArchive, "/"), path.Dir(a[j].NameInArchive)) < 0
}
return strings.Compare(path.Dir(a[i].NameInArchive), strings.TrimSuffix(a[j].NameInArchive, "/")) < 0
}
func (a archivesFileInfoList) Swap(i, j int) {
a[i], a[j] = a[j], a[i]
}
func getCompressor(format string, filename string) (archives.CompressedArchive, error) {
var compressor archives.CompressedArchive
var found bool
// make filename lowercase for checks
filename = strings.ToLower(filename)
if format == "" {
// format flag not set, get format from the file extension
for pattern, formatName := range archiveExtensions {
ok, err := path.Match(pattern, filename)
if err != nil {
// error in pattern
return archives.CompressedArchive{}, fmt.Errorf("invalid extension pattern '%s'", pattern)
} else if ok {
// pattern matches filename, get compressor
compressor, found = archiveFormats[formatName]
break
}
}
} else {
// format flag set, look for it
compressor, found = archiveFormats[format]
}
if found {
return compressor, nil
} else if format == "" {
return archives.CompressedArchive{}, fmt.Errorf("format not set and can't be guessed from extension")
}
return archives.CompressedArchive{}, fmt.Errorf("invalid format '%s'", format)
}
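// Example sketch (not part of the original file): how getCompressor resolves
// an archive format, assuming it is called from within this package.
func exampleGetCompressor() {
	// With --format unset the destination extension decides the format.
	if _, err := getCompressor("", "backup.tgz"); err != nil {
		fmt.Println("unexpected:", err) // resolves to the tar.gz compressor
	}
	// An explicit format overrides the extension entirely.
	if _, err := getCompressor("zip", "backup.dat"); err != nil {
		fmt.Println("unexpected:", err) // resolves to the zip compressor
	}
	// Unknown formats are rejected with an error.
	if _, err := getCompressor("rar", "backup.rar"); err != nil {
		fmt.Println(err) // invalid format 'rar'
	}
}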
// CheckValidDestination - takes (dst, dstFile) and checks it is valid
func CheckValidDestination(ctx context.Context, dst fs.Fs, dstFile string) error {
var err error
// check if dst + dstFile is a file
_, err = dst.NewObject(ctx, dstFile)
if err == nil {
// (dst, dstFile) is a valid file we can overwrite
return nil
} else if errors.Is(err, fs.ErrorIsDir) {
// dst is a directory
return fmt.Errorf("destination must not be a directory: %w", err)
} else if !errors.Is(err, fs.ErrorObjectNotFound) {
// some other error happened reading the destination - bail out
return fmt.Errorf("error reading destination: %w", err)
}
// if we are here dst points to a non-existent path
return nil
}
func loadMetadata(ctx context.Context, o fs.DirEntry) fs.Metadata {
meta, err := fs.GetMetadata(ctx, o)
if err != nil {
meta = make(fs.Metadata, 0)
}
return meta
}
// ArchiveCreate - compresses/archive source to destination
func ArchiveCreate(ctx context.Context, dst fs.Fs, dstFile string, src fs.Fs, format string, prefix string) error {
var err error
var list archivesFileInfoList
var compArchive archives.CompressedArchive
var totalLength int64
// check if dst is valid
err = CheckValidDestination(ctx, dst, dstFile)
if err != nil {
return err
}
ci := fs.GetConfig(ctx)
fi := filter.GetConfig(ctx)
// get archive format
compArchive, err = getCompressor(format, dstFile)
if err != nil {
return err
}
// get source files
err = walk.ListR(ctx, src, "", false, ci.MaxDepth, walk.ListAll, func(entries fs.DirEntries) error {
// get directories
entries.ForDir(func(o fs.Directory) {
var metadata fs.Metadata
if ci.Metadata {
metadata = loadMetadata(ctx, o)
}
if fi.Include(o.Remote(), o.Size(), o.ModTime(ctx), metadata) {
info := files.NewArchiveFileInfo(ctx, o, prefix, metadata)
list = append(list, info)
}
})
// get files
entries.ForObject(func(o fs.Object) {
var metadata fs.Metadata
if ci.Metadata {
metadata = loadMetadata(ctx, o)
}
if fi.Include(o.Remote(), o.Size(), o.ModTime(ctx), metadata) {
info := files.NewArchiveFileInfo(ctx, o, prefix, metadata)
list = append(list, info)
totalLength += o.Size()
}
})
return nil
})
if err != nil {
return err
} else if list.Len() == 0 {
return fmt.Errorf("no files found in source")
}
sort.Stable(list)
// create archive
if ci.DryRun {
// write nowhere
counter := files.NewCountWriter(nil)
err = compArchive.Archive(ctx, counter, list)
// log totals
fs.Infof(nil, "Total files added %d", list.Len())
fs.Infof(nil, "Total bytes read %d", totalLength)
fs.Infof(nil, "Compressed file size %d", counter.Count())
return err
} else if dst == nil {
// write to stdout
counter := files.NewCountWriter(os.Stdout)
err = compArchive.Archive(ctx, counter, list)
// log totals
fs.Infof(nil, "Total files added %d", list.Len())
fs.Infof(nil, "Total bytes read %d", totalLength)
fs.Infof(nil, "Compressed file size %d", counter.Count())
return err
}
// write to remote
pipeReader, pipeWriter := io.Pipe()
// write to pipewriter in background
counter := files.NewCountWriter(pipeWriter)
go func() {
err := compArchive.Archive(ctx, counter, list)
pipeWriter.CloseWithError(err)
}()
// rcat to remote from pipereader
_, err = operations.Rcat(ctx, dst, dstFile, pipeReader, time.Now(), nil)
// log totals
fs.Infof(nil, "Total files added %d", list.Len())
fs.Infof(nil, "Total bytes read %d", totalLength)
fs.Infof(nil, "Compressed file size %d", counter.Count())
return err
}
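// Example sketch (not part of the original file): calling ArchiveCreate
// programmatically to stream a local directory into a tar.gz on a remote.
// It assumes an extra import of "github.com/rclone/rclone/fs/cache" and that
// "remote:" is a configured rclone remote; the paths are illustrative only.
func exampleArchiveCreate(ctx context.Context) error {
	src, err := cache.Get(ctx, "/path/to/sourcedir")
	if err != nil {
		return err
	}
	dst, err := cache.Get(ctx, "remote:backups")
	if err != nil {
		return err
	}
	// Format is guessed from the .tar.gz extension; no prefix is added.
	return ArchiveCreate(ctx, dst, "sourcedir.tar.gz", src, "", "")
}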

@@ -1,7 +0,0 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9
// Package archive implements 'rclone archive create'.
package create

@@ -1,191 +0,0 @@
//go:build !plan9
// Package extract implements 'rclone archive extract'
package extract
import (
"context"
"errors"
"fmt"
"path"
"strings"
"github.com/mholt/archives"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/archive"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
func init() {
archive.Command.AddCommand(Command)
}
// Command - extract
var Command = &cobra.Command{
Use: "extract [flags] <source> <destination>",
Short: `Extract archives from source to destination.`,
Long: strings.ReplaceAll(`
Extract the archive contents to a destination directory, auto-detecting
the format. See [rclone archive create](/commands/rclone_archive_create/)
for the archive formats supported.
For example on this archive:
|||
$ rclone archive list --long remote:archive.zip
6 2025-10-30 09:46:23.000000000 file.txt
0 2025-10-30 09:46:57.000000000 dir/
4 2025-10-30 09:46:57.000000000 dir/bye.txt
|||
You can run extract like this
|||
$ rclone archive extract remote:archive.zip remote:extracted
|||
Which gives this result
|||
$ rclone tree remote:extracted
/
├── dir
│ └── bye.txt
└── file.txt
|||
The source or destination or both can be local or remote.
Filters can be used to only extract certain files:
|||
$ rclone archive extract archive.zip partial --include "bye.*"
$ rclone tree partial
/
└── dir
└── bye.txt
|||
The [archive backend](/archive/) can also be used to extract files. It
can be used to mount archives read-only as well, but it supports a
different set of archive formats from the archive commands.
`, "|", "`"),
Annotations: map[string]string{
"versionIntroduced": "v1.72",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(2, 2, command, args)
src, srcFile := cmd.NewFsFile(args[0])
dst, dstFile := cmd.NewFsFile(args[1])
cmd.Run(false, false, command, func() error {
return ArchiveExtract(context.Background(), dst, dstFile, src, srcFile)
})
return nil
},
}
// ArchiveExtract extracts files from (src, srcFile) to (dst, dstDir)
func ArchiveExtract(ctx context.Context, dst fs.Fs, dstDir string, src fs.Fs, srcFile string) error {
var srcObj fs.Object
var filesExtracted = 0
var err error
fi := filter.GetConfig(ctx)
ci := fs.GetConfig(ctx)
// get source object
srcObj, err = src.NewObject(ctx, srcFile)
fs.Debugf(nil, "srcFile: %q, src : %v", srcFile, src)
if errors.Is(err, fs.ErrorIsDir) {
return fmt.Errorf("source can't be a directory: %w", err)
} else if errors.Is(err, fs.ErrorObjectNotFound) {
return fmt.Errorf("source not found: %w", err)
} else if err != nil {
return fmt.Errorf("unable to access source: %w", err)
}
fs.Debugf(nil, "Source archive file: %s/%s", src.Root(), srcFile)
// Create destination directory
err = dst.Mkdir(ctx, dstDir)
if err != nil {
return fmt.Errorf("unable to access destination: %w", err)
}
fs.Debugf(dst, "Destination for extracted files: %q", dstDir)
// start accounting
tr := accounting.Stats(ctx).NewTransfer(srcObj, nil)
defer tr.Done(ctx, err)
// open source
var options []fs.OpenOption
for _, option := range fs.GetConfig(ctx).DownloadHeaders {
options = append(options, option)
}
in0, err := operations.Open(ctx, srcObj, options...)
if err != nil {
return fmt.Errorf("failed to open file %s: %w", srcFile, err)
}
// account the transfer
// in = tr.Account(ctx, in).WithBuffer()
in := tr.Account(ctx, in0)
// identify format
format, _, err := archives.Identify(ctx, "", in)
if err != nil {
return fmt.Errorf("failed to open check file type: %w", err)
}
fs.Debugf(nil, "Extract %s/%s, format %s to %s", src.Root(), srcFile, strings.TrimPrefix(format.Extension(), "."), dst.Root())
// check if extract is supported by format
ex, isExtract := format.(archives.Extraction)
if !isExtract {
return fmt.Errorf("extraction for %s not supported", strings.TrimPrefix(format.Extension(), "."))
}
// extract files
err = ex.Extract(ctx, in, func(ctx context.Context, f archives.FileInfo) error {
remote := f.NameInArchive
if dstDir != "" {
remote = path.Join(dstDir, remote)
}
// check if file should be extracted
if !fi.Include(remote, f.Size(), f.ModTime(), fs.Metadata{}) {
return nil
}
// process directory
if f.IsDir() {
// directory
fs.Debugf(nil, "mkdir %s", remote)
// leave if --dry-run set
if ci.DryRun {
return nil
}
// create the directory
return operations.Mkdir(ctx, dst, remote)
}
// process file
fs.Debugf(nil, "Extract %s", remote)
// leave if --dry-run set
if ci.DryRun {
filesExtracted++
return nil
}
// open file
fin, err := f.Open()
if err != nil {
return err
}
// extract the file to destination
_, err = operations.Rcat(ctx, dst, remote, fin, f.ModTime(), nil)
if err == nil {
filesExtracted++
}
return err
})
fs.Infof(nil, "Total files extracted %d", filesExtracted)
return err
}
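// Example sketch (not part of the original file): calling ArchiveExtract
// programmatically to unpack remote:archive.zip into a local directory.
// It assumes an extra import of "github.com/rclone/rclone/fs/cache" and that
// "remote:" is a configured rclone remote; the paths are illustrative only.
func exampleArchiveExtract(ctx context.Context) error {
	src, err := cache.Get(ctx, "remote:")
	if err != nil {
		return err
	}
	dst, err := cache.Get(ctx, "/tmp/extracted")
	if err != nil {
		return err
	}
	// Extract everything in the archive into the root of dst.
	return ArchiveExtract(ctx, dst, "", src, "archive.zip")
}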

@@ -1,7 +0,0 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9
// Package archive implements 'rclone archive extract'.
package extract

@@ -1,34 +0,0 @@
package files
import (
"io"
"sync/atomic"
)
// CountWriter counts bytes written through it.
// It is safe to call Count concurrently; Write is as safe as the wrapped Writer.
type CountWriter struct {
w io.Writer
count atomic.Uint64
}
// NewCountWriter wraps w (use nil if you want to drop data).
func NewCountWriter(w io.Writer) *CountWriter {
if w == nil {
w = io.Discard
}
return &CountWriter{w: w}
}
func (cw *CountWriter) Write(p []byte) (int, error) {
n, err := cw.w.Write(p)
if n > 0 {
cw.count.Add(uint64(n))
}
return n, err
}
// Count returns the total bytes written.
func (cw *CountWriter) Count() uint64 {
return cw.count.Load()
}
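// Example sketch (not part of the original file): counting how many compressed
// bytes a writer produces while discarding the data. It assumes extra imports
// of "compress/gzip" and "fmt".
func exampleCountWriter() {
	cw := NewCountWriter(nil) // nil wraps io.Discard
	gz := gzip.NewWriter(cw)
	_, _ = gz.Write([]byte("hello hello hello"))
	_ = gz.Close()
	fmt.Printf("compressed size: %d bytes\n", cw.Count())
}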

@@ -1,109 +0,0 @@
package files
import (
"errors"
"io"
"sync"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
type stubWriter struct {
n int
err error
}
func (s stubWriter) Write(p []byte) (int, error) {
if s.n > len(p) {
return len(p), s.err
}
return s.n, s.err
}
func TestCountWriter(t *testing.T) {
t.Parallel()
t.Run("initial count is zero", func(t *testing.T) {
cw := NewCountWriter(io.Discard)
require.Equal(t, uint64(0), cw.Count())
})
t.Run("counts bytes with real writes", func(t *testing.T) {
cw := NewCountWriter(io.Discard)
n, err := cw.Write([]byte("abcd"))
require.NoError(t, err)
require.Equal(t, 4, n)
assert.Equal(t, uint64(4), cw.Count())
n, err = cw.Write([]byte("xyz"))
require.NoError(t, err)
require.Equal(t, 3, n)
assert.Equal(t, uint64(7), cw.Count())
})
t.Run("nil writer uses io.Discard", func(t *testing.T) {
cw := NewCountWriter(nil)
n, err := cw.Write([]byte("ok"))
require.NoError(t, err)
require.Equal(t, 2, n)
assert.Equal(t, uint64(2), cw.Count())
})
t.Run("zero-length write does not change count", func(t *testing.T) {
cw := NewCountWriter(io.Discard)
n, err := cw.Write(nil)
require.NoError(t, err)
require.Equal(t, 0, n)
assert.Equal(t, uint64(0), cw.Count())
})
t.Run("partial write with error counts n and returns error", func(t *testing.T) {
s := stubWriter{n: 3, err: errors.New("boom")}
cw := NewCountWriter(s)
n, err := cw.Write([]byte("abcdef"))
require.Error(t, err)
require.Equal(t, 3, n)
assert.Equal(t, uint64(3), cw.Count())
})
t.Run("short successful write counts returned n", func(t *testing.T) {
s := stubWriter{n: 1}
cw := NewCountWriter(s)
n, err := cw.Write([]byte("hi"))
require.NoError(t, err)
require.Equal(t, 1, n)
assert.Equal(t, uint64(1), cw.Count())
})
}
func TestCountWriterConcurrent(t *testing.T) {
t.Parallel()
const (
goroutines = 32
loops = 200
chunkSize = 64
)
data := make([]byte, chunkSize)
cw := NewCountWriter(io.Discard)
var wg sync.WaitGroup
wg.Add(goroutines)
for g := 0; g < goroutines; g++ {
go func() {
defer wg.Done()
for i := 0; i < loops; i++ {
n, err := cw.Write(data)
assert.NoError(t, err)
assert.Equal(t, chunkSize, n)
}
}()
}
wg.Wait()
want := uint64(goroutines * loops * chunkSize)
assert.Equal(t, want, cw.Count())
}

Some files were not shown because too many files have changed in this diff.