mirror of
https://github.com/rclone/rclone.git
synced 2025-12-06 00:03:32 +00:00
Compare commits
126 Commits
fix-rc-del
...
copilot/fi
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
46bc876085 | ||
|
|
a58efc1544 | ||
|
|
4f5efe2871 | ||
|
|
6d9f4a3c20 | ||
|
|
cc09978b79 | ||
|
|
409dc75328 | ||
|
|
fb30c5f8dd | ||
|
|
203df6cc58 | ||
|
|
459e10d599 | ||
|
|
1ba4fd1d83 | ||
|
|
77553b8dd5 | ||
|
|
5420dbbe38 | ||
|
|
87b71dd6b9 | ||
|
|
a0bcdc2638 | ||
|
|
e42fa9f92d | ||
|
|
4586104dc7 | ||
|
|
c4c360a285 | ||
|
|
ce4860b9b6 | ||
|
|
ed87f82d21 | ||
|
|
0a82929b94 | ||
|
|
1e8ee3b813 | ||
|
|
eaab3f5271 | ||
|
|
25b05f1210 | ||
|
|
2dc1b07863 | ||
|
|
49acacec2e | ||
|
|
70d2fe6568 | ||
|
|
f28c83c6de | ||
|
|
2cf44e584c | ||
|
|
bba9027817 | ||
|
|
51859af8d9 | ||
|
|
4f60f8915d | ||
|
|
6663eb346f | ||
|
|
1d0e1ea0b5 | ||
|
|
71631621c4 | ||
|
|
31e904d84c | ||
|
|
30c9843e3d | ||
|
|
c8a834f0e8 | ||
|
|
b272c50c4c | ||
|
|
b8700e8042 | ||
|
|
73193b0565 | ||
|
|
c4eef3065f | ||
|
|
ba2a642961 | ||
|
|
979c6a573d | ||
|
|
bbb866018e | ||
|
|
7706f02294 | ||
|
|
6df7913181 | ||
|
|
c079495d1f | ||
|
|
3bf1ac5b07 | ||
|
|
091caa34c6 | ||
|
|
d507e9be39 | ||
|
|
40b3251e41 | ||
|
|
484d955ea8 | ||
|
|
8fa9f255a0 | ||
|
|
e7f11af1ca | ||
|
|
0b5c4cc442 | ||
|
|
178ddafdc7 | ||
|
|
ad316ec6e3 | ||
|
|
61b022dfc3 | ||
|
|
1903b4c1a2 | ||
|
|
f7cbcf556f | ||
|
|
3581e628c0 | ||
|
|
62c41bf449 | ||
|
|
c5864e113b | ||
|
|
39259a5bd1 | ||
|
|
2e376eb3b9 | ||
|
|
de8e9d4693 | ||
|
|
710cf49bc6 | ||
|
|
8dacac60ea | ||
|
|
3a80d4d4b4 | ||
|
|
a531f987a8 | ||
|
|
e906b8d0c4 | ||
|
|
a5932ef91a | ||
|
|
3afa563eaf | ||
|
|
9d9654b31f | ||
|
|
cfe257f13d | ||
|
|
0375efbd35 | ||
|
|
cad1954213 | ||
|
|
604e37caa5 | ||
|
|
b249d384b9 | ||
|
|
04e91838db | ||
|
|
94829aaec5 | ||
|
|
f574e3395c | ||
|
|
2bc155a96a | ||
|
|
adc8ea3427 | ||
|
|
068eea025c | ||
|
|
4510aa679a | ||
|
|
79281354c7 | ||
|
|
f57a178719 | ||
|
|
44f2e2ed39 | ||
|
|
13e1752d94 | ||
|
|
bb82c0e43b | ||
|
|
1af7151e73 | ||
|
|
fd63478ed6 | ||
|
|
5133b05c74 | ||
|
|
6ba96ede4b | ||
|
|
2896973964 | ||
|
|
be123d85ff | ||
|
|
b1b9562ab7 | ||
|
|
5146b66569 | ||
|
|
8898372d5a | ||
|
|
091fe9e453 | ||
|
|
8fdb68e41a | ||
|
|
c124aa2ed3 | ||
|
|
54e8bb89f7 | ||
|
|
50c1b594ab | ||
|
|
72437a9ca2 | ||
|
|
8ed55c61e1 | ||
|
|
bd598c1ceb | ||
|
|
7e30665102 | ||
|
|
d44957a09c | ||
|
|
37524e2dea | ||
|
|
2f6a6c8233 | ||
|
|
4ad40b6554 | ||
|
|
4f33d64f25 | ||
|
|
519623d9f1 | ||
|
|
913278327b | ||
|
|
a9b05e4c7a | ||
|
|
5d6d79e7d4 | ||
|
|
11de074cbf | ||
|
|
e9ab177a32 | ||
|
|
f3f4fba98d | ||
|
|
03fccdd67b | ||
|
|
231083647e | ||
|
|
0e203a7546 | ||
|
|
a7dd787569 | ||
|
|
689555033e |
@@ -183,7 +183,7 @@ jobs:
|
||||
touch "/tmp/digests/${digest#sha256:}"
|
||||
|
||||
- name: Upload Image Digest
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v5
|
||||
with:
|
||||
name: digests-${{ env.PLATFORM }}
|
||||
path: /tmp/digests/*
|
||||
@@ -198,7 +198,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Download Image Digests
|
||||
uses: actions/download-artifact@v5
|
||||
uses: actions/download-artifact@v6
|
||||
with:
|
||||
path: /tmp/digests
|
||||
pattern: digests-*
|
||||
|
||||
@@ -19,6 +19,11 @@ linters:
|
||||
- unconvert
|
||||
# Configure checks. Mostly using defaults but with some commented exceptions.
|
||||
settings:
|
||||
govet:
|
||||
enable-all: true
|
||||
disable:
|
||||
- fieldalignment
|
||||
- shadow
|
||||
staticcheck:
|
||||
# With staticcheck there is only one setting, so to extend the implicit
|
||||
# default value it must be explicitly included.
|
||||
|
||||
@@ -621,44 +621,7 @@ in the web browser and the links (internal and external) all work.
|
||||
|
||||
## Adding a new s3 provider
|
||||
|
||||
It is quite easy to add a new S3 provider to rclone.
|
||||
|
||||
You'll need to modify the following files
|
||||
|
||||
- `backend/s3/s3.go`
|
||||
- Add the provider to `providerOption` at the top of the file
|
||||
- Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
|
||||
  - Exclude your provider from generic config questions (eg `region` and `endpoint`).
|
||||
- Add the provider to the `setQuirks` function - see the documentation there.
|
||||
- `docs/content/s3.md`
|
||||
- Add the provider at the top of the page.
|
||||
- Add a section about the provider linked from there.
|
||||
- Make sure this is in alphabetical order in the `Providers` section.
|
||||
- Add a transcript of a trial `rclone config` session
|
||||
- Edit the transcript to remove things which might change in subsequent versions
|
||||
- **Do not** alter or add to the autogenerated parts of `s3.md`
|
||||
- **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
|
||||
- `README.md` - this is the home page in github
|
||||
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
|
||||
- `docs/content/_index.md` - this is the home page of rclone.org
|
||||
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
|
||||
|
||||
When adding the provider, endpoints, quirks, docs etc keep them in
|
||||
alphabetical order by `Provider` name, but with `AWS` first and
|
||||
`Other` last.
|
||||
|
||||
Once you've written the docs, run `make serve` and check they look OK
|
||||
in the web browser and the links (internal and external) all work.
|
||||
|
||||
Once you've written the code, test `rclone config` works to your
|
||||
satisfaction, and check the integration tests work `go test -v -remote
|
||||
NewS3Provider:`. You may need to adjust the quirks to get them to
|
||||
pass. Some providers just can't pass the tests with control characters
|
||||
in the names so if these fail and the provider doesn't support
|
||||
`urlEncodeListings` in the quirks then ignore them. Note that the
|
||||
`SetTier` test may also fail on non AWS providers.
|
||||
|
||||
For an example of adding an s3 provider see [eb3082a1](https://github.com/rclone/rclone/commit/eb3082a1ebdb76d5625f14cedec3f5154a5e7b10).
|
||||
[Please see the guide in the S3 backend directory](backend/s3/README.md).
|
||||
|
||||
## Writing a plugin
|
||||
|
||||
|
||||
12
Makefile
12
Makefile
@@ -114,21 +114,21 @@ release_dep_linux:
|
||||
# Update dependencies
|
||||
showupdates:
|
||||
@echo "*** Direct dependencies that could be updated ***"
|
||||
@GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
|
||||
@go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
|
||||
|
||||
# Update direct dependencies only
|
||||
updatedirect:
|
||||
GO111MODULE=on go get -d $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
|
||||
GO111MODULE=on go mod tidy
|
||||
go get $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
|
||||
go mod tidy
|
||||
|
||||
# Update direct and indirect dependencies and test dependencies
|
||||
update:
|
||||
GO111MODULE=on go get -d -u -t ./...
|
||||
GO111MODULE=on go mod tidy
|
||||
go get -u -t ./...
|
||||
go mod tidy
|
||||
|
||||
# Tidy the module dependencies
|
||||
tidy:
|
||||
GO111MODULE=on go mod tidy
|
||||
go mod tidy
|
||||
|
||||
doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
|
||||
|
||||
|
||||
@@ -34,6 +34,7 @@ directories to and from different cloud storage providers.
|
||||
- China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
|
||||
- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
|
||||
- Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
|
||||
- Cubbit DS3 [:page_facing_up:](https://rclone.org/s3/#Cubbit)
|
||||
- DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
|
||||
- Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
|
||||
- Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
|
||||
@@ -50,6 +51,7 @@ directories to and from different cloud storage providers.
|
||||
- Google Drive [:page_facing_up:](https://rclone.org/drive/)
|
||||
- Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
|
||||
- HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
|
||||
- Hetzner Object Storage [:page_facing_up:](https://rclone.org/s3/#hetzner)
|
||||
- Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
|
||||
- HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
|
||||
- HTTP [:page_facing_up:](https://rclone.org/http/)
|
||||
@@ -95,6 +97,7 @@ directories to and from different cloud storage providers.
|
||||
- Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
|
||||
- QingStor [:page_facing_up:](https://rclone.org/qingstor/)
|
||||
- Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
|
||||
- Rabata Cloud Storage [:page_facing_up:](https://rclone.org/s3/#Rabata)
|
||||
- Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
|
||||
- Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
|
||||
- RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
|
||||
@@ -104,6 +107,7 @@ directories to and from different cloud storage providers.
|
||||
- Seagate Lyve Cloud [:page_facing_up:](https://rclone.org/s3/#lyve)
|
||||
- SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
|
||||
- Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
|
||||
- Servercore Object Storage [:page_facing_up:](https://rclone.org/s3/#servercore)
|
||||
- SFTP [:page_facing_up:](https://rclone.org/sftp/)
|
||||
- SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
|
||||
- Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
|
||||
@@ -127,6 +131,7 @@ Please see [the full list of all storage providers and their features](https://r
|
||||
These backends adapt or modify other storage providers
|
||||
|
||||
- Alias: rename existing remotes [:page_facing_up:](https://rclone.org/alias/)
|
||||
- Archive: read archive files [:page_facing_up:](https://rclone.org/archive/)
|
||||
- Cache: cache remotes (DEPRECATED) [:page_facing_up:](https://rclone.org/cache/)
|
||||
- Chunker: split large files [:page_facing_up:](https://rclone.org/chunker/)
|
||||
- Combine: combine multiple remotes into a directory tree [:page_facing_up:](https://rclone.org/combine/)
|
||||
|
||||
@@ -4,6 +4,7 @@ package all
|
||||
import (
|
||||
// Active file systems
|
||||
_ "github.com/rclone/rclone/backend/alias"
|
||||
_ "github.com/rclone/rclone/backend/archive"
|
||||
_ "github.com/rclone/rclone/backend/azureblob"
|
||||
_ "github.com/rclone/rclone/backend/azurefiles"
|
||||
_ "github.com/rclone/rclone/backend/b2"
|
||||
|
||||
679
backend/archive/archive.go
Normal file
679
backend/archive/archive.go
Normal file
@@ -0,0 +1,679 @@
|
||||
//go:build !plan9
|
||||
|
||||
// Package archive implements a backend to access archive files in a remote
|
||||
package archive
|
||||
|
||||
// FIXME factor common code between backends out - eg VFS initialization
|
||||
|
||||
// FIXME can we generalize the VFS handle caching and use it in zip backend
|
||||
|
||||
// Factor more stuff out if possible
|
||||
|
||||
// Odd stats which are probably coming from the VFS
|
||||
// * tensorflow.sqfs: 0% /3.074Gi, 204.426Ki/s, 4h22m46s
|
||||
|
||||
// FIXME this will perform poorly for unpacking as the VFS Reader is bad
|
||||
// at multiple streams - need cache mode setting?
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
// Import all the required archivers here
|
||||
_ "github.com/rclone/rclone/backend/archive/squashfs"
|
||||
_ "github.com/rclone/rclone/backend/archive/zip"
|
||||
|
||||
"github.com/rclone/rclone/backend/archive/archiver"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/fspath"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
)
|
||||
|
||||
// Register with Fs
//
// Registers the "archive" backend with rclone's backend registry so it
// can be selected in config and via the :archive: syntax. The only
// option is the remote to wrap; leaving it empty makes the backend use
// its own root as the remote (see the option help text below).
func init() {
	fsi := &fs.RegInfo{
		Name:        "archive",
		Description: "Read archives",
		NewFs:       NewFs,
		// Metadata is passed straight through to the wrapped remote.
		MetadataInfo: &fs.MetadataInfo{
			Help: `Any metadata supported by the underlying remote is read and written.`,
		},
		Options: []fs.Option{{
			Name: "remote",
			Help: `Remote to wrap to read archives from.

Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
"myremote:bucket" or "myremote:".

If this is left empty, then the archive backend will use the root as
the remote.

This means that you can use :archive:remote:path and it will be
equivalent to setting remote="remote:path".
`,
			Required: false,
		}},
	}
	fs.Register(fsi)
}
|
||||
|
||||
// Options defines the configuration for this backend
type Options struct {
	// Remote is the wrapped remote to read archives from; may be empty,
	// in which case the backend root is used instead (see NewFs).
	Remote string `config:"remote"`
}
|
||||
|
||||
// Fs represents an archive of upstreams.
//
// It wraps another fs.Fs and presents any archive files it finds there
// as directories, lazily opening them via registered archivers.
type Fs struct {
	name     string       // name of this remote
	features *fs.Features // optional features
	opt      Options      // options for this Fs
	root     string       // the path we are working on
	f        fs.Fs        // remote we are wrapping
	wrapper  fs.Fs        // fs that wraps us

	mu       sync.Mutex          // protects the below
	archives map[string]*archive // the archives we have, by path
}
|
||||
|
||||
// A single open archive
//
// The archive Fs itself (f) is created lazily by init so that listing
// a directory full of archives does not open them all.
type archive struct {
	archiver archiver.Archiver // archiver responsible
	remote   string            // path to the archive
	prefix   string            // prefix to add on to listings
	root     string            // root of the archive to remove from listings
	mu       sync.Mutex        // protects the following variables
	f        fs.Fs             // the archive Fs, may be nil
}
|
||||
|
||||
// If remote is an archive then return it otherwise return nil
|
||||
func findArchive(remote string) *archive {
|
||||
// FIXME use something faster than linear search?
|
||||
for _, archiver := range archiver.Archivers {
|
||||
if strings.HasSuffix(remote, archiver.Extension) {
|
||||
return &archive{
|
||||
archiver: archiver,
|
||||
remote: remote,
|
||||
prefix: remote,
|
||||
root: "",
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Find an archive buried in remote
|
||||
func subArchive(remote string) *archive {
|
||||
archive := findArchive(remote)
|
||||
if archive != nil {
|
||||
return archive
|
||||
}
|
||||
parent := path.Dir(remote)
|
||||
if parent == "/" || parent == "." {
|
||||
return nil
|
||||
}
|
||||
return subArchive(parent)
|
||||
}
|
||||
|
||||
// If remote is an archive then return it otherwise return nil
|
||||
func (f *Fs) findArchive(remote string) (archive *archive) {
|
||||
archive = findArchive(remote)
|
||||
if archive != nil {
|
||||
f.mu.Lock()
|
||||
f.archives[remote] = archive
|
||||
f.mu.Unlock()
|
||||
}
|
||||
return archive
|
||||
}
|
||||
|
||||
// Instantiate archive if it hasn't been instantiated yet
|
||||
//
|
||||
// This is done lazily so that we can list a directory full of
|
||||
// archives without opening them all.
|
||||
func (a *archive) init(ctx context.Context, f fs.Fs) (fs.Fs, error) {
|
||||
a.mu.Lock()
|
||||
defer a.mu.Unlock()
|
||||
if a.f != nil {
|
||||
return a.f, nil
|
||||
}
|
||||
newFs, err := a.archiver.New(ctx, f, a.remote, a.prefix, a.root)
|
||||
if err != nil && err != fs.ErrorIsFile {
|
||||
return nil, fmt.Errorf("failed to create archive %q: %w", a.remote, err)
|
||||
}
|
||||
a.f = newFs
|
||||
return a.f, nil
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config.
//
// If the path points inside an archive file, the returned Fs is the
// archive's own Fs (see foundArchive handling below) rather than a *Fs.
// May return fs.ErrorIsFile alongside a valid Fs when root points at a
// plain file, following the usual rclone convention.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs.Fs, err error) {
	// defer log.Trace(nil, "name=%q, root=%q, m=%v", name, root, m)("f=%+v, err=%v", &outFs, &err)
	// Parse config into Options struct
	opt := new(Options)
	err = configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	remote := opt.Remote
	origRoot := root

	// If remote is empty, use the root instead. This supports the
	// connection-string form :archive:remote:path.
	if remote == "" {
		remote = root
		root = ""
	}
	isDirectory := strings.HasSuffix(remote, "/")
	remote = strings.TrimRight(remote, "/")
	if remote == "" {
		remote = "/"
	}
	if strings.HasPrefix(remote, name+":") {
		return nil, errors.New("can't point archive remote at itself - check the value of the upstreams setting")
	}

	// isDirectory is computed but currently unused — kept for future use.
	_ = isDirectory

	// Check whether the remote path itself is (or is inside) an archive.
	foundArchive := subArchive(remote)
	if foundArchive != nil {
		fs.Debugf(nil, "Found archiver for %q remote %q", foundArchive.archiver.Extension, foundArchive.remote)
		// Archive path: the part of remote below the archive file
		foundArchive.root = strings.Trim(remote[len(foundArchive.remote):], "/")
		// Path to the archive
		archiveRemote := remote[:len(foundArchive.remote)]
		// Remote is archive leaf name
		foundArchive.remote = path.Base(archiveRemote)
		foundArchive.prefix = ""
		// Point remote to archive file
		remote = archiveRemote
	}

	// Make sure to remove trailing . referring to the current dir
	if path.Base(root) == "." {
		root = strings.TrimSuffix(root, ".")
	}
	remotePath := fspath.JoinRootPath(remote, root)
	// cache.Get may return fs.ErrorIsFile with a usable Fs — that is
	// expected when remote points at an archive file, so only hard
	// errors are fatal here.
	wrappedFs, err := cache.Get(ctx, remotePath)
	if err != fs.ErrorIsFile && err != nil {
		return nil, fmt.Errorf("failed to make remote %q to wrap: %w", remote, err)
	}

	f := &Fs{
		name: name,
		//root: path.Join(remotePath, root),
		root:     origRoot,
		opt:      *opt,
		f:        wrappedFs,
		archives: make(map[string]*archive),
	}
	// Keep the wrapped Fs pinned in the cache for the lifetime of f.
	cache.PinUntilFinalized(f.f, f)
	// the features here are ones we could support, and they are
	// ANDed with the ones from wrappedFs
	f.features = (&fs.Features{
		CaseInsensitive:         true,
		DuplicateFiles:          false,
		ReadMimeType:            true,
		WriteMimeType:           true,
		CanHaveEmptyDirectories: true,
		BucketBased:             true,
		SetTier:                 true,
		GetTier:                 true,
		ReadMetadata:            true,
		WriteMetadata:           true,
		UserMetadata:            true,
		PartialUploads:          true,
	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)

	if foundArchive != nil {
		fs.Debugf(f, "Root is an archive")
		// The wrapped remote must have reported a file here, otherwise
		// the archive path does not actually point at an archive file.
		if err != fs.ErrorIsFile {
			return nil, fmt.Errorf("expecting to find a file at %q", remote)
		}
		// Hand back the archive's own Fs, not this wrapper.
		return foundArchive.init(ctx, f.f)
	}
	// Correct root if definitely pointing to a file
	if err == fs.ErrorIsFile {
		f.root = path.Dir(f.root)
		if f.root == "." || f.root == "/" {
			f.root = ""
		}
	}
	// err may be fs.ErrorIsFile here, which callers expect.
	return f, err
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("archive root '%s'", f.root)
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}
|
||||
|
||||
// Rmdir removes the directory dir — delegated to the wrapped remote.
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return f.f.Rmdir(ctx, dir)
}
|
||||
|
||||
// Hashes returns the hash types of the wrapped remote.
//
// NOTE(review): the original comment claimed hash.HashNone, but the
// code delegates to the wrapped Fs.
func (f *Fs) Hashes() hash.Set {
	return f.f.Hashes()
}
|
||||
|
||||
// Mkdir makes the directory dir — delegated to the wrapped remote.
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	return f.f.Mkdir(ctx, dir)
}
|
||||
|
||||
// Purge all files in the directory
|
||||
//
|
||||
// Implement this if you have a way of deleting all the files
|
||||
// quicker than just running Remove() on the result of List()
|
||||
//
|
||||
// Return an error if it doesn't exist
|
||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
do := f.f.Features().Purge
|
||||
if do == nil {
|
||||
return fs.ErrorCantPurge
|
||||
}
|
||||
return do(ctx, dir)
|
||||
}
|
||||
|
||||
// Copy src to this remote using server-side copy operations.
|
||||
//
|
||||
// This is stored with the remote path given.
|
||||
//
|
||||
// It returns the destination Object and a possible error.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
do := f.f.Features().Copy
|
||||
if do == nil {
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
// FIXME
|
||||
// o, ok := src.(*Object)
|
||||
// if !ok {
|
||||
// return nil, fs.ErrorCantCopy
|
||||
// }
|
||||
return do(ctx, src, remote)
|
||||
}
|
||||
|
||||
// Move src to this remote using server-side move operations.
|
||||
//
|
||||
// This is stored with the remote path given.
|
||||
//
|
||||
// It returns the destination Object and a possible error.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantMove
|
||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
do := f.f.Features().Move
|
||||
if do == nil {
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
// FIXME
|
||||
// o, ok := src.(*Object)
|
||||
// if !ok {
|
||||
// return nil, fs.ErrorCantMove
|
||||
// }
|
||||
return do(ctx, src, remote)
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server-side move operations.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantDirMove
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists
|
||||
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
|
||||
do := f.f.Features().DirMove
|
||||
if do == nil {
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
return do(ctx, srcFs.f, srcRemote, dstRemote)
|
||||
}
|
||||
|
||||
// ChangeNotify calls the passed function with a path
|
||||
// that has had changes. If the implementation
|
||||
// uses polling, it should adhere to the given interval.
|
||||
// At least one value will be written to the channel,
|
||||
// specifying the initial value and updated values might
|
||||
// follow. A 0 Duration should pause the polling.
|
||||
// The ChangeNotify implementation must empty the channel
|
||||
// regularly. When the channel gets closed, the implementation
|
||||
// should stop polling and release resources.
|
||||
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), ch <-chan time.Duration) {
|
||||
do := f.f.Features().ChangeNotify
|
||||
if do == nil {
|
||||
return
|
||||
}
|
||||
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
|
||||
// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
|
||||
notifyFunc(path, entryType)
|
||||
}
|
||||
do(ctx, wrappedNotifyFunc, ch)
|
||||
}
|
||||
|
||||
// DirCacheFlush resets the directory cache - used in testing
|
||||
// as an optional interface
|
||||
func (f *Fs) DirCacheFlush() {
|
||||
do := f.f.Features().DirCacheFlush
|
||||
if do != nil {
|
||||
do()
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
|
||||
var o fs.Object
|
||||
var err error
|
||||
if stream {
|
||||
o, err = f.f.Features().PutStream(ctx, in, src, options...)
|
||||
} else {
|
||||
o, err = f.f.Put(ctx, in, src, options...)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
o, err := f.NewObject(ctx, src.Remote())
|
||||
switch err {
|
||||
case nil:
|
||||
return o, o.Update(ctx, in, src, options...)
|
||||
case fs.ErrorObjectNotFound:
|
||||
return f.put(ctx, in, src, false, options...)
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
o, err := f.NewObject(ctx, src.Remote())
|
||||
switch err {
|
||||
case nil:
|
||||
return o, o.Update(ctx, in, src, options...)
|
||||
case fs.ErrorObjectNotFound:
|
||||
return f.put(ctx, in, src, true, options...)
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// About gets quota information from the Fs
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
do := f.f.Features().About
|
||||
if do == nil {
|
||||
return nil, errors.New("not supported by underlying remote")
|
||||
}
|
||||
return do(ctx)
|
||||
}
|
||||
|
||||
// Find the Fs for the directory
|
||||
func (f *Fs) findFs(ctx context.Context, dir string) (subFs fs.Fs, err error) {
|
||||
f.mu.Lock()
|
||||
defer f.mu.Unlock()
|
||||
|
||||
subFs = f.f
|
||||
|
||||
// FIXME should do this with a better datastructure like a prefix tree
|
||||
// FIXME want to find the longest first otherwise nesting won't work
|
||||
dirSlash := dir + "/"
|
||||
for archiverRemote, archive := range f.archives {
|
||||
subRemote := archiverRemote + "/"
|
||||
if strings.HasPrefix(dirSlash, subRemote) {
|
||||
subFs, err = archive.init(ctx, f.f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return subFs, nil
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The entries
// can be returned in any order but should be for a complete directory.
//
// dir should be "" to list the root, and should not have trailing
// slashes.
//
// This should return ErrDirNotFound if the directory isn't found.
//
// Any file in the listing whose name matches a registered archiver
// extension is presented as a directory instead of a file, so callers
// can descend into the archive.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	// defer log.Trace(f, "dir=%q", dir)("entries = %v, err=%v", &entries, &err)

	// Route the listing to the wrapped remote or to an archive Fs.
	subFs, err := f.findFs(ctx, dir)
	if err != nil {
		return nil, err
	}

	entries, err = subFs.List(ctx, dir)
	if err != nil {
		return nil, err
	}
	for i, entry := range entries {
		// Can only unarchive files
		if o, ok := entry.(fs.Object); ok {
			remote := o.Remote()
			// Also caches the archive for later findFs lookups.
			archive := f.findArchive(remote)
			if archive != nil {
				// Overwrite entry with directory
				entries[i] = fs.NewDir(remote, o.ModTime(ctx))
			}
		}
	}
	return entries, nil
}
|
||||
|
||||
// NewObject creates a new remote archive file object
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
|
||||
dir := path.Dir(remote)
|
||||
if dir == "/" || dir == "." {
|
||||
dir = ""
|
||||
}
|
||||
|
||||
subFs, err := f.findFs(ctx, dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
o, err := subFs.NewObject(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// Precision of the ModTimes in this Fs.
//
// NOTE(review): hard-coded to one second; the original comment's claim
// of "greatest precision of all the archivers" is aspirational.
func (f *Fs) Precision() time.Duration {
	return time.Second
}
|
||||
|
||||
// Shutdown the backend, closing any background tasks and any
|
||||
// cached connections.
|
||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
if do := f.f.Features().Shutdown; do != nil {
|
||||
return do(ctx)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PublicLink generates a public link to the remote path (usually readable by anyone)
|
||||
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
|
||||
do := f.f.Features().PublicLink
|
||||
if do == nil {
|
||||
return "", errors.New("PublicLink not supported")
|
||||
}
|
||||
return do(ctx, remote, expire, unlink)
|
||||
}
|
||||
|
||||
// PutUnchecked in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
//
|
||||
// May create duplicates or return errors if src already
|
||||
// exists.
|
||||
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
do := f.f.Features().PutUnchecked
|
||||
if do == nil {
|
||||
return nil, errors.New("can't PutUnchecked")
|
||||
}
|
||||
o, err := do(ctx, in, src, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// MergeDirs merges the contents of all the directories passed
|
||||
// in into the first one and rmdirs the other directories.
|
||||
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
|
||||
if len(dirs) == 0 {
|
||||
return nil
|
||||
}
|
||||
do := f.f.Features().MergeDirs
|
||||
if do == nil {
|
||||
return errors.New("MergeDirs not supported")
|
||||
}
|
||||
return do(ctx, dirs)
|
||||
}
|
||||
|
||||
// CleanUp the trash in the Fs
|
||||
//
|
||||
// Implement this if you have a way of emptying the trash or
|
||||
// otherwise cleaning up old versions of files.
|
||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
do := f.f.Features().CleanUp
|
||||
if do == nil {
|
||||
return errors.New("not supported by underlying remote")
|
||||
}
|
||||
return do(ctx)
|
||||
}
|
||||
|
||||
// OpenWriterAt opens with a handle for random access writes
|
||||
//
|
||||
// Pass in the remote desired and the size if known.
|
||||
//
|
||||
// It truncates any existing object
|
||||
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
|
||||
do := f.f.Features().OpenWriterAt
|
||||
if do == nil {
|
||||
return nil, fs.ErrorNotImplemented
|
||||
}
|
||||
return do(ctx, remote, size)
|
||||
}
|
||||
|
||||
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
	return f.f
}
|
||||
|
||||
// WrapFs returns the Fs that is wrapping this Fs
func (f *Fs) WrapFs() fs.Fs {
	return f.wrapper
}
|
||||
|
||||
// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
	f.wrapper = wrapper
}
|
||||
|
||||
// OpenChunkWriter returns the chunk size and a ChunkWriter
|
||||
//
|
||||
// Pass in the remote and the src object
|
||||
// You can also use options to hint at the desired chunk size
|
||||
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
|
||||
do := f.f.Features().OpenChunkWriter
|
||||
if do == nil {
|
||||
return info, nil, fs.ErrorNotImplemented
|
||||
}
|
||||
return do(ctx, remote, src, options...)
|
||||
}
|
||||
|
||||
// UserInfo returns info about the connected user
|
||||
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
|
||||
do := f.f.Features().UserInfo
|
||||
if do == nil {
|
||||
return nil, fs.ErrorNotImplemented
|
||||
}
|
||||
return do(ctx)
|
||||
}
|
||||
|
||||
// Disconnect the current user
|
||||
func (f *Fs) Disconnect(ctx context.Context) error {
|
||||
do := f.f.Features().Disconnect
|
||||
if do == nil {
|
||||
return fs.ErrorNotImplemented
|
||||
}
|
||||
return do(ctx)
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
//
// These are compile-time assertions: if *Fs stops implementing one of
// the optional interfaces below the package will fail to build.
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.Purger          = (*Fs)(nil)
	_ fs.PutStreamer     = (*Fs)(nil)
	_ fs.Copier          = (*Fs)(nil)
	_ fs.Mover           = (*Fs)(nil)
	_ fs.DirMover        = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.ChangeNotifier  = (*Fs)(nil)
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.Shutdowner      = (*Fs)(nil)
	_ fs.PublicLinker    = (*Fs)(nil)
	_ fs.PutUncheckeder  = (*Fs)(nil)
	_ fs.MergeDirser     = (*Fs)(nil)
	_ fs.CleanUpper      = (*Fs)(nil)
	_ fs.OpenWriterAter  = (*Fs)(nil)
	_ fs.OpenChunkWriter = (*Fs)(nil)
	_ fs.UserInfoer      = (*Fs)(nil)
	_ fs.Disconnecter    = (*Fs)(nil)
	// FIXME _ fs.FullObject = (*Object)(nil)
)
|
||||
221
backend/archive/archive_internal_test.go
Normal file
221
backend/archive/archive_internal_test.go
Normal file
@@ -0,0 +1,221 @@
|
||||
//go:build !plan9
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
_ "github.com/rclone/rclone/backend/local"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// FIXME need to test Open with seek
|
||||
|
||||
// run - run a shell command
|
||||
func run(t *testing.T, args ...string) {
|
||||
cmd := exec.Command(args[0], args[1:]...)
|
||||
fs.Debugf(nil, "run args = %v", args)
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf(`
|
||||
----------------------------
|
||||
Failed to run %v: %v
|
||||
Command output was:
|
||||
%s
|
||||
----------------------------
|
||||
`, args, err, out)
|
||||
}
|
||||
}
|
||||
|
||||
// checkTree checks the dst and src are identical
//
// It compares the archive remote dstArchive against the plain tree at
// src using both operations.Check and operations.CheckDownload, then
// opens every file via NewObject and verifies contents, seek/range
// reads, modtimes, sizes and String(). expectedCount is the number of
// matching files expected, or 0 to skip the count check.
func checkTree(ctx context.Context, name string, t *testing.T, dstArchive, src string, expectedCount int) {
	t.Run(name, func(t *testing.T) {
		fs.Debugf(nil, "check %q vs %q", dstArchive, src)
		// cache.Get returns fs.ErrorIsFile when the remote points at a
		// single file - expected for the SingleFile test case, so it
		// is not treated as a failure.
		Farchive, err := cache.Get(ctx, dstArchive)
		if err != fs.ErrorIsFile {
			require.NoError(t, err)
		}
		Fsrc, err := cache.Get(ctx, src)
		if err != fs.ErrorIsFile {
			require.NoError(t, err)
		}

		// matches collects one line per matching file so they can be counted
		var matches bytes.Buffer
		opt := operations.CheckOpt{
			Fdst:  Farchive,
			Fsrc:  Fsrc,
			Match: &matches,
		}

		for _, action := range []string{"Check", "Download"} {
			t.Run(action, func(t *testing.T) {
				matches.Reset()
				if action == "Download" {
					assert.NoError(t, operations.CheckDownload(ctx, &opt))
				} else {
					assert.NoError(t, operations.Check(ctx, &opt))
				}
				if expectedCount > 0 {
					assert.Equal(t, expectedCount, strings.Count(matches.String(), "\n"))
				}
			})
		}

		t.Run("NewObject", func(t *testing.T) {
			// Check we can run NewObject on all files and read them
			assert.NoError(t, operations.ListFn(ctx, Fsrc, func(srcObj fs.Object) {
				if t.Failed() {
					return
				}
				remote := srcObj.Remote()
				archiveObj, err := Farchive.NewObject(ctx, remote)
				require.NoError(t, err, remote)
				assert.Equal(t, remote, archiveObj.Remote(), remote)

				// Test that the contents are the same
				archiveBuf := fstests.ReadObject(ctx, t, archiveObj, -1)
				srcBuf := fstests.ReadObject(ctx, t, srcObj, -1)
				assert.Equal(t, srcBuf, archiveBuf)

				// The seek/range tests below need at least 81 bytes
				if len(srcBuf) < 81 {
					return
				}

				// Tests that Open works with SeekOption
				assert.Equal(t, srcBuf[50:], fstests.ReadObject(ctx, t, archiveObj, -1, &fs.SeekOption{Offset: 50}), "contents differ after seek")

				// Tests that Open works with RangeOption
				for _, test := range []struct {
					ro                 fs.RangeOption
					wantStart, wantEnd int
				}{
					{fs.RangeOption{Start: 5, End: 15}, 5, 16},
					{fs.RangeOption{Start: 80, End: -1}, 80, len(srcBuf)},
					{fs.RangeOption{Start: 81, End: 100000}, 81, len(srcBuf)},
					{fs.RangeOption{Start: -1, End: 20}, len(srcBuf) - 20, len(srcBuf)}, // if start is omitted this means get the final bytes
					// {fs.RangeOption{Start: -1, End: -1}, 0, len(srcBuf)}, - this seems to work but the RFC doesn't define it
				} {
					got := fstests.ReadObject(ctx, t, archiveObj, -1, &test.ro)
					foundAt := strings.Index(srcBuf, got)
					help := fmt.Sprintf("%#v failed want [%d:%d] got [%d:%d]", test.ro, test.wantStart, test.wantEnd, foundAt, foundAt+len(got))
					assert.Equal(t, srcBuf[test.wantStart:test.wantEnd], got, help)
				}

				// Test that the modtimes are correct
				fstest.AssertTimeEqualWithPrecision(t, remote, srcObj.ModTime(ctx), archiveObj.ModTime(ctx), Farchive.Precision())

				// Test that the sizes are correct
				assert.Equal(t, srcObj.Size(), archiveObj.Size())

				// Test that Strings are OK
				assert.Equal(t, srcObj.String(), archiveObj.String())
			}))
		})

		// t.Logf("Fdst ------------- %v", Fdst)
		// operations.List(ctx, Fdst, os.Stdout)
		// t.Logf("Fsrc ------------- %v", Fsrc)
		// operations.List(ctx, Fsrc, os.Stdout)
	})

}
|
||||
|
||||
// testArchive tests creating and reading back some archives
//
// archiveFn is called with (output, input) and must create an archive
// file at output from the directory tree at input. The resulting
// archive is then checked as a whole, for a subdirectory, for a
// single file, and from the level above.
//
// Note that this uses rclone and zip as external binaries.
func testArchive(t *testing.T, archiveName string, archiveFn func(t *testing.T, output, input string)) {
	ctx := context.Background()
	checkFiles := 1000

	// create random test input files
	inputRoot := t.TempDir()
	input := filepath.Join(inputRoot, archiveName)
	require.NoError(t, os.Mkdir(input, 0777))
	run(t, "rclone", "test", "makefiles", "--files", strconv.Itoa(checkFiles), "--ascii", input)

	// Create the archive
	output := t.TempDir()
	zipFile := path.Join(output, archiveName)
	archiveFn(t, zipFile, input)

	// Check the archive itself
	checkTree(ctx, "Archive", t, ":archive:"+zipFile, input, checkFiles)

	// Now check a subdirectory
	// Pick any directory and any file from the top level of the input
	fis, err := os.ReadDir(input)
	require.NoError(t, err)
	subDir := "NOT FOUND"
	aFile := "NOT FOUND"
	for _, fi := range fis {
		if fi.IsDir() {
			subDir = fi.Name()
		} else {
			aFile = fi.Name()
		}
	}
	checkTree(ctx, "SubDir", t, ":archive:"+zipFile+"/"+subDir, filepath.Join(input, subDir), 0)

	// Now check a single file
	// Filter the source side down to just aFile so the trees compare equal
	fiCtx, fi := filter.AddConfig(ctx)
	require.NoError(t, fi.AddRule("+ "+aFile))
	require.NoError(t, fi.AddRule("- *"))
	checkTree(fiCtx, "SingleFile", t, ":archive:"+zipFile+"/"+aFile, filepath.Join(input, aFile), 0)

	// Now check the level above
	checkTree(ctx, "Root", t, ":archive:"+output, inputRoot, checkFiles)
	// run(t, "cp", "-a", inputRoot, output, "/tmp/test-"+archiveName)
}
|
||||
|
||||
// skipIfNoExe skips the test unless exeName can be found in PATH.
func skipIfNoExe(t *testing.T, exeName string) {
	if _, err := exec.LookPath(exeName); err != nil {
		t.Skipf("%s executable not installed", exeName)
	}
}
|
||||
|
||||
// Test creating and reading back some archives
//
// Note that this uses rclone and zip as external binaries.
func TestArchiveZip(t *testing.T) {
	fstest.Initialise()
	skipIfNoExe(t, "zip")
	skipIfNoExe(t, "rclone")
	testArchive(t, "test.zip", func(t *testing.T, output, input string) {
		// zip stores paths relative to the current directory, so chdir
		// into the input tree first and restore the old cwd afterwards
		oldcwd, err := os.Getwd()
		require.NoError(t, err)
		require.NoError(t, os.Chdir(input))
		defer func() {
			require.NoError(t, os.Chdir(oldcwd))
		}()
		run(t, "zip", "-9r", output, ".")
	})
}
|
||||
|
||||
// Test creating and reading back some archives
|
||||
//
|
||||
// Note that this uses rclone and squashfs as external binaries.
|
||||
func TestArchiveSquashfs(t *testing.T) {
|
||||
fstest.Initialise()
|
||||
skipIfNoExe(t, "mksquashfs")
|
||||
skipIfNoExe(t, "rclone")
|
||||
testArchive(t, "test.sqfs", func(t *testing.T, output, input string) {
|
||||
run(t, "mksquashfs", input, output)
|
||||
})
|
||||
}
|
||||
67
backend/archive/archive_test.go
Normal file
67
backend/archive/archive_test.go
Normal file
@@ -0,0 +1,67 @@
|
||||
//go:build !plan9
|
||||
|
||||
// Test Archive filesystem interface
|
||||
package archive_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
_ "github.com/rclone/rclone/backend/local"
|
||||
_ "github.com/rclone/rclone/backend/memory"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
var (
	// Optional fs.Fs methods the archive backend does not implement
	unimplementableFsMethods = []string{"ListR", "ListP", "MkdirMetadata", "DirSetModTime"}
	// In these tests we receive objects from the underlying remote which don't implement these methods
	unimplementableObjectMethods = []string{"GetTier", "ID", "Metadata", "MimeType", "SetTier", "UnWrap", "SetMetadata"}
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
if *fstest.RemoteName == "" {
|
||||
t.Skip("Skipping as -remote not set")
|
||||
}
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: *fstest.RemoteName,
|
||||
UnimplementableFsMethods: unimplementableFsMethods,
|
||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
||||
})
|
||||
}
|
||||
|
||||
func TestLocal(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
remote := t.TempDir()
|
||||
name := "TestArchiveLocal"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "archive"},
|
||||
{Name: name, Key: "remote", Value: remote},
|
||||
},
|
||||
QuickTestOK: true,
|
||||
UnimplementableFsMethods: unimplementableFsMethods,
|
||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
||||
})
|
||||
}
|
||||
|
||||
func TestMemory(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
remote := ":memory:"
|
||||
name := "TestArchiveMemory"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "archive"},
|
||||
{Name: name, Key: "remote", Value: remote},
|
||||
},
|
||||
QuickTestOK: true,
|
||||
UnimplementableFsMethods: unimplementableFsMethods,
|
||||
UnimplementableObjectMethods: unimplementableObjectMethods,
|
||||
})
|
||||
}
|
||||
7
backend/archive/archive_unsupported.go
Normal file
7
backend/archive/archive_unsupported.go
Normal file
@@ -0,0 +1,7 @@
|
||||
// Build for archive for unsupported platforms to stop go complaining
|
||||
// about "no buildable Go source files "
|
||||
|
||||
//go:build plan9
|
||||
|
||||
// Package archive implements a backend to access archive files in a remote
|
||||
package archive
|
||||
24
backend/archive/archiver/archiver.go
Normal file
24
backend/archive/archiver/archiver.go
Normal file
@@ -0,0 +1,24 @@
|
||||
// Package archiver registers all the archivers
|
||||
package archiver
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
// Archiver describes an archive package
type Archiver struct {
	// New constructs an Fs from the (wrappedFs, remote) with the objects
	// prefix with prefix and rooted at root
	New func(ctx context.Context, f fs.Fs, remote, prefix, root string) (fs.Fs, error)
	// Extension is the archive file extension including the leading
	// dot, e.g. ".sqfs".
	Extension string
}

// Archivers is a slice of all registered archivers
var Archivers []Archiver

// Register adds the archivers provided to the list of known archivers
//
// Called from archiver packages' init functions. Not safe for
// concurrent use.
func Register(as ...Archiver) {
	Archivers = append(Archivers, as...)
}
|
||||
233
backend/archive/base/base.go
Normal file
233
backend/archive/base/base.go
Normal file
@@ -0,0 +1,233 @@
|
||||
// Package base is a base archive Fs
|
||||
package base
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/vfs"
|
||||
)
|
||||
|
||||
// Fs represents a wrapped fs.Fs
//
// It holds the state common to all archivers: the wrapped Fs, a VFS
// for reading the archive file, and the prefix/root bookkeeping.
type Fs struct {
	f           fs.Fs
	wrapper     fs.Fs
	name        string
	features    *fs.Features // optional features
	vfs         *vfs.VFS
	node        vfs.Node // archive object
	remote      string   // remote of the archive object
	prefix      string   // position for objects
	prefixSlash string   // position for objects with a slash on
	root        string   // position to read from within the archive
}

// errNotImplemented is returned by the stub methods below which a
// concrete archiver is expected to override.
var errNotImplemented = errors.New("internal error: method not implemented in archiver")
|
||||
|
||||
// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefix with prefix and rooted at root
//
// The archive file located at remote within wrappedFs is accessed
// through a VFS layered on top of wrappedFs. Returns an error if the
// archive file cannot be found.
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (*Fs, error) {
	// FIXME vfs cache?
	// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
	fs.Debugf(nil, "New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
	VFS := vfs.New(wrappedFs, nil)
	node, err := VFS.Stat(remote)
	if err != nil {
		return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
	}

	f := &Fs{
		f:           wrappedFs,
		name:        path.Join(fs.ConfigString(wrappedFs), remote),
		vfs:         VFS,
		node:        node,
		remote:      remote,
		root:        root,
		prefix:      prefix,
		prefixSlash: prefix + "/",
	}

	// FIXME
	// the features here are ones we could support, and they are
	// ANDed with the ones from wrappedFs
	//
	// FIXME some of these need to be forced on - CanHaveEmptyDirectories
	f.features = (&fs.Features{
		CaseInsensitive:         false,
		DuplicateFiles:          false,
		ReadMimeType:            false, // MimeTypes not supported with gzip
		WriteMimeType:           false,
		BucketBased:             false,
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)

	return f, nil
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// String returns a description of the FS
func (f *Fs) String() string {
	return f.name
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// Stub - a concrete archiver is expected to override this.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	return nil, errNotImplemented
}

// NewObject finds the Object at remote.
//
// Stub - a concrete archiver is expected to override this.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
	return nil, errNotImplemented
}

// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
	return time.Second
}
|
||||
|
||||
// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
//
// Archives are read only so this always fails with vfs.EROFS.
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	return vfs.EROFS
}

// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
//
// Archives are read only so this always fails with vfs.EROFS.
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return vfs.EROFS
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
//
// Archives are read only so this always fails with vfs.EROFS.
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
	return nil, vfs.EROFS
}
|
||||
|
||||
// Hashes returns the supported hash sets.
//
// No hashes are supported by the base archiver.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.None)
}

// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
	return f.f
}

// WrapFs returns the Fs that is wrapping this Fs
//
// Returns nil if SetWrapper has not been called.
func (f *Fs) WrapFs() fs.Fs {
	return f.wrapper
}

// SetWrapper sets the Fs that is wrapping this Fs
func (f *Fs) SetWrapper(wrapper fs.Fs) {
	f.wrapper = wrapper
}
|
||||
|
||||
// Object describes an object to be read from the raw zip file
type Object struct {
	f      *Fs    // parent Fs
	remote string // remote path of the object
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
	return o.f
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.Remote()
|
||||
}
|
||||
|
||||
// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Size returns the size of the file
//
// Returns -1 meaning the size is unknown.
func (o *Object) Size() int64 {
	return -1
}
|
||||
|
||||
// ModTime returns the modification time of the object
//
// Stub - returns the current time as a placeholder; a concrete
// archiver is expected to override this with the real mtime.
func (o *Object) ModTime(ctx context.Context) time.Time {
	return time.Now()
}

// SetModTime sets the modification time of the local fs object
//
// Archives are read only so this always fails with vfs.EROFS.
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	return vfs.EROFS
}

// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
	return true
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
//
// No hashes are supported so this always returns hash.ErrUnsupported.
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}
|
||||
|
||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
//
// Stub - a concrete archiver is expected to override this.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
	return nil, errNotImplemented
}

// Update in to the object with the modTime given of the given size
//
// Archives are read only so this always fails with vfs.EROFS.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	return vfs.EROFS
}

// Remove an object
//
// Archives are read only so this always fails with vfs.EROFS.
func (o *Object) Remove(ctx context.Context) error {
	return vfs.EROFS
}
|
||||
|
||||
// Check the interfaces are satisfied
//
// Compile-time assertions that *Fs and *Object implement the
// interfaces they claim to.
var (
	_ fs.Fs        = (*Fs)(nil)
	_ fs.UnWrapper = (*Fs)(nil)
	_ fs.Wrapper   = (*Fs)(nil)
	_ fs.Object    = (*Object)(nil)
)
|
||||
165
backend/archive/squashfs/cache.go
Normal file
165
backend/archive/squashfs/cache.go
Normal file
@@ -0,0 +1,165 @@
|
||||
package squashfs
|
||||
|
||||
// Could just be using bare object Open with RangeRequest which
|
||||
// would transfer the minimum amount of data but may be slower.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/diskfs/go-diskfs/backend"
|
||||
"github.com/rclone/rclone/vfs"
|
||||
)
|
||||
|
||||
// Cache file handles for accessing the file
//
// Handles are pooled together with the offset they would next read
// from, so a reader continuing from the same offset can reuse a
// handle without seeking (see open).
type cache struct {
	node  vfs.Node      // the archive file the handles read from
	fhsMu sync.Mutex    // protects fhs
	fhs   []cacheHandle // pool of idle handles
}

// A cached file handle
type cacheHandle struct {
	offset int64      // offset the handle would read from without seeking
	fh     vfs.Handle // the idle handle itself
}

// Make a new cache
//
// Handles are opened lazily on first use; node is only stored here.
func newCache(node vfs.Node) *cache {
	return &cache{
		node: node,
	}
}
|
||||
|
||||
// Get a vfs.Handle from the pool or open one
//
// This tries to find an open file handle which doesn't require seeking.
//
// The caller must hand the handle back with close() when finished.
func (c *cache) open(off int64) (fh vfs.Handle, err error) {
	c.fhsMu.Lock()
	defer c.fhsMu.Unlock()

	if len(c.fhs) > 0 {
		// Look for exact match first
		for i, cfh := range c.fhs {
			if cfh.offset == off {
				// fs.Debugf(nil, "CACHE MATCH")
				// Remove the matched handle from the pool before handing it out
				c.fhs = append(c.fhs[:i], c.fhs[i+1:]...)
				return cfh.fh, nil

			}
		}
		// fs.Debugf(nil, "CACHE MISS")
		// Just take the first one if not found
		cfh := c.fhs[0]
		c.fhs = c.fhs[1:]
		return cfh.fh, nil
	}

	// Pool is empty - open a fresh read-only handle on the archive
	fh, err = c.node.Open(os.O_RDONLY)
	if err != nil {
		return nil, fmt.Errorf("failed to open squashfs archive: %w", err)
	}

	return fh, nil
}
|
||||
|
||||
// Close a vfs.Handle or return it to the pool
//
// off should be the offset the file handle would read from without seeking
//
// The handle is not actually closed here - it is kept in the pool for
// reuse until cache.Close is called.
func (c *cache) close(fh vfs.Handle, off int64) {
	c.fhsMu.Lock()
	defer c.fhsMu.Unlock()

	c.fhs = append(c.fhs, cacheHandle{
		offset: off,
		fh:     fh,
	})
}
|
||||
|
||||
// ReadAt reads len(p) bytes into p starting at offset off in the underlying
|
||||
// input source. It returns the number of bytes read (0 <= n <= len(p)) and any
|
||||
// error encountered.
|
||||
//
|
||||
// When ReadAt returns n < len(p), it returns a non-nil error explaining why
|
||||
// more bytes were not returned. In this respect, ReadAt is stricter than Read.
|
||||
//
|
||||
// Even if ReadAt returns n < len(p), it may use all of p as scratch
|
||||
// space during the call. If some data is available but not len(p) bytes,
|
||||
// ReadAt blocks until either all the data is available or an error occurs.
|
||||
// In this respect ReadAt is different from Read.
|
||||
//
|
||||
// If the n = len(p) bytes returned by ReadAt are at the end of the input
|
||||
// source, ReadAt may return either err == EOF or err == nil.
|
||||
//
|
||||
// If ReadAt is reading from an input source with a seek offset, ReadAt should
|
||||
// not affect nor be affected by the underlying seek offset.
|
||||
//
|
||||
// Clients of ReadAt can execute parallel ReadAt calls on the same input
|
||||
// source.
|
||||
//
|
||||
// Implementations must not retain p.
|
||||
func (c *cache) ReadAt(p []byte, off int64) (n int, err error) {
|
||||
fh, err := c.open(off)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
defer func() {
|
||||
c.close(fh, off+int64(len(p)))
|
||||
}()
|
||||
// fs.Debugf(nil, "ReadAt(p[%d], off=%d, fh=%p)", len(p), off, fh)
|
||||
return fh.ReadAt(p, off)
|
||||
}
|
||||
|
||||
// errCacheNotImplemented is returned by the stub methods below which
// exist only to satisfy the backend.Storage interface.
var errCacheNotImplemented = errors.New("internal error: squashfs cache doesn't implement method")

// WriteAt method dummy stub to satisfy interface
func (c *cache) WriteAt(p []byte, off int64) (n int, err error) {
	return 0, errCacheNotImplemented
}

// Seek method dummy stub to satisfy interface
func (c *cache) Seek(offset int64, whence int) (int64, error) {
	return 0, errCacheNotImplemented
}

// Read method dummy stub to satisfy interface
func (c *cache) Read(p []byte) (n int, err error) {
	return 0, errCacheNotImplemented
}

// Stat method dummy stub to satisfy interface
func (c *cache) Stat() (fs.FileInfo, error) {
	return nil, errCacheNotImplemented
}
|
||||
|
||||
// Close the file
|
||||
func (c *cache) Close() (err error) {
|
||||
c.fhsMu.Lock()
|
||||
defer c.fhsMu.Unlock()
|
||||
|
||||
// Close any open file handles
|
||||
for i := range c.fhs {
|
||||
fh := &c.fhs[i]
|
||||
newErr := fh.fh.Close()
|
||||
if err == nil {
|
||||
err = newErr
|
||||
}
|
||||
}
|
||||
c.fhs = nil
|
||||
return err
|
||||
}
|
||||
|
||||
// Sys returns OS-specific file for ioctl calls via fd
//
// Dummy stub to satisfy interface - there is no underlying *os.File.
func (c *cache) Sys() (*os.File, error) {
	return nil, errCacheNotImplemented
}

// Writable returns file for read-write operations
//
// Dummy stub to satisfy interface - the cache is read only.
func (c *cache) Writable() (backend.WritableFile, error) {
	return nil, errCacheNotImplemented
}

// check interfaces
var _ backend.Storage = (*cache)(nil)
|
||||
446
backend/archive/squashfs/squashfs.go
Normal file
446
backend/archive/squashfs/squashfs.go
Normal file
@@ -0,0 +1,446 @@
|
||||
// Package squashfs implements a squashfs archiver for the archive backend
|
||||
package squashfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/diskfs/go-diskfs/filesystem/squashfs"
|
||||
"github.com/rclone/rclone/backend/archive/archiver"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/log"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/rclone/rclone/vfs"
|
||||
"github.com/rclone/rclone/vfs/vfscommon"
|
||||
)
|
||||
|
||||
// init registers this archiver so the archive backend can use it for
// files with the .sqfs extension.
func init() {
	archiver.Register(archiver.Archiver{
		New:       New,
		Extension: ".sqfs",
	})
}
|
||||
|
||||
// Fs represents a wrapped fs.Fs
//
// It reads a squashfs archive stored at remote within the wrapped Fs
// via a VFS and a pool of cached file handles.
type Fs struct {
	f           fs.Fs
	wrapper     fs.Fs
	name        string
	features    *fs.Features // optional features
	vfs         *vfs.VFS
	sqfs        *squashfs.FileSystem // interface to the squashfs
	c           *cache
	node        vfs.Node // squashfs file object - set if reading
	remote      string   // remote of the squashfs file object
	prefix      string   // position for objects
	prefixSlash string   // position for objects with a slash on
	root        string   // position to read from within the archive
}
|
||||
|
||||
// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefix with prefix and rooted at root
//
// The squashfs archive at remote is read through a VFS on wrappedFs
// using a pool of cached file handles (see cache.go).
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
	// FIXME vfs cache?
	// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
	fs.Debugf(nil, "Squashfs: New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
	vfsOpt := vfscommon.Opt
	vfsOpt.ReadWait = 0
	VFS := vfs.New(wrappedFs, &vfsOpt)
	node, err := VFS.Stat(remote)
	if err != nil {
		return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
	}

	// Pool of file handles for reading the archive
	c := newCache(node)

	// FIXME blocksize
	sqfs, err := squashfs.Read(c, node.Size(), 0, 1024*1024)
	if err != nil {
		return nil, fmt.Errorf("failed to read squashfs: %w", err)
	}

	f := &Fs{
		f:           wrappedFs,
		name:        path.Join(fs.ConfigString(wrappedFs), remote),
		vfs:         VFS,
		node:        node,
		sqfs:        sqfs,
		c:           c,
		remote:      remote,
		root:        strings.Trim(root, "/"),
		prefix:      prefix,
		prefixSlash: prefix + "/",
	}
	if prefix == "" {
		f.prefixSlash = ""
	}

	singleObject := false

	// Find the directory the root points to
	if f.root != "" && !strings.HasSuffix(root, "/") {
		native, err := f.toNative("")
		if err == nil {
			native = strings.TrimRight(native, "/")
			_, err := f.newObjectNative(native)
			if err == nil {
				// If it pointed to a file, find the directory above
				f.root = path.Dir(f.root)
				if f.root == "." || f.root == "/" {
					f.root = ""
				}
				// NOTE(review): singleObject is never set to true, so
				// the fs.ErrorIsFile return below is unreachable -
				// confirm whether this branch should set it.
			}
		}
	}

	// FIXME
	// the features here are ones we could support, and they are
	// ANDed with the ones from wrappedFs
	//
	// FIXME some of these need to be forced on - CanHaveEmptyDirectories
	f.features = (&fs.Features{
		CaseInsensitive:         false,
		DuplicateFiles:          false,
		ReadMimeType:            false, // MimeTypes not supported with gsquashfs
		WriteMimeType:           false,
		BucketBased:             false,
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)

	if singleObject {
		return f, fs.ErrorIsFile
	}
	return f, nil
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// String returns a description of the FS
func (f *Fs) String() string {
	return fmt.Sprintf("Squashfs %q", f.name)
}
|
||||
|
||||
// This turns a remote into a native path in the squashfs starting with a /
//
// The prefix (if any) is stripped off the front and the root (if any)
// is prepended. Returns an error if remote does not start with the
// expected prefix.
func (f *Fs) toNative(remote string) (string, error) {
	native := strings.Trim(remote, "/")
	if f.prefix == "" {
		// No prefix - the remote maps straight into the archive
		native = "/" + native
	} else if native == f.prefix {
		// The prefix itself is the archive root
		native = "/"
	} else if !strings.HasPrefix(native, f.prefixSlash) {
		return "", fmt.Errorf("internal error: %q doesn't start with prefix %q", native, f.prefixSlash)
	} else {
		// Strip the prefix leaving a leading slash
		native = native[len(f.prefix):]
	}
	if f.root != "" {
		native = "/" + f.root + native
	}
	return native, nil
}
|
||||
|
||||
// Turn a (nativeDir, leaf) into a remote
//
// This is the inverse of toNative: the root is stripped off the
// native directory and the prefix is put back on the front.
func (f *Fs) fromNative(nativeDir string, leaf string) string {
	// fs.Debugf(nil, "nativeDir = %q, leaf = %q, root=%q", nativeDir, leaf, f.root)
	dir := nativeDir
	if f.root != "" {
		dir = strings.TrimPrefix(dir, "/"+f.root)
	}
	remote := f.prefixSlash + strings.Trim(path.Join(dir, leaf), "/")
	// fs.Debugf(nil, "dir = %q, remote=%q", dir, remote)
	return remote
}
|
||||
|
||||
// objectFromFileInfo converts a squashfs FileStat found in nativeDir
// into an *Object, translating the native path back into a remote.
func (f *Fs) objectFromFileInfo(nativeDir string, item squashfs.FileStat) *Object {
	return &Object{
		fs:      f,
		remote:  f.fromNative(nativeDir, item.Name()),
		size:    item.Size(),
		modTime: item.ModTime(),
		item:    item,
	}
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
|
||||
|
||||
nativeDir, err := f.toNative(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
items, err := f.sqfs.ReadDir(nativeDir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read squashfs: couldn't read directory: %w", err)
|
||||
}
|
||||
|
||||
entries = make(fs.DirEntries, 0, len(items))
|
||||
for _, fi := range items {
|
||||
item, ok := fi.(squashfs.FileStat)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi)
|
||||
}
|
||||
// fs.Debugf(item.Name(), "entry = %#v", item)
|
||||
var entry fs.DirEntry
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading item %q: %q", item.Name(), err)
|
||||
}
|
||||
if item.IsDir() {
|
||||
var remote = f.fromNative(nativeDir, item.Name())
|
||||
entry = fs.NewDir(remote, item.ModTime())
|
||||
} else {
|
||||
if item.Mode().IsRegular() {
|
||||
entry = f.objectFromFileInfo(nativeDir, item)
|
||||
} else {
|
||||
fs.Debugf(item.Name(), "FIXME Not regular file - skipping")
|
||||
continue
|
||||
}
|
||||
}
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
|
||||
// fs.Debugf(f, "dir=%q, entries=%v", dir, entries)
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// newObjectNative finds the object at the native path passed in
|
||||
func (f *Fs) newObjectNative(nativePath string) (o fs.Object, err error) {
|
||||
// get the path and filename
|
||||
dir, leaf := path.Split(nativePath)
|
||||
dir = strings.TrimRight(dir, "/")
|
||||
leaf = strings.Trim(leaf, "/")
|
||||
|
||||
// FIXME need to detect directory not found
|
||||
fis, err := f.sqfs.ReadDir(dir)
|
||||
if err != nil {
|
||||
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
for _, fi := range fis {
|
||||
if fi.Name() == leaf {
|
||||
if fi.IsDir() {
|
||||
return nil, fs.ErrorNotAFile
|
||||
}
|
||||
item, ok := fi.(squashfs.FileStat)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("internal error: unexpected type for %q: %T", fi.Name(), fi)
|
||||
}
|
||||
o = f.objectFromFileInfo(dir, item)
|
||||
break
|
||||
}
|
||||
}
|
||||
if o == nil {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
|
||||
defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err)
|
||||
|
||||
nativePath, err := f.toNative(remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.newObjectNative(nativePath)
|
||||
}
|
||||
|
||||
// Precision of the ModTimes in this Fs
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return time.Second
|
||||
}
|
||||
|
||||
// Mkdir makes the directory (container, bucket)
|
||||
//
|
||||
// Shouldn't return an error if it already exists
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Rmdir removes the directory (container, bucket) if empty
|
||||
//
|
||||
// Return an error if it doesn't exist or isn't empty
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
|
||||
return nil, vfs.EROFS
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.None)
|
||||
}
|
||||
|
||||
// UnWrap returns the Fs that this Fs is wrapping
|
||||
func (f *Fs) UnWrap() fs.Fs {
|
||||
return f.f
|
||||
}
|
||||
|
||||
// WrapFs returns the Fs that is wrapping this Fs
|
||||
func (f *Fs) WrapFs() fs.Fs {
|
||||
return f.wrapper
|
||||
}
|
||||
|
||||
// SetWrapper sets the Fs that is wrapping this Fs
|
||||
func (f *Fs) SetWrapper(wrapper fs.Fs) {
|
||||
f.wrapper = wrapper
|
||||
}
|
||||
|
||||
// Object describes an object to be read from the raw squashfs file
|
||||
type Object struct {
|
||||
fs *Fs
|
||||
remote string
|
||||
size int64
|
||||
modTime time.Time
|
||||
item squashfs.FileStat
|
||||
}
|
||||
|
||||
// Fs returns read only access to the Fs that this object is part of
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.Remote()
|
||||
}
|
||||
|
||||
// Turn a squashfs path into a full path for the parent Fs
|
||||
// func (o *Object) path(remote string) string {
|
||||
// return path.Join(o.fs.prefix, remote)
|
||||
// }
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Size returns the size of the file
|
||||
func (o *Object) Size() int64 {
|
||||
return o.size
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
//
|
||||
// It attempts to read the objects mtime and if that isn't present the
|
||||
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
	return true
}
|
||||
|
||||
// Hash returns the selected checksum of the file
|
||||
// If no checksum is available it returns ""
|
||||
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
||||
var offset, limit int64 = 0, -1
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
case *fs.RangeOption:
|
||||
offset, limit = x.Decode(o.Size())
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
remote, err := o.fs.toNative(o.remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fs.Debugf(o, "Opening %q", remote)
|
||||
//fh, err := o.fs.sqfs.OpenFile(remote, os.O_RDONLY)
|
||||
fh, err := o.item.Open()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// discard data from start as necessary
|
||||
if offset > 0 {
|
||||
_, err = fh.Seek(offset, io.SeekStart)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// If limited then don't return everything
|
||||
if limit >= 0 {
|
||||
fs.Debugf(nil, "limit=%d, offset=%d, options=%v", limit, offset, options)
|
||||
return readers.NewLimitedReadCloser(fh, limit), nil
|
||||
}
|
||||
|
||||
return fh, nil
|
||||
}
|
||||
|
||||
// Update in to the object with the modTime given of the given size
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.UnWrapper = (*Fs)(nil)
|
||||
_ fs.Wrapper = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
)
|
||||
385
backend/archive/zip/zip.go
Normal file
385
backend/archive/zip/zip.go
Normal file
@@ -0,0 +1,385 @@
|
||||
// Package zip implements a zip archiver for the archive backend
|
||||
package zip
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/backend/archive/archiver"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/dirtree"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/log"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/rclone/rclone/vfs"
|
||||
"github.com/rclone/rclone/vfs/vfscommon"
|
||||
)
|
||||
|
||||
func init() {
|
||||
archiver.Register(archiver.Archiver{
|
||||
New: New,
|
||||
Extension: ".zip",
|
||||
})
|
||||
}
|
||||
|
||||
// Fs represents a wrapped fs.Fs
|
||||
type Fs struct {
|
||||
f fs.Fs
|
||||
wrapper fs.Fs
|
||||
name string
|
||||
features *fs.Features // optional features
|
||||
vfs *vfs.VFS
|
||||
node vfs.Node // zip file object - set if reading
|
||||
remote string // remote of the zip file object
|
||||
prefix string // position for objects
|
||||
prefixSlash string // position for objects with a slash on
|
||||
root string // position to read from within the archive
|
||||
dt dirtree.DirTree // read from zipfile
|
||||
}
|
||||
|
||||
// New constructs an Fs from the (wrappedFs, remote) with the objects
// prefix with prefix and rooted at root
func New(ctx context.Context, wrappedFs fs.Fs, remote, prefix, root string) (fs.Fs, error) {
	// FIXME vfs cache?
	// FIXME could factor out ReadFileHandle and just use that rather than the full VFS
	fs.Debugf(nil, "Zip: New: remote=%q, prefix=%q, root=%q", remote, prefix, root)
	// Use a VFS on the wrapped Fs to get random access to the zip file
	vfsOpt := vfscommon.Opt
	vfsOpt.ReadWait = 0
	VFS := vfs.New(wrappedFs, &vfsOpt)
	node, err := VFS.Stat(remote)
	if err != nil {
		return nil, fmt.Errorf("failed to find %q archive: %w", remote, err)
	}

	f := &Fs{
		f:           wrappedFs,
		name:        path.Join(fs.ConfigString(wrappedFs), remote),
		vfs:         VFS,
		node:        node,
		remote:      remote,
		root:        root,
		prefix:      prefix,
		prefixSlash: prefix + "/",
	}

	// Read the contents of the zip file
	singleObject, err := f.readZip()
	if err != nil {
		return nil, fmt.Errorf("failed to open zip file: %w", err)
	}

	// FIXME
	// the features here are ones we could support, and they are
	// ANDed with the ones from wrappedFs
	//
	// FIXME some of these need to be forced on - CanHaveEmptyDirectories
	f.features = (&fs.Features{
		CaseInsensitive:         false,
		DuplicateFiles:          false,
		ReadMimeType:            false, // MimeTypes not supported with zip
		WriteMimeType:           false,
		BucketBased:             false,
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)

	// If the root pointed at a file inside the archive, signal that to
	// the caller in the standard rclone way
	if singleObject {
		return f, fs.ErrorIsFile
	}
	return f, nil
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// String returns a description of the FS
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("Zip %q", f.name)
|
||||
}
|
||||
|
||||
// readZip the zip file into f
|
||||
//
|
||||
// Returns singleObject=true if f.root points to a file
|
||||
func (f *Fs) readZip() (singleObject bool, err error) {
|
||||
if f.node == nil {
|
||||
return singleObject, fs.ErrorDirNotFound
|
||||
}
|
||||
size := f.node.Size()
|
||||
if size < 0 {
|
||||
return singleObject, errors.New("can't read from zip file with unknown size")
|
||||
}
|
||||
r, err := f.node.Open(os.O_RDONLY)
|
||||
if err != nil {
|
||||
return singleObject, fmt.Errorf("failed to open zip file: %w", err)
|
||||
}
|
||||
zr, err := zip.NewReader(r, size)
|
||||
if err != nil {
|
||||
return singleObject, fmt.Errorf("failed to read zip file: %w", err)
|
||||
}
|
||||
dt := dirtree.New()
|
||||
for _, file := range zr.File {
|
||||
remote := strings.Trim(path.Clean(file.Name), "/")
|
||||
if remote == "." {
|
||||
remote = ""
|
||||
}
|
||||
remote = path.Join(f.prefix, remote)
|
||||
if f.root != "" {
|
||||
// Ignore all files outside the root
|
||||
if !strings.HasPrefix(remote, f.root) {
|
||||
continue
|
||||
}
|
||||
if remote == f.root {
|
||||
remote = ""
|
||||
} else {
|
||||
remote = strings.TrimPrefix(remote, f.root+"/")
|
||||
}
|
||||
}
|
||||
if strings.HasSuffix(file.Name, "/") {
|
||||
dir := fs.NewDir(remote, file.Modified)
|
||||
dt.AddDir(dir)
|
||||
} else {
|
||||
if remote == "" {
|
||||
remote = path.Base(f.root)
|
||||
singleObject = true
|
||||
dt = dirtree.New()
|
||||
}
|
||||
o := &Object{
|
||||
f: f,
|
||||
remote: remote,
|
||||
fh: &file.FileHeader,
|
||||
file: file,
|
||||
}
|
||||
dt.Add(o)
|
||||
if singleObject {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
dt.CheckParents("")
|
||||
dt.Sort()
|
||||
f.dt = dt
|
||||
//fs.Debugf(nil, "dt = %v", dt)
|
||||
return singleObject, nil
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
defer log.Trace(f, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
|
||||
// _, err = f.strip(dir)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
entries, ok := f.dt[dir]
|
||||
if !ok {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
fs.Debugf(f, "dir=%q, entries=%v", dir, entries)
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
|
||||
defer log.Trace(f, "remote=%q", remote)("obj=%v, err=%v", &o, &err)
|
||||
if f.dt == nil {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
_, entry := f.dt.Find(remote)
|
||||
if entry == nil {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
o, ok := entry.(*Object)
|
||||
if !ok {
|
||||
return nil, fs.ErrorNotAFile
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// Precision of the ModTimes in this Fs
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return time.Second
|
||||
}
|
||||
|
||||
// Mkdir makes the directory (container, bucket)
|
||||
//
|
||||
// Shouldn't return an error if it already exists
|
||||
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Rmdir removes the directory (container, bucket) if empty
|
||||
//
|
||||
// Return an error if it doesn't exist or isn't empty
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
|
||||
return nil, vfs.EROFS
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.CRC32)
|
||||
}
|
||||
|
||||
// UnWrap returns the Fs that this Fs is wrapping
|
||||
func (f *Fs) UnWrap() fs.Fs {
|
||||
return f.f
|
||||
}
|
||||
|
||||
// WrapFs returns the Fs that is wrapping this Fs
|
||||
func (f *Fs) WrapFs() fs.Fs {
|
||||
return f.wrapper
|
||||
}
|
||||
|
||||
// SetWrapper sets the Fs that is wrapping this Fs
|
||||
func (f *Fs) SetWrapper(wrapper fs.Fs) {
|
||||
f.wrapper = wrapper
|
||||
}
|
||||
|
||||
// Object describes an object to be read from the raw zip file
|
||||
type Object struct {
|
||||
f *Fs
|
||||
remote string
|
||||
fh *zip.FileHeader
|
||||
file *zip.File
|
||||
}
|
||||
|
||||
// Fs returns read only access to the Fs that this object is part of
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.f
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.Remote()
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Size returns the size of the file
|
||||
func (o *Object) Size() int64 {
|
||||
return int64(o.fh.UncompressedSize64)
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
//
|
||||
// It attempts to read the objects mtime and if that isn't present the
|
||||
// LastModified returned in the http headers
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
return o.fh.Modified
|
||||
}
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
	return true
}
|
||||
|
||||
// Hash returns the selected checksum of the file
|
||||
// If no checksum is available it returns ""
|
||||
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
|
||||
if ht == hash.CRC32 {
|
||||
// FIXME return empty CRC if writing
|
||||
if o.f.dt == nil {
|
||||
return "", nil
|
||||
}
|
||||
return fmt.Sprintf("%08x", o.fh.CRC32), nil
|
||||
}
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
||||
var offset, limit int64 = 0, -1
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
case *fs.RangeOption:
|
||||
offset, limit = x.Decode(o.Size())
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rc, err = o.file.Open()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// discard data from start as necessary
|
||||
if offset > 0 {
|
||||
_, err = io.CopyN(io.Discard, rc, offset)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// If limited then don't return everything
|
||||
if limit >= 0 {
|
||||
return readers.NewLimitedReadCloser(rc, limit), nil
|
||||
}
|
||||
|
||||
return rc, nil
|
||||
}
|
||||
|
||||
// Update in to the object with the modTime given of the given size
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
return vfs.EROFS
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.UnWrapper = (*Fs)(nil)
|
||||
_ fs.Wrapper = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
)
|
||||
@@ -2797,8 +2797,6 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {
|
||||
blockList blockblob.GetBlockListResponse
|
||||
properties *blob.GetPropertiesResponse
|
||||
options *blockblob.CommitBlockListOptions
|
||||
// Use temporary pacer as this can be called recursively which can cause a deadlock with --max-connections
|
||||
pacer = fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
|
||||
)
|
||||
|
||||
properties, err = o.readMetaDataAlways(ctx)
|
||||
@@ -2810,7 +2808,7 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {
|
||||
|
||||
if objectExists {
|
||||
// Get the committed block list
|
||||
err = pacer.Call(func() (bool, error) {
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
blockList, err = blockBlobSVC.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
|
||||
return o.fs.shouldRetry(ctx, err)
|
||||
})
|
||||
@@ -2852,7 +2850,7 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {
|
||||
|
||||
// Commit only the committed blocks
|
||||
fs.Debugf(o, "Committing %d blocks to remove uncommitted blocks", len(blockIDs))
|
||||
err = pacer.Call(func() (bool, error) {
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err := blockBlobSVC.CommitBlockList(ctx, blockIDs, options)
|
||||
return o.fs.shouldRetry(ctx, err)
|
||||
})
|
||||
|
||||
@@ -56,6 +56,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/list"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/env"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
@@ -843,15 +844,32 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
|
||||
var entries fs.DirEntries
|
||||
return list.WithListP(ctx, dir, f)
|
||||
}
|
||||
|
||||
// ListP lists the objects and directories of the Fs starting
|
||||
// from dir non recursively into out.
|
||||
//
|
||||
// dir should be "" to start from the root, and should not
|
||||
// have trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
//
|
||||
// It should call callback for each tranche of entries read.
|
||||
// These need not be returned in any particular order. If
|
||||
// callback returns an error then the listing will stop
|
||||
// immediately.
|
||||
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
|
||||
list := list.NewHelper(callback)
|
||||
subDirClient := f.dirClient(dir)
|
||||
|
||||
// Checking whether directory exists
|
||||
_, err := subDirClient.GetProperties(ctx, nil)
|
||||
if fileerror.HasCode(err, fileerror.ParentNotFound, fileerror.ResourceNotFound) {
|
||||
return entries, fs.ErrorDirNotFound
|
||||
return fs.ErrorDirNotFound
|
||||
} else if err != nil {
|
||||
return entries, err
|
||||
return err
|
||||
}
|
||||
|
||||
opt := &directory.ListFilesAndDirectoriesOptions{
|
||||
@@ -863,7 +881,7 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
|
||||
for pager.More() {
|
||||
resp, err := pager.NextPage(ctx)
|
||||
if err != nil {
|
||||
return entries, err
|
||||
return err
|
||||
}
|
||||
for _, directory := range resp.Segment.Directories {
|
||||
// Name *string `xml:"Name"`
|
||||
@@ -889,7 +907,10 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
|
||||
if directory.Properties.ContentLength != nil {
|
||||
entry.SetSize(*directory.Properties.ContentLength)
|
||||
}
|
||||
entries = append(entries, entry)
|
||||
err = list.Add(entry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, file := range resp.Segment.Files {
|
||||
leaf := f.opt.Enc.ToStandardPath(*file.Name)
|
||||
@@ -903,10 +924,13 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
|
||||
if file.Properties.LastWriteTime != nil {
|
||||
entry.modTime = *file.Properties.LastWriteTime
|
||||
}
|
||||
entries = append(entries, entry)
|
||||
err = list.Add(entry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return entries, nil
|
||||
return list.Flush()
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -1313,10 +1337,29 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
}
|
||||
srcURL := srcObj.fileClient().URL()
|
||||
fc := f.fileClient(remote)
|
||||
_, err = fc.StartCopyFromURL(ctx, srcURL, &opt)
|
||||
startCopy, err := fc.StartCopyFromURL(ctx, srcURL, &opt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Copy failed: %w", err)
|
||||
}
|
||||
|
||||
// Poll for completion if necessary
|
||||
//
|
||||
// The for loop is never executed for same storage account copies.
|
||||
copyStatus := startCopy.CopyStatus
|
||||
var properties file.GetPropertiesResponse
|
||||
pollTime := 100 * time.Millisecond
|
||||
|
||||
for copyStatus != nil && string(*copyStatus) == string(file.CopyStatusTypePending) {
|
||||
time.Sleep(pollTime)
|
||||
|
||||
properties, err = fc.GetProperties(ctx, &file.GetPropertiesOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copyStatus = properties.CopyStatus
|
||||
pollTime = min(2*pollTime, time.Second)
|
||||
}
|
||||
|
||||
dstObj, err := f.NewObject(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Copy: NewObject failed: %w", err)
|
||||
@@ -1431,6 +1474,7 @@ var (
|
||||
_ fs.DirMover = &Fs{}
|
||||
_ fs.Copier = &Fs{}
|
||||
_ fs.OpenWriterAter = &Fs{}
|
||||
_ fs.ListPer = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.MimeTyper = &Object{}
|
||||
)
|
||||
|
||||
@@ -2224,13 +2224,17 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
|
||||
return info, nil, err
|
||||
}
|
||||
|
||||
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
|
||||
if err != nil {
|
||||
return info, nil, err
|
||||
}
|
||||
|
||||
info = fs.ChunkWriterInfo{
|
||||
ChunkSize: int64(f.opt.ChunkSize),
|
||||
ChunkSize: up.chunkSize,
|
||||
Concurrency: o.fs.opt.UploadConcurrency,
|
||||
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
|
||||
}
|
||||
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil, options...)
|
||||
return info, up, err
|
||||
return info, up, nil
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
|
||||
@@ -37,6 +37,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/list"
|
||||
"github.com/rclone/rclone/lib/dircache"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/env"
|
||||
@@ -705,9 +706,27 @@ OUTER:
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
return list.WithListP(ctx, dir, f)
|
||||
}
|
||||
|
||||
// ListP lists the objects and directories of the Fs starting
|
||||
// from dir non recursively into out.
|
||||
//
|
||||
// dir should be "" to start from the root, and should not
|
||||
// have trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
//
|
||||
// It should call callback for each tranche of entries read.
|
||||
// These need not be returned in any particular order. If
|
||||
// callback returns an error then the listing will stop
|
||||
// immediately.
|
||||
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
|
||||
list := list.NewHelper(callback)
|
||||
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
var iErr error
|
||||
_, err = f.listAll(ctx, directoryID, false, false, true, func(info *api.Item) bool {
|
||||
@@ -717,14 +736,22 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
f.dirCache.Put(remote, info.ID)
|
||||
d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
|
||||
// FIXME more info from dir?
|
||||
entries = append(entries, d)
|
||||
err = list.Add(d)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
}
|
||||
} else if info.Type == api.ItemTypeFile {
|
||||
o, err := f.newObjectWithInfo(ctx, remote, info)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
}
|
||||
entries = append(entries, o)
|
||||
err = list.Add(o)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// Cache some metadata for this Item to help us process events later
|
||||
@@ -740,12 +767,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
return false
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if iErr != nil {
|
||||
return nil, iErr
|
||||
return iErr
|
||||
}
|
||||
return entries, nil
|
||||
return list.Flush()
|
||||
}
|
||||
|
||||
// Creates from the parameters passed in a half finished Object which
|
||||
@@ -1741,6 +1768,7 @@ var (
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.CleanUpper = (*Fs)(nil)
|
||||
_ fs.ListPer = (*Fs)(nil)
|
||||
_ fs.Shutdowner = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.IDer = (*Object)(nil)
|
||||
|
||||
1
backend/cache/utils_test.go
vendored
1
backend/cache/utils_test.go
vendored
@@ -1,5 +1,4 @@
|
||||
//go:build !plan9 && !js
|
||||
// +build !plan9,!js
|
||||
|
||||
package cache
|
||||
|
||||
|
||||
@@ -1965,9 +1965,28 @@ func (f *Fs) findImportFormat(ctx context.Context, mimeType string) string {
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
return list.WithListP(ctx, dir, f)
|
||||
}
|
||||
|
||||
// ListP lists the objects and directories of the Fs starting
|
||||
// from dir non recursively into out.
|
||||
//
|
||||
// dir should be "" to start from the root, and should not
|
||||
// have trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
//
|
||||
// It should call callback for each tranche of entries read.
|
||||
// These need not be returned in any particular order. If
|
||||
// callback returns an error then the listing will stop
|
||||
// immediately.
|
||||
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
|
||||
list := list.NewHelper(callback)
|
||||
entriesAdded := 0
|
||||
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
directoryID = actualID(directoryID)
|
||||
|
||||
@@ -1979,25 +1998,30 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
return true
|
||||
}
|
||||
if entry != nil {
|
||||
entries = append(entries, entry)
|
||||
err = list.Add(entry)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
}
|
||||
entriesAdded++
|
||||
}
|
||||
return false
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if iErr != nil {
|
||||
return nil, iErr
|
||||
return iErr
|
||||
}
|
||||
// If listing the root of a teamdrive and got no entries,
|
||||
// double check we have access
|
||||
if f.isTeamDrive && len(entries) == 0 && f.root == "" && dir == "" {
|
||||
if f.isTeamDrive && entriesAdded == 0 && f.root == "" && dir == "" {
|
||||
err = f.teamDriveOK(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
return entries, nil
|
||||
return list.Flush()
|
||||
}
|
||||
|
||||
// listREntry is a task to be executed by a litRRunner
|
||||
@@ -4617,6 +4641,7 @@ var (
|
||||
_ fs.PutUncheckeder = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.ListRer = (*Fs)(nil)
|
||||
_ fs.ListPer = (*Fs)(nil)
|
||||
_ fs.MergeDirser = (*Fs)(nil)
|
||||
_ fs.DirSetModTimer = (*Fs)(nil)
|
||||
_ fs.MkdirMetadataer = (*Fs)(nil)
|
||||
|
||||
@@ -47,6 +47,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/list"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/lib/batcher"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
@@ -834,7 +835,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
|
||||
// listSharedFolders lists all available shared folders mounted and not mounted
|
||||
// we'll need the id later so we have to return them in original format
|
||||
func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err error) {
|
||||
func (f *Fs) listSharedFolders(ctx context.Context, callback func(fs.DirEntry) error) (err error) {
|
||||
started := false
|
||||
var res *sharing.ListFoldersResult
|
||||
for {
|
||||
@@ -847,7 +848,7 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
started = true
|
||||
} else {
|
||||
@@ -859,15 +860,15 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list continue: %w", err)
|
||||
return fmt.Errorf("list continue: %w", err)
|
||||
}
|
||||
}
|
||||
for _, entry := range res.Entries {
|
||||
leaf := f.opt.Enc.ToStandardName(entry.Name)
|
||||
d := fs.NewDir(leaf, time.Time{}).SetID(entry.SharedFolderId)
|
||||
entries = append(entries, d)
|
||||
err = callback(d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
if res.Cursor == "" {
|
||||
@@ -875,21 +876,25 @@ func (f *Fs) listSharedFolders(ctx context.Context) (entries fs.DirEntries, err
|
||||
}
|
||||
}
|
||||
|
||||
return entries, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// findSharedFolder find the id for a given shared folder name
|
||||
// somewhat annoyingly there is no endpoint to query a shared folder by it's name
|
||||
// so our only option is to iterate over all shared folders
|
||||
func (f *Fs) findSharedFolder(ctx context.Context, name string) (id string, err error) {
|
||||
entries, err := f.listSharedFolders(ctx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
for _, entry := range entries {
|
||||
errFoundFile := errors.New("found file")
|
||||
err = f.listSharedFolders(ctx, func(entry fs.DirEntry) error {
|
||||
if entry.(*fs.Dir).Remote() == name {
|
||||
return entry.(*fs.Dir).ID(), nil
|
||||
id = entry.(*fs.Dir).ID()
|
||||
return errFoundFile
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if errors.Is(err, errFoundFile) {
|
||||
return id, nil
|
||||
} else if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return "", fs.ErrorDirNotFound
|
||||
}
|
||||
@@ -908,7 +913,7 @@ func (f *Fs) mountSharedFolder(ctx context.Context, id string) error {
|
||||
|
||||
// listReceivedFiles lists shared the user as access to (note this means individual
|
||||
// files not files contained in shared folders)
|
||||
func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err error) {
|
||||
func (f *Fs) listReceivedFiles(ctx context.Context, callback func(fs.DirEntry) error) (err error) {
|
||||
started := false
|
||||
var res *sharing.ListFilesResult
|
||||
for {
|
||||
@@ -921,7 +926,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
started = true
|
||||
} else {
|
||||
@@ -933,7 +938,7 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list continue: %w", err)
|
||||
return fmt.Errorf("list continue: %w", err)
|
||||
}
|
||||
}
|
||||
for _, entry := range res.Entries {
|
||||
@@ -946,26 +951,33 @@ func (f *Fs) listReceivedFiles(ctx context.Context) (entries fs.DirEntries, err
|
||||
modTime: *entry.TimeInvited,
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
err = callback(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
entries = append(entries, o)
|
||||
}
|
||||
if res.Cursor == "" {
|
||||
break
|
||||
}
|
||||
}
|
||||
return entries, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) findSharedFile(ctx context.Context, name string) (o *Object, err error) {
|
||||
files, err := f.listReceivedFiles(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, entry := range files {
|
||||
errFoundFile := errors.New("found file")
|
||||
err = f.listReceivedFiles(ctx, func(entry fs.DirEntry) error {
|
||||
if entry.(*Object).remote == name {
|
||||
return entry.(*Object), nil
|
||||
o = entry.(*Object)
|
||||
return errFoundFile
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if errors.Is(err, errFoundFile) {
|
||||
return o, nil
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
@@ -980,11 +992,37 @@ func (f *Fs) findSharedFile(ctx context.Context, name string) (o *Object, err er
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
return list.WithListP(ctx, dir, f)
|
||||
}
|
||||
|
||||
// ListP lists the objects and directories of the Fs starting
|
||||
// from dir non recursively into out.
|
||||
//
|
||||
// dir should be "" to start from the root, and should not
|
||||
// have trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
//
|
||||
// It should call callback for each tranche of entries read.
|
||||
// These need not be returned in any particular order. If
|
||||
// callback returns an error then the listing will stop
|
||||
// immediately.
|
||||
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
|
||||
list := list.NewHelper(callback)
|
||||
if f.opt.SharedFiles {
|
||||
return f.listReceivedFiles(ctx)
|
||||
err := f.listReceivedFiles(ctx, list.Add)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return list.Flush()
|
||||
}
|
||||
if f.opt.SharedFolders {
|
||||
return f.listSharedFolders(ctx)
|
||||
err := f.listSharedFolders(ctx, list.Add)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return list.Flush()
|
||||
}
|
||||
|
||||
root := f.slashRoot
|
||||
@@ -1014,7 +1052,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
err = fs.ErrorDirNotFound
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
started = true
|
||||
} else {
|
||||
@@ -1026,7 +1064,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("list continue: %w", err)
|
||||
return fmt.Errorf("list continue: %w", err)
|
||||
}
|
||||
}
|
||||
for _, entry := range res.Entries {
|
||||
@@ -1051,14 +1089,20 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
remote := path.Join(dir, leaf)
|
||||
if folderInfo != nil {
|
||||
d := fs.NewDir(remote, time.Time{}).SetID(folderInfo.Id)
|
||||
entries = append(entries, d)
|
||||
err = list.Add(d)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if fileInfo != nil {
|
||||
o, err := f.newObjectWithInfo(ctx, remote, fileInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if o.(*Object).exportType.listable() {
|
||||
entries = append(entries, o)
|
||||
err = list.Add(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1066,7 +1110,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
break
|
||||
}
|
||||
}
|
||||
return entries, nil
|
||||
return list.Flush()
|
||||
}
|
||||
|
||||
// Put the object
|
||||
@@ -2087,6 +2131,7 @@ var (
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.ListPer = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Shutdowner = &Fs{}
|
||||
_ fs.Object = (*Object)(nil)
|
||||
|
||||
@@ -456,9 +456,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
||||
}
|
||||
}()
|
||||
baseDialer := fshttp.NewDialer(ctx)
|
||||
if f.opt.SocksProxy != "" {
|
||||
conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
|
||||
} else if f.proxyURL != nil {
|
||||
if f.opt.SocksProxy != "" || f.proxyURL != nil {
|
||||
// We need to make the onward connection to f.opt.Host. However the FTP
|
||||
// library sets the host to the proxy IP after using EPSV or PASV so we need
|
||||
// to correct that here.
|
||||
@@ -468,7 +466,11 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
||||
return nil, err
|
||||
}
|
||||
dialAddress := net.JoinHostPort(f.opt.Host, dialPort)
|
||||
conn, err = proxy.HTTPConnectDial(network, dialAddress, f.proxyURL, baseDialer)
|
||||
if f.opt.SocksProxy != "" {
|
||||
conn, err = proxy.SOCKS5Dial(network, dialAddress, f.opt.SocksProxy, baseDialer)
|
||||
} else {
|
||||
conn, err = proxy.HTTPConnectDial(network, dialAddress, f.proxyURL, baseDialer)
|
||||
}
|
||||
} else {
|
||||
conn, err = baseDialer.Dial(network, address)
|
||||
}
|
||||
|
||||
@@ -252,6 +252,9 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
||||
}, {
|
||||
Value: "us-east4",
|
||||
Help: "Northern Virginia",
|
||||
}, {
|
||||
Value: "us-east5",
|
||||
Help: "Ohio",
|
||||
}, {
|
||||
Value: "us-west1",
|
||||
Help: "Oregon",
|
||||
@@ -1131,7 +1134,15 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
remote: remote,
|
||||
}
|
||||
|
||||
rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, nil)
|
||||
// Set the storage class for the destination object if configured
|
||||
var dstObject *storage.Object
|
||||
if f.opt.StorageClass != "" {
|
||||
dstObject = &storage.Object{
|
||||
StorageClass: f.opt.StorageClass,
|
||||
}
|
||||
}
|
||||
|
||||
rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, dstObject)
|
||||
if !f.opt.BucketPolicyOnly {
|
||||
rewriteRequest.DestinationPredefinedAcl(f.opt.ObjectACL)
|
||||
}
|
||||
@@ -1419,6 +1430,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
ContentType: fs.MimeType(ctx, src),
|
||||
Metadata: metadataFromModTime(modTime),
|
||||
}
|
||||
// Set the storage class from config if configured
|
||||
if o.fs.opt.StorageClass != "" {
|
||||
object.StorageClass = o.fs.opt.StorageClass
|
||||
}
|
||||
// Apply upload options
|
||||
for _, option := range options {
|
||||
key, value := option.Header()
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -59,31 +60,43 @@ const (
|
||||
configVersion = 1
|
||||
|
||||
defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
|
||||
defaultClientID = "jottacli"
|
||||
defaultClientID = "jottacli" // Identified as "Jottacloud CLI" in "My logged in devices"
|
||||
|
||||
legacyTokenURL = "https://api.jottacloud.com/auth/v1/token"
|
||||
legacyRegisterURL = "https://api.jottacloud.com/auth/v1/register"
|
||||
legacyClientID = "nibfk8biu12ju7hpqomr8b1e40"
|
||||
legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
|
||||
legacyConfigVersion = 0
|
||||
|
||||
teliaseCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
|
||||
teliaseCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
|
||||
teliaseCloudClientID = "desktop"
|
||||
|
||||
telianoCloudTokenURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/token"
|
||||
telianoCloudAuthURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/auth"
|
||||
telianoCloudClientID = "desktop"
|
||||
|
||||
tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
|
||||
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
|
||||
tele2CloudClientID = "desktop"
|
||||
|
||||
onlimeCloudTokenURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/token"
|
||||
onlimeCloudAuthURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/auth"
|
||||
onlimeCloudClientID = "desktop"
|
||||
)
|
||||
|
||||
type service struct {
|
||||
key string
|
||||
name string
|
||||
domain string
|
||||
realm string
|
||||
clientID string
|
||||
scopes []string
|
||||
}
|
||||
|
||||
// The list of services and their settings for supporting traditional OAuth.
|
||||
// Please keep these in alphabetical order, but with jottacloud first.
|
||||
func getServices() []service {
|
||||
return []service{
|
||||
{"jottacloud", "Jottacloud", "id.jottacloud.com", "jottacloud", "desktop", []string{"openid", "jotta-default", "offline_access"}}, // Chose client id "desktop" here, will be identified as "Jottacloud for Desktop" in "My logged in devices", but could have used "jottacli" here as well.
|
||||
{"elgiganten_dk", "Elgiganten Cloud (Denmark)", "cloud.elgiganten.dk", "elgiganten", "desktop", []string{"openid", "jotta-default", "offline_access"}},
|
||||
{"elgiganten_se", "Elgiganten Cloud (Sweden)", "cloud.elgiganten.se", "elgiganten", "desktop", []string{"openid", "jotta-default", "offline_access"}},
|
||||
{"elkjop", "Elkjøp Cloud (Norway)", "cloud.elkjop.no", "elkjop", "desktop", []string{"openid", "jotta-default", "offline_access"}},
|
||||
{"elko", "ELKO Cloud (Iceland)", "cloud.elko.is", "elko", "desktop", []string{"openid", "jotta-default", "offline_access"}},
|
||||
{"gigantti", "Gigantti Cloud (Finland)", "cloud.gigantti.fi", "gigantti", "desktop", []string{"openid", "jotta-default", "offline_access"}},
|
||||
{"letsgo", "Let's Go Cloud (Germany)", "letsgo.jotta.cloud", "letsgo", "desktop-win", []string{"openid", "offline_access"}},
|
||||
{"mediamarkt", "MediaMarkt Cloud (Multiregional)", "mediamarkt.jottacloud.com", "mediamarkt", "desktop", []string{"openid", "jotta-default", "offline_access"}},
|
||||
{"onlime", "Onlime (Denmark)", "cloud-auth.onlime.dk", "onlime_wl", "desktop", []string{"openid", "jotta-default", "offline_access"}},
|
||||
{"tele2", "Tele2 Cloud (Sweden)", "mittcloud-auth.tele2.se", "comhem", "desktop", []string{"openid", "jotta-default", "offline_access"}},
|
||||
{"telia_no", "Telia Sky (Norway)", "sky-auth.telia.no", "get", "desktop", []string{"openid", "jotta-default", "offline_access"}},
|
||||
{"telia_se", "Telia Cloud (Sweden)", "cloud-auth.telia.se", "telia_se", "desktop", []string{"openid", "jotta-default", "offline_access"}},
|
||||
}
|
||||
}
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
// needs to be done early so we can use oauth during config
|
||||
@@ -159,36 +172,44 @@ func init() {
|
||||
}
|
||||
|
||||
// Config runs the backend configuration protocol
|
||||
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
switch config.State {
|
||||
func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
switch conf.State {
|
||||
case "":
|
||||
return fs.ConfigChooseExclusiveFixed("auth_type_done", "config_type", `Select authentication type.`, []fs.OptionExample{{
|
||||
if isAuthorize, _ := m.Get(config.ConfigAuthorize); isAuthorize == "true" {
|
||||
return nil, errors.New("not supported by this backend")
|
||||
}
|
||||
return fs.ConfigChooseExclusiveFixed("auth_type_done", "config_type", `Type of authentication.`, []fs.OptionExample{{
|
||||
Value: "standard",
|
||||
Help: "Standard authentication.\nUse this if you're a normal Jottacloud user.",
|
||||
Help: `Standard authentication.
|
||||
This is primarily supported by the official service, but may also be
|
||||
supported by some white-label services. It is designed for command-line
|
||||
applications, and you will be asked to enter a single-use personal login
|
||||
token which you must manually generate from the account security settings
|
||||
in the web interface of your service.`,
|
||||
}, {
|
||||
Value: "traditional",
|
||||
Help: `Traditional authentication.
|
||||
This is supported by the official service and all white-label services
|
||||
that rclone knows about. You will be asked which service to connect to.
|
||||
It has a limitation of only a single active authentication at a time. You
|
||||
need to be on, or have access to, a machine with an internet-connected
|
||||
web browser.`,
|
||||
}, {
|
||||
Value: "legacy",
|
||||
Help: "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
|
||||
}, {
|
||||
Value: "telia_se",
|
||||
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud (Sweden).",
|
||||
}, {
|
||||
Value: "telia_no",
|
||||
Help: "Telia Sky authentication.\nUse this if you are using Telia Sky (Norway).",
|
||||
}, {
|
||||
Value: "tele2",
|
||||
Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
|
||||
}, {
|
||||
Value: "onlime",
|
||||
Help: "Onlime Cloud authentication.\nUse this if you are using Onlime Cloud.",
|
||||
Help: `Legacy authentication.
|
||||
This is no longer supported by any known services and not recommended
|
||||
used. You will be asked for your account's username and password.`,
|
||||
}})
|
||||
case "auth_type_done":
|
||||
// Jump to next state according to config chosen
|
||||
return fs.ConfigGoto(config.Result)
|
||||
return fs.ConfigGoto(conf.Result)
|
||||
case "standard": // configure a jottacloud backend using the modern JottaCli token based authentication
|
||||
m.Set("configVersion", fmt.Sprint(configVersion))
|
||||
return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\nGenerate here: https://www.jottacloud.com/web/secure")
|
||||
return fs.ConfigInput("standard_token", "config_login_token", `Personal login token.
|
||||
Generate it from the account security settings in the web interface of your
|
||||
service, for the official service on https://www.jottacloud.com/web/secure.`)
|
||||
case "standard_token":
|
||||
loginToken := config.Result
|
||||
loginToken := conf.Result
|
||||
m.Set(configClientID, defaultClientID)
|
||||
m.Set(configClientSecret, "")
|
||||
|
||||
@@ -203,10 +224,50 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
|
||||
return nil, fmt.Errorf("error while saving token: %w", err)
|
||||
}
|
||||
return fs.ConfigGoto("choose_device")
|
||||
case "traditional":
|
||||
services := getServices()
|
||||
options := make([]fs.OptionExample, 0, len(services))
|
||||
for _, service := range services {
|
||||
options = append(options, fs.OptionExample{
|
||||
Value: service.key,
|
||||
Help: service.name,
|
||||
})
|
||||
}
|
||||
return fs.ConfigChooseExclusiveFixed("traditional_type", "config_traditional",
|
||||
"White-label service. This decides the domain name to connect to and\nthe authentication configuration to use.",
|
||||
options)
|
||||
case "traditional_type":
|
||||
services := getServices()
|
||||
i := slices.IndexFunc(services, func(s service) bool { return s.key == conf.Result })
|
||||
if i == -1 {
|
||||
return nil, fmt.Errorf("unexpected service %q", conf.Result)
|
||||
}
|
||||
service := services[i]
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: "https://" + service.domain + "/auth/realms/" + service.realm + "/.well-known/openid-configuration",
|
||||
}
|
||||
var wellKnown api.WellKnown
|
||||
srv := rest.NewClient(fshttp.NewClient(ctx))
|
||||
_, err := srv.CallJSON(ctx, &opts, nil, &wellKnown)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get authentication provider configuration: %w", err)
|
||||
}
|
||||
m.Set("configVersion", fmt.Sprint(configVersion))
|
||||
m.Set(configClientID, service.clientID)
|
||||
m.Set(configTokenURL, wellKnown.TokenEndpoint)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauthutil.Config{
|
||||
AuthURL: wellKnown.AuthorizationEndpoint,
|
||||
TokenURL: wellKnown.TokenEndpoint,
|
||||
ClientID: service.clientID,
|
||||
Scopes: service.scopes,
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
},
|
||||
})
|
||||
case "legacy": // configure a jottacloud backend using legacy authentication
|
||||
m.Set("configVersion", fmt.Sprint(legacyConfigVersion))
|
||||
return fs.ConfigConfirm("legacy_api", false, "config_machine_specific", `Do you want to create a machine specific API key?
|
||||
|
||||
Rclone has it's own Jottacloud API KEY which works fine as long as one
|
||||
only uses rclone on a single machine. When you want to use rclone with
|
||||
this account on more than one machine it's recommended to create a
|
||||
@@ -214,7 +275,7 @@ machine specific API key. These keys can NOT be shared between
|
||||
machines.`)
|
||||
case "legacy_api":
|
||||
srv := rest.NewClient(fshttp.NewClient(ctx))
|
||||
if config.Result == "true" {
|
||||
if conf.Result == "true" {
|
||||
deviceRegistration, err := registerDevice(ctx, srv)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to register device: %w", err)
|
||||
@@ -223,16 +284,16 @@ machines.`)
|
||||
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
|
||||
fs.Debugf(nil, "Got clientID %q and clientSecret %q", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
|
||||
}
|
||||
return fs.ConfigInput("legacy_username", "config_username", "Username (e-mail address)")
|
||||
return fs.ConfigInput("legacy_username", "config_username", "Username (e-mail address) of your account.")
|
||||
case "legacy_username":
|
||||
m.Set(configUsername, config.Result)
|
||||
return fs.ConfigPassword("legacy_password", "config_password", "Password (only used in setup, will not be stored)")
|
||||
m.Set(configUsername, conf.Result)
|
||||
return fs.ConfigPassword("legacy_password", "config_password", "Password of your account. This is only used in setup, it will not be stored.")
|
||||
case "legacy_password":
|
||||
m.Set("password", config.Result)
|
||||
m.Set("password", conf.Result)
|
||||
m.Set("auth_code", "")
|
||||
return fs.ConfigGoto("legacy_do_auth")
|
||||
case "legacy_auth_code":
|
||||
authCode := strings.ReplaceAll(config.Result, "-", "") // remove any "-" contained in the code so we have a 6 digit number
|
||||
authCode := strings.ReplaceAll(conf.Result, "-", "") // remove any "-" contained in the code so we have a 6 digit number
|
||||
m.Set("auth_code", authCode)
|
||||
return fs.ConfigGoto("legacy_do_auth")
|
||||
case "legacy_do_auth":
|
||||
@@ -242,12 +303,12 @@ machines.`)
|
||||
authCode, _ := m.Get("auth_code")
|
||||
|
||||
srv := rest.NewClient(fshttp.NewClient(ctx))
|
||||
clientID, ok := m.Get(configClientID)
|
||||
if !ok {
|
||||
clientID, _ := m.Get(configClientID)
|
||||
if clientID == "" {
|
||||
clientID = legacyClientID
|
||||
}
|
||||
clientSecret, ok := m.Get(configClientSecret)
|
||||
if !ok {
|
||||
clientSecret, _ := m.Get(configClientSecret)
|
||||
if clientSecret == "" {
|
||||
clientSecret = legacyEncryptedClientSecret
|
||||
}
|
||||
|
||||
@@ -260,7 +321,7 @@ machines.`)
|
||||
}
|
||||
token, err := doLegacyAuth(ctx, srv, oauthConfig, username, password, authCode)
|
||||
if err == errAuthCodeRequired {
|
||||
return fs.ConfigInput("legacy_auth_code", "config_auth_code", "Verification Code\nThis account uses 2 factor authentication you will receive a verification code via SMS.")
|
||||
return fs.ConfigInput("legacy_auth_code", "config_auth_code", "Verification code.\nThis account uses 2 factor authentication you will receive a verification code via SMS.")
|
||||
}
|
||||
m.Set("password", "")
|
||||
m.Set("auth_code", "")
|
||||
@@ -272,58 +333,6 @@ machines.`)
|
||||
return nil, fmt.Errorf("error while saving token: %w", err)
|
||||
}
|
||||
return fs.ConfigGoto("choose_device")
|
||||
case "telia_se": // telia_se cloud config
|
||||
m.Set("configVersion", fmt.Sprint(configVersion))
|
||||
m.Set(configClientID, teliaseCloudClientID)
|
||||
m.Set(configTokenURL, teliaseCloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauthutil.Config{
|
||||
AuthURL: teliaseCloudAuthURL,
|
||||
TokenURL: teliaseCloudTokenURL,
|
||||
ClientID: teliaseCloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
},
|
||||
})
|
||||
case "telia_no": // telia_no cloud config
|
||||
m.Set("configVersion", fmt.Sprint(configVersion))
|
||||
m.Set(configClientID, telianoCloudClientID)
|
||||
m.Set(configTokenURL, telianoCloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauthutil.Config{
|
||||
AuthURL: telianoCloudAuthURL,
|
||||
TokenURL: telianoCloudTokenURL,
|
||||
ClientID: telianoCloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
},
|
||||
})
|
||||
case "tele2": // tele2 cloud config
|
||||
m.Set("configVersion", fmt.Sprint(configVersion))
|
||||
m.Set(configClientID, tele2CloudClientID)
|
||||
m.Set(configTokenURL, tele2CloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauthutil.Config{
|
||||
AuthURL: tele2CloudAuthURL,
|
||||
TokenURL: tele2CloudTokenURL,
|
||||
ClientID: tele2CloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
},
|
||||
})
|
||||
case "onlime": // onlime cloud config
|
||||
m.Set("configVersion", fmt.Sprint(configVersion))
|
||||
m.Set(configClientID, onlimeCloudClientID)
|
||||
m.Set(configTokenURL, onlimeCloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauthutil.Config{
|
||||
AuthURL: onlimeCloudAuthURL,
|
||||
TokenURL: onlimeCloudTokenURL,
|
||||
ClientID: onlimeCloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
},
|
||||
})
|
||||
case "choose_device":
|
||||
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint?
|
||||
Choosing no, the default, will let you access the storage used for the archive
|
||||
@@ -331,7 +340,7 @@ section of the official Jottacloud client. If you instead want to access the
|
||||
sync or the backup section, for example, you must choose yes.`)
|
||||
|
||||
case "choose_device_query":
|
||||
if config.Result != "true" {
|
||||
if conf.Result != "true" {
|
||||
m.Set(configDevice, "")
|
||||
m.Set(configMountpoint, "")
|
||||
return fs.ConfigGoto("end")
|
||||
@@ -372,7 +381,7 @@ a new by entering a unique name.`, defaultDevice)
|
||||
return deviceNames[i], ""
|
||||
})
|
||||
case "choose_device_result":
|
||||
device := config.Result
|
||||
device := conf.Result
|
||||
|
||||
oAuthClient, _, err := getOAuthClient(ctx, name, m)
|
||||
if err != nil {
|
||||
@@ -432,7 +441,7 @@ You may create a new by entering a unique name.`, device)
|
||||
return dev.MountPoints[i].Name, ""
|
||||
})
|
||||
case "choose_device_mountpoint":
|
||||
mountpoint := config.Result
|
||||
mountpoint := conf.Result
|
||||
|
||||
oAuthClient, _, err := getOAuthClient(ctx, name, m)
|
||||
if err != nil {
|
||||
@@ -463,7 +472,7 @@ You may create a new by entering a unique name.`, device)
|
||||
|
||||
if isNew {
|
||||
if device == defaultDevice {
|
||||
return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device: %w", defaultDevice, err)
|
||||
return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device", defaultDevice)
|
||||
}
|
||||
fs.Debugf(nil, "Creating new mountpoint: %s", mountpoint)
|
||||
_, err := createMountPoint(ctx, jfsSrv, path.Join(cust.Username, device, mountpoint))
|
||||
@@ -478,7 +487,7 @@ You may create a new by entering a unique name.`, device)
|
||||
// All the config flows end up here in case we need to carry on with something
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("unknown state %q", config.State)
|
||||
return nil, fmt.Errorf("unknown state %q", conf.State)
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
@@ -929,12 +938,12 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
|
||||
oauthConfig.AuthURL = tokenURL
|
||||
}
|
||||
} else if ver == legacyConfigVersion {
|
||||
clientID, ok := m.Get(configClientID)
|
||||
if !ok {
|
||||
clientID, _ := m.Get(configClientID)
|
||||
if clientID == "" {
|
||||
clientID = legacyClientID
|
||||
}
|
||||
clientSecret, ok := m.Get(configClientSecret)
|
||||
if !ok {
|
||||
clientSecret, _ := m.Get(configClientSecret)
|
||||
if clientSecret == "" {
|
||||
clientSecret = legacyEncryptedClientSecret
|
||||
}
|
||||
oauthConfig.ClientID = clientID
|
||||
@@ -1000,6 +1009,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
f.features.ListR = nil
|
||||
}
|
||||
|
||||
cust, err := getCustomerInfo(ctx, f.apiSrv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.user = cust.Username
|
||||
f.setEndpoints()
|
||||
|
||||
// Renew the token in the background
|
||||
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
||||
_, err := f.readMetaDataForPath(ctx, "")
|
||||
@@ -1009,13 +1025,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return err
|
||||
})
|
||||
|
||||
cust, err := getCustomerInfo(ctx, f.apiSrv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.user = cust.Username
|
||||
f.setEndpoints()
|
||||
|
||||
if root != "" && !rootIsDir {
|
||||
// Check to see if the root actually an existing file
|
||||
remote := path.Base(root)
|
||||
|
||||
@@ -497,9 +497,6 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||
}
|
||||
|
||||
f.dirCache.FlushDir(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -115,6 +115,17 @@ points, as you explicitly acknowledge that they should be skipped.`,
|
||||
NoPrefix: true,
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "skip_specials",
|
||||
Help: `Don't warn about skipped pipes, sockets and device objects.
|
||||
|
||||
This flag disables warning messages on skipped pipes, sockets and
|
||||
device objects, as you explicitly acknowledge that they should be
|
||||
skipped.`,
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
Advanced: true,
|
||||
},
|
||||
{
|
||||
Name: "zero_size_links",
|
||||
Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
|
||||
@@ -328,6 +339,7 @@ type Options struct {
|
||||
FollowSymlinks bool `config:"copy_links"`
|
||||
TranslateSymlinks bool `config:"links"`
|
||||
SkipSymlinks bool `config:"skip_links"`
|
||||
SkipSpecials bool `config:"skip_specials"`
|
||||
UTFNorm bool `config:"unicode_normalization"`
|
||||
NoCheckUpdated bool `config:"no_check_updated"`
|
||||
NoUNC bool `config:"nounc"`
|
||||
@@ -1246,7 +1258,9 @@ func (o *Object) Storable() bool {
|
||||
}
|
||||
return false
|
||||
} else if mode&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
|
||||
fs.Logf(o, "Can't transfer non file/directory")
|
||||
if !o.fs.opt.SkipSpecials {
|
||||
fs.Logf(o, "Can't transfer non file/directory")
|
||||
}
|
||||
return false
|
||||
} else if mode&os.ModeDir != 0 {
|
||||
// fs.Debugf(o, "Skipping directory")
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
//go:build dragonfly || plan9 || js
|
||||
//go:build dragonfly || plan9 || js || aix
|
||||
|
||||
package local
|
||||
|
||||
|
||||
@@ -1377,9 +1377,27 @@ func (f *Fs) itemToDirEntry(ctx context.Context, dir string, info *api.Item) (en
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
return list.WithListP(ctx, dir, f)
|
||||
}
|
||||
|
||||
// ListP lists the objects and directories of the Fs starting
|
||||
// from dir non recursively into out.
|
||||
//
|
||||
// dir should be "" to start from the root, and should not
|
||||
// have trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
//
|
||||
// It should call callback for each tranche of entries read.
|
||||
// These need not be returned in any particular order. If
|
||||
// callback returns an error then the listing will stop
|
||||
// immediately.
|
||||
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
|
||||
list := list.NewHelper(callback)
|
||||
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) error {
|
||||
entry, err := f.itemToDirEntry(ctx, dir, info)
|
||||
@@ -1389,13 +1407,16 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
if entry == nil {
|
||||
return nil
|
||||
}
|
||||
entries = append(entries, entry)
|
||||
err = list.Add(entry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
return entries, nil
|
||||
return list.Flush()
|
||||
}
|
||||
|
||||
// ListR lists the objects and directories of the Fs starting
|
||||
@@ -3023,6 +3044,7 @@ var (
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.CleanUpper = (*Fs)(nil)
|
||||
_ fs.ListRer = (*Fs)(nil)
|
||||
_ fs.ListPer = (*Fs)(nil)
|
||||
_ fs.Shutdowner = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.MimeTyper = &Object{}
|
||||
|
||||
@@ -629,11 +629,31 @@ func (f *Fs) listHelper(ctx context.Context, dir string, recursive bool, callbac
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
return list.WithListP(ctx, dir, f)
|
||||
}
|
||||
|
||||
// ListP lists the objects and directories of the Fs starting
|
||||
// from dir non recursively into out.
|
||||
//
|
||||
// dir should be "" to start from the root, and should not
|
||||
// have trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
//
|
||||
// It should call callback for each tranche of entries read.
|
||||
// These need not be returned in any particular order. If
|
||||
// callback returns an error then the listing will stop
|
||||
// immediately.
|
||||
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
|
||||
list := list.NewHelper(callback)
|
||||
err = f.listHelper(ctx, dir, false, func(o fs.DirEntry) error {
|
||||
entries = append(entries, o)
|
||||
return nil
|
||||
return list.Add(o)
|
||||
})
|
||||
return entries, err
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return list.Flush()
|
||||
}
|
||||
|
||||
// ListR lists the objects and directories of the Fs starting
|
||||
@@ -1377,6 +1397,8 @@ var (
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.ListRer = (*Fs)(nil)
|
||||
_ fs.ListPer = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Shutdowner = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
|
||||
@@ -5,6 +5,7 @@ package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"time"
|
||||
@@ -136,8 +137,25 @@ type Link struct {
|
||||
}
|
||||
|
||||
// Valid reports whether l is non-nil, has an URL, and is not expired.
|
||||
// It primarily checks the URL's expire query parameter, falling back to the Expire field.
|
||||
func (l *Link) Valid() bool {
|
||||
return l != nil && l.URL != "" && time.Now().Add(10*time.Second).Before(time.Time(l.Expire))
|
||||
if l == nil || l.URL == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
// Primary validation: check URL's expire query parameter
|
||||
if u, err := url.Parse(l.URL); err == nil {
|
||||
if expireStr := u.Query().Get("expire"); expireStr != "" {
|
||||
// Try parsing as Unix timestamp (seconds)
|
||||
if expireInt, err := strconv.ParseInt(expireStr, 10, 64); err == nil {
|
||||
expireTime := time.Unix(expireInt, 0)
|
||||
return time.Now().Add(10 * time.Second).Before(expireTime)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback validation: use the Expire field if URL parsing didn't work
|
||||
return time.Now().Add(10 * time.Second).Before(time.Time(l.Expire))
|
||||
}
|
||||
|
||||
// URL is a basic form of URL
|
||||
|
||||
99
backend/pikpak/api/types_test.go
Normal file
99
backend/pikpak/api/types_test.go
Normal file
@@ -0,0 +1,99 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestLinkValid tests the Link.Valid method for various scenarios
|
||||
func TestLinkValid(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
link *Link
|
||||
expected bool
|
||||
desc string
|
||||
}{
|
||||
{
|
||||
name: "nil link",
|
||||
link: nil,
|
||||
expected: false,
|
||||
desc: "nil link should be invalid",
|
||||
},
|
||||
{
|
||||
name: "empty URL",
|
||||
link: &Link{URL: ""},
|
||||
expected: false,
|
||||
desc: "empty URL should be invalid",
|
||||
},
|
||||
{
|
||||
name: "valid URL with future expire parameter",
|
||||
link: &Link{
|
||||
URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(time.Hour).Unix()),
|
||||
},
|
||||
expected: true,
|
||||
desc: "URL with future expire parameter should be valid",
|
||||
},
|
||||
{
|
||||
name: "expired URL with past expire parameter",
|
||||
link: &Link{
|
||||
URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(-time.Hour).Unix()),
|
||||
},
|
||||
expected: false,
|
||||
desc: "URL with past expire parameter should be invalid",
|
||||
},
|
||||
{
|
||||
name: "URL expire parameter takes precedence over Expire field",
|
||||
link: &Link{
|
||||
URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(time.Hour).Unix()),
|
||||
Expire: Time(time.Now().Add(-time.Hour)), // Fallback is expired
|
||||
},
|
||||
expected: true,
|
||||
desc: "URL expire parameter should take precedence over Expire field",
|
||||
},
|
||||
{
|
||||
name: "URL expire parameter within 10 second buffer should be invalid",
|
||||
link: &Link{
|
||||
URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(5*time.Second).Unix()),
|
||||
},
|
||||
expected: false,
|
||||
desc: "URL expire parameter within 10 second buffer should be invalid",
|
||||
},
|
||||
{
|
||||
name: "fallback to Expire field when no URL expire parameter",
|
||||
link: &Link{
|
||||
URL: "https://example.com/file",
|
||||
Expire: Time(time.Now().Add(time.Hour)),
|
||||
},
|
||||
expected: true,
|
||||
desc: "should fallback to Expire field when URL has no expire parameter",
|
||||
},
|
||||
{
|
||||
name: "fallback to Expire field when URL expire parameter is invalid",
|
||||
link: &Link{
|
||||
URL: "https://example.com/file?expire=invalid",
|
||||
Expire: Time(time.Now().Add(time.Hour)),
|
||||
},
|
||||
expected: true,
|
||||
desc: "should fallback to Expire field when URL expire parameter is unparseable",
|
||||
},
|
||||
{
|
||||
name: "invalid when both URL expire and Expire field are expired",
|
||||
link: &Link{
|
||||
URL: fmt.Sprintf("https://example.com/file?expire=%d", time.Now().Add(-time.Hour).Unix()),
|
||||
Expire: Time(time.Now().Add(-time.Hour)),
|
||||
},
|
||||
expected: false,
|
||||
desc: "should be invalid when both URL expire and Expire field are expired",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := tt.link.Valid()
|
||||
if result != tt.expected {
|
||||
t.Errorf("Link.Valid() = %v, expected %v. %s", result, tt.expected, tt.desc)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -13,6 +13,8 @@ import (
|
||||
protonDriveAPI "github.com/henrybear327/Proton-API-Bridge"
|
||||
"github.com/henrybear327/go-proton-api"
|
||||
|
||||
"github.com/pquerna/otp/totp"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
@@ -87,6 +89,17 @@ The value can also be provided with --protondrive-2fa=000000
|
||||
The 2FA code of your proton drive account if the account is set up with
|
||||
two-factor authentication`,
|
||||
Required: false,
|
||||
}, {
|
||||
Name: "otp_secret_key",
|
||||
Help: `The OTP secret key
|
||||
|
||||
The value can also be provided with --protondrive-otp-secret-key=ABCDEFGHIJKLMNOPQRSTUVWXYZ234567
|
||||
|
||||
The OTP secret key of your proton drive account if the account is set up with
|
||||
two-factor authentication`,
|
||||
Required: false,
|
||||
Sensitive: true,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: clientUIDKey,
|
||||
Help: "Client uid key (internal use only)",
|
||||
@@ -191,6 +204,7 @@ type Options struct {
|
||||
Password string `config:"password"`
|
||||
MailboxPassword string `config:"mailbox_password"`
|
||||
TwoFA string `config:"2fa"`
|
||||
OtpSecretKey string `config:"otp_secret_key"`
|
||||
|
||||
// advanced
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
@@ -356,7 +370,15 @@ func newProtonDrive(ctx context.Context, f *Fs, opt *Options, m configmap.Mapper
|
||||
config.FirstLoginCredential.Username = opt.Username
|
||||
config.FirstLoginCredential.Password = opt.Password
|
||||
config.FirstLoginCredential.MailboxPassword = opt.MailboxPassword
|
||||
// if 2FA code is provided, use it; otherwise, generate one using the OTP secret key if provided
|
||||
config.FirstLoginCredential.TwoFA = opt.TwoFA
|
||||
if opt.TwoFA == "" && opt.OtpSecretKey != "" {
|
||||
code, err := totp.GenerateCode(opt.OtpSecretKey, time.Now())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't generate 2FA code: %w", err)
|
||||
}
|
||||
config.FirstLoginCredential.TwoFA = code
|
||||
}
|
||||
protonDrive, auth, err := protonDriveAPI.NewProtonDrive(ctx, config, authHandler, deAuthHandler)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't initialize a new proton drive instance: %w", err)
|
||||
@@ -395,6 +417,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
}
|
||||
|
||||
if opt.OtpSecretKey != "" {
|
||||
var err error
|
||||
opt.OtpSecretKey, err = obscure.Reveal(opt.OtpSecretKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't decrypt OtpSecretKey: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
ci := fs.GetConfig(ctx)
|
||||
|
||||
root = strings.Trim(root, "/")
|
||||
|
||||
194
backend/s3/README.md
Normal file
194
backend/s3/README.md
Normal file
@@ -0,0 +1,194 @@
|
||||
## Adding a new s3 provider
|
||||
|
||||
It is quite easy to add a new S3 provider to rclone.
|
||||
|
||||
You'll then need to do add the following (optional tags are in [] and
|
||||
do not get displayed in rclone config if empty):
|
||||
|
||||
The process is as follows: Create yaml -> add docs -> run tests ->
|
||||
adjust yaml until tests pass.
|
||||
|
||||
All tags can be found in `backend/s3/providers.go` Provider Struct.
|
||||
Looking through a few of the yaml files as examples should make things
|
||||
clear. `AWS.yaml` as the most config. pasting.
|
||||
|
||||
### YAML
|
||||
|
||||
In `backend/s3/provider/YourProvider.yaml`
|
||||
|
||||
- name
|
||||
- description
|
||||
- More like the full name often "YourProvider + Object Storage"
|
||||
- [Region]
|
||||
- Any regions your provider supports or the defaults (use `region: {}` for this)
|
||||
- Example from AWS.yaml:
|
||||
```yaml
|
||||
region:
|
||||
us-east-1: |-
|
||||
The default endpoint - a good choice if you are unsure.
|
||||
US Region, Northern Virginia, or Pacific Northwest.
|
||||
Leave location constraint empty.
|
||||
```
|
||||
- The defaults (as seen in Rclone.yaml):
|
||||
```yaml
|
||||
region:
|
||||
"": |-
|
||||
Use this if unsure.
|
||||
Will use v4 signatures and an empty region.
|
||||
other-v2-signature: |-
|
||||
Use this only if v4 signatures don't work.
|
||||
E.g. pre Jewel/v10 CEPH.
|
||||
```
|
||||
- [Endpoint]
|
||||
- Any endpoints your provider supports
|
||||
- Example from Mega.yaml
|
||||
```yaml
|
||||
endpoint:
|
||||
s3.eu-central-1.s4.mega.io: Mega S4 eu-central-1 (Amsterdam)
|
||||
```
|
||||
- [Location Constraint]
|
||||
- The Location Constraint of your remote, often same as region.
|
||||
- Example from AWS.yaml
|
||||
```yaml
|
||||
location_constraint:
|
||||
"": Empty for US Region, Northern Virginia, or Pacific Northwest
|
||||
us-east-2: US East (Ohio) Region
|
||||
```
|
||||
- [ACL]
|
||||
- Identical across *most* providers. Select the default with `acl: {}`
|
||||
- Example from AWS.yaml
|
||||
```yaml
|
||||
acl:
|
||||
private: |-
|
||||
Owner gets FULL_CONTROL.
|
||||
No one else has access rights (default).
|
||||
public-read: |-
|
||||
Owner gets FULL_CONTROL.
|
||||
The AllUsers group gets READ access.
|
||||
public-read-write: |-
|
||||
Owner gets FULL_CONTROL.
|
||||
The AllUsers group gets READ and WRITE access.
|
||||
Granting this on a bucket is generally not recommended.
|
||||
authenticated-read: |-
|
||||
Owner gets FULL_CONTROL.
|
||||
The AuthenticatedUsers group gets READ access.
|
||||
bucket-owner-read: |-
|
||||
Object owner gets FULL_CONTROL.
|
||||
Bucket owner gets READ access.
|
||||
If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
|
||||
bucket-owner-full-control: |-
|
||||
Both the object owner and the bucket owner get FULL_CONTROL over the object.
|
||||
If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
|
||||
```
|
||||
- [Storage Class]
|
||||
- Identical across *most* providers.
|
||||
- Defaults from AWS.yaml
|
||||
```yaml
|
||||
storage_class:
|
||||
"": Default
|
||||
STANDARD: Standard storage class
|
||||
REDUCED_REDUNDANCY: Reduced redundancy storage class
|
||||
STANDARD_IA: Standard Infrequent Access storage class
|
||||
ONEZONE_IA: One Zone Infrequent Access storage class
|
||||
GLACIER: Glacier Flexible Retrieval storage class
|
||||
DEEP_ARCHIVE: Glacier Deep Archive storage class
|
||||
INTELLIGENT_TIERING: Intelligent-Tiering storage class
|
||||
GLACIER_IR: Glacier Instant Retrieval storage class
|
||||
```
|
||||
- [Server Side Encryption]
|
||||
- Not common, identical across *most* providers.
|
||||
- Defaults from AWS.yaml
|
||||
```yaml
|
||||
server_side_encryption:
|
||||
"": None
|
||||
AES256: AES256
|
||||
aws:kms: aws:kms
|
||||
```
|
||||
- [Advanced Options]
|
||||
- All advanced options are Boolean - if true the configurator asks about that value, if not it doesn't:
|
||||
```go
|
||||
BucketACL bool `yaml:"bucket_acl,omitempty"`
|
||||
DirectoryBucket bool `yaml:"directory_bucket,omitempty"`
|
||||
LeavePartsOnError bool `yaml:"leave_parts_on_error,omitempty"`
|
||||
RequesterPays bool `yaml:"requester_pays,omitempty"`
|
||||
SSECustomerAlgorithm bool `yaml:"sse_customer_algorithm,omitempty"`
|
||||
SSECustomerKey bool `yaml:"sse_customer_key,omitempty"`
|
||||
SSECustomerKeyBase64 bool `yaml:"sse_customer_key_base64,omitempty"`
|
||||
SSECustomerKeyMd5 bool `yaml:"sse_customer_key_md5,omitempty"`
|
||||
SSEKmsKeyID bool `yaml:"sse_kms_key_id,omitempty"`
|
||||
STSEndpoint bool `yaml:"sts_endpoint,omitempty"`
|
||||
UseAccelerateEndpoint bool `yaml:"use_accelerate_endpoint,omitempty"`
|
||||
```
|
||||
- Example from AWS.yaml:
|
||||
```yaml
|
||||
bucket_acl: true
|
||||
directory_bucket: true
|
||||
leave_parts_on_error: true
|
||||
requester_pays: true
|
||||
sse_customer_algorithm: true
|
||||
sse_customer_key: true
|
||||
sse_customer_key_base64: true
|
||||
sse_customer_key_md5: true
|
||||
sse_kms_key_id: true
|
||||
sts_endpoint: true
|
||||
use_accelerate_endpoint: true
|
||||
```
|
||||
- Quirks
|
||||
- Quirks are discovered through documentation and running the tests as seen below.
|
||||
- Most quirks are *bool as to have 3 values, `true`, `false` and `dont care`.
|
||||
```go
|
||||
type Quirks struct {
|
||||
ListVersion *int `yaml:"list_version,omitempty"` // 1 or 2
|
||||
ForcePathStyle *bool `yaml:"force_path_style,omitempty"` // true = path-style
|
||||
ListURLEncode *bool `yaml:"list_url_encode,omitempty"`
|
||||
UseMultipartEtag *bool `yaml:"use_multipart_etag,omitempty"`
|
||||
UseAlreadyExists *bool `yaml:"use_already_exists,omitempty"`
|
||||
UseAcceptEncodingGzip *bool `yaml:"use_accept_encoding_gzip,omitempty"`
|
||||
MightGzip *bool `yaml:"might_gzip,omitempty"`
|
||||
UseMultipartUploads *bool `yaml:"use_multipart_uploads,omitempty"`
|
||||
UseUnsignedPayload *bool `yaml:"use_unsigned_payload,omitempty"`
|
||||
UseXID *bool `yaml:"use_x_id,omitempty"`
|
||||
SignAcceptEncoding *bool `yaml:"sign_accept_encoding,omitempty"`
|
||||
CopyCutoff *int64 `yaml:"copy_cutoff,omitempty"`
|
||||
MaxUploadParts *int `yaml:"max_upload_parts,omitempty"`
|
||||
MinChunkSize *int64 `yaml:"min_chunk_size,omitempty"`
|
||||
}
|
||||
```
|
||||
- Example from AWS.yaml
|
||||
```yaml
|
||||
quirks:
|
||||
might_gzip: false # Never auto gzips objects
|
||||
use_unsigned_payload: false # AWS has trailer support
|
||||
```
|
||||
|
||||
Note that if you omit a section, eg `region` then the user won't be
|
||||
asked that question, and if you add an empty section e.g. `region: {}`
|
||||
then the defaults from the `Other.yaml` will be used.
|
||||
|
||||
### DOCS
|
||||
|
||||
- `docs/content/s3.md`
|
||||
- Add the provider at the top of the page.
|
||||
- Add a section about the provider linked from there.
|
||||
- Make sure this is in alphabetical order in the `Providers` section.
|
||||
- Add a transcript of a trial `rclone config` session
|
||||
- Edit the transcript to remove things which might change in subsequent versions
|
||||
- **Do not** alter or add to the autogenerated parts of `s3.md`
|
||||
- Rule of thumb: don't edit anything not mentioned above.
|
||||
- **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
|
||||
- This will make autogenerated changes!
|
||||
- `README.md` - this is the home page in github
|
||||
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
|
||||
- `docs/content/_index.md` - this is the home page of rclone.org
|
||||
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
|
||||
- Once you've written the docs, run `make serve` and check they look OK
|
||||
in the web browser and the links (internal and external) all work.
|
||||
|
||||
### TESTS
|
||||
|
||||
Once you've written the code, test `rclone config` works to your
|
||||
satisfaction and looks correct, and check the integration tests work
|
||||
`go test -v -remote NewS3Provider:`. You may need to adjust the quirks
|
||||
to get them to pass. Some providers just can't pass the tests with
|
||||
control characters in the names so if these fail and the provider
|
||||
doesn't support `urlEncodeListings` in the quirks then ignore them.
|
||||
139
backend/s3/provider/AWS.yaml
Normal file
139
backend/s3/provider/AWS.yaml
Normal file
@@ -0,0 +1,139 @@
|
||||
name: AWS
|
||||
description: Amazon Web Services (AWS) S3
|
||||
region:
|
||||
us-east-1: |-
|
||||
The default endpoint - a good choice if you are unsure.
|
||||
US Region, Northern Virginia, or Pacific Northwest.
|
||||
Leave location constraint empty.
|
||||
us-east-2: |-
|
||||
US East (Ohio) Region.
|
||||
Needs location constraint us-east-2.
|
||||
us-west-1: |-
|
||||
US West (Northern California) Region.
|
||||
Needs location constraint us-west-1.
|
||||
us-west-2: |-
|
||||
US West (Oregon) Region.
|
||||
Needs location constraint us-west-2.
|
||||
ca-central-1: |-
|
||||
Canada (Central) Region.
|
||||
Needs location constraint ca-central-1.
|
||||
eu-west-1: |-
|
||||
EU (Ireland) Region.
|
||||
Needs location constraint EU or eu-west-1.
|
||||
eu-west-2: |-
|
||||
EU (London) Region.
|
||||
Needs location constraint eu-west-2.
|
||||
eu-west-3: |-
|
||||
EU (Paris) Region.
|
||||
Needs location constraint eu-west-3.
|
||||
eu-north-1: |-
|
||||
EU (Stockholm) Region.
|
||||
Needs location constraint eu-north-1.
|
||||
eu-south-1: |-
|
||||
EU (Milan) Region.
|
||||
Needs location constraint eu-south-1.
|
||||
eu-central-1: |-
|
||||
EU (Frankfurt) Region.
|
||||
Needs location constraint eu-central-1.
|
||||
ap-southeast-1: |-
|
||||
Asia Pacific (Singapore) Region.
|
||||
Needs location constraint ap-southeast-1.
|
||||
ap-southeast-2: |-
|
||||
Asia Pacific (Sydney) Region.
|
||||
Needs location constraint ap-southeast-2.
|
||||
ap-northeast-1: |-
|
||||
Asia Pacific (Tokyo) Region.
|
||||
Needs location constraint ap-northeast-1.
|
||||
ap-northeast-2: |-
|
||||
Asia Pacific (Seoul).
|
||||
Needs location constraint ap-northeast-2.
|
||||
ap-northeast-3: |-
|
||||
Asia Pacific (Osaka-Local).
|
||||
Needs location constraint ap-northeast-3.
|
||||
ap-south-1: |-
|
||||
Asia Pacific (Mumbai).
|
||||
Needs location constraint ap-south-1.
|
||||
ap-east-1: |-
|
||||
Asia Pacific (Hong Kong) Region.
|
||||
Needs location constraint ap-east-1.
|
||||
sa-east-1: |-
|
||||
South America (Sao Paulo) Region.
|
||||
Needs location constraint sa-east-1.
|
||||
il-central-1: |-
|
||||
Israel (Tel Aviv) Region.
|
||||
Needs location constraint il-central-1.
|
||||
me-south-1: |-
|
||||
Middle East (Bahrain) Region.
|
||||
Needs location constraint me-south-1.
|
||||
af-south-1: |-
|
||||
Africa (Cape Town) Region.
|
||||
Needs location constraint af-south-1.
|
||||
cn-north-1: |-
|
||||
China (Beijing) Region.
|
||||
Needs location constraint cn-north-1.
|
||||
cn-northwest-1: |-
|
||||
China (Ningxia) Region.
|
||||
Needs location constraint cn-northwest-1.
|
||||
us-gov-east-1: |-
|
||||
AWS GovCloud (US-East) Region.
|
||||
Needs location constraint us-gov-east-1.
|
||||
us-gov-west-1: |-
|
||||
AWS GovCloud (US) Region.
|
||||
Needs location constraint us-gov-west-1.
|
||||
endpoint: {}
|
||||
location_constraint:
|
||||
'': Empty for US Region, Northern Virginia, or Pacific Northwest
|
||||
us-east-2: US East (Ohio) Region
|
||||
us-west-1: US West (Northern California) Region
|
||||
us-west-2: US West (Oregon) Region
|
||||
ca-central-1: Canada (Central) Region
|
||||
eu-west-1: EU (Ireland) Region
|
||||
eu-west-2: EU (London) Region
|
||||
eu-west-3: EU (Paris) Region
|
||||
eu-north-1: EU (Stockholm) Region
|
||||
eu-south-1: EU (Milan) Region
|
||||
EU: EU Region
|
||||
ap-southeast-1: Asia Pacific (Singapore) Region
|
||||
ap-southeast-2: Asia Pacific (Sydney) Region
|
||||
ap-northeast-1: Asia Pacific (Tokyo) Region
|
||||
ap-northeast-2: Asia Pacific (Seoul) Region
|
||||
ap-northeast-3: Asia Pacific (Osaka-Local) Region
|
||||
ap-south-1: Asia Pacific (Mumbai) Region
|
||||
ap-east-1: Asia Pacific (Hong Kong) Region
|
||||
sa-east-1: South America (Sao Paulo) Region
|
||||
il-central-1: Israel (Tel Aviv) Region
|
||||
me-south-1: Middle East (Bahrain) Region
|
||||
af-south-1: Africa (Cape Town) Region
|
||||
cn-north-1: China (Beijing) Region
|
||||
cn-northwest-1: China (Ningxia) Region
|
||||
us-gov-east-1: AWS GovCloud (US-East) Region
|
||||
us-gov-west-1: AWS GovCloud (US) Region
|
||||
acl: {}
|
||||
storage_class:
|
||||
'': Default
|
||||
STANDARD: Standard storage class
|
||||
REDUCED_REDUNDANCY: Reduced redundancy storage class
|
||||
STANDARD_IA: Standard Infrequent Access storage class
|
||||
ONEZONE_IA: One Zone Infrequent Access storage class
|
||||
GLACIER: Glacier Flexible Retrieval storage class
|
||||
DEEP_ARCHIVE: Glacier Deep Archive storage class
|
||||
INTELLIGENT_TIERING: Intelligent-Tiering storage class
|
||||
GLACIER_IR: Glacier Instant Retrieval storage class
|
||||
server_side_encryption:
|
||||
'': None
|
||||
AES256: AES256
|
||||
aws:kms: aws:kms
|
||||
bucket_acl: true
|
||||
directory_bucket: true
|
||||
leave_parts_on_error: true
|
||||
requester_pays: true
|
||||
sse_customer_algorithm: true
|
||||
sse_customer_key: true
|
||||
sse_customer_key_base64: true
|
||||
sse_customer_key_md5: true
|
||||
sse_kms_key_id: true
|
||||
sts_endpoint: true
|
||||
use_accelerate_endpoint: true
|
||||
quirks:
|
||||
might_gzip: false # Never auto gzips objects
|
||||
use_unsigned_payload: false # AWS has trailer support which means it adds checksums in the trailer without seeking
|
||||
37
backend/s3/provider/Alibaba.yaml
Normal file
37
backend/s3/provider/Alibaba.yaml
Normal file
@@ -0,0 +1,37 @@
|
||||
name: Alibaba
|
||||
description: Alibaba Cloud Object Storage System (OSS) formerly Aliyun
|
||||
endpoint:
|
||||
oss-accelerate.aliyuncs.com: Global Accelerate
|
||||
oss-accelerate-overseas.aliyuncs.com: Global Accelerate (outside mainland China)
|
||||
oss-cn-hangzhou.aliyuncs.com: East China 1 (Hangzhou)
|
||||
oss-cn-shanghai.aliyuncs.com: East China 2 (Shanghai)
|
||||
oss-cn-qingdao.aliyuncs.com: North China 1 (Qingdao)
|
||||
oss-cn-beijing.aliyuncs.com: North China 2 (Beijing)
|
||||
oss-cn-zhangjiakou.aliyuncs.com: North China 3 (Zhangjiakou)
|
||||
oss-cn-huhehaote.aliyuncs.com: North China 5 (Hohhot)
|
||||
oss-cn-wulanchabu.aliyuncs.com: North China 6 (Ulanqab)
|
||||
oss-cn-shenzhen.aliyuncs.com: South China 1 (Shenzhen)
|
||||
oss-cn-heyuan.aliyuncs.com: South China 2 (Heyuan)
|
||||
oss-cn-guangzhou.aliyuncs.com: South China 3 (Guangzhou)
|
||||
oss-cn-chengdu.aliyuncs.com: West China 1 (Chengdu)
|
||||
oss-cn-hongkong.aliyuncs.com: Hong Kong (Hong Kong)
|
||||
oss-us-west-1.aliyuncs.com: US West 1 (Silicon Valley)
|
||||
oss-us-east-1.aliyuncs.com: US East 1 (Virginia)
|
||||
oss-ap-southeast-1.aliyuncs.com: Southeast Asia Southeast 1 (Singapore)
|
||||
oss-ap-southeast-2.aliyuncs.com: Asia Pacific Southeast 2 (Sydney)
|
||||
oss-ap-southeast-3.aliyuncs.com: Southeast Asia Southeast 3 (Kuala Lumpur)
|
||||
oss-ap-southeast-5.aliyuncs.com: Asia Pacific Southeast 5 (Jakarta)
|
||||
oss-ap-northeast-1.aliyuncs.com: Asia Pacific Northeast 1 (Japan)
|
||||
oss-ap-south-1.aliyuncs.com: Asia Pacific South 1 (Mumbai)
|
||||
oss-eu-central-1.aliyuncs.com: Central Europe 1 (Frankfurt)
|
||||
oss-eu-west-1.aliyuncs.com: West Europe (London)
|
||||
oss-me-east-1.aliyuncs.com: Middle East 1 (Dubai)
|
||||
acl: {}
|
||||
storage_class:
|
||||
'': Default
|
||||
STANDARD: Standard storage class
|
||||
GLACIER: Archive storage mode
|
||||
STANDARD_IA: Infrequent access storage mode
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
use_multipart_etag: false # multipar ETags differ from AWS
|
||||
19
backend/s3/provider/ArvanCloud.yaml
Normal file
19
backend/s3/provider/ArvanCloud.yaml
Normal file
@@ -0,0 +1,19 @@
|
||||
name: ArvanCloud
|
||||
description: Arvan Cloud Object Storage (AOS)
|
||||
endpoint:
|
||||
s3.ir-thr-at1.arvanstorage.ir: |-
|
||||
The default endpoint - a good choice if you are unsure.
|
||||
Tehran Iran (Simin)
|
||||
s3.ir-tbz-sh1.arvanstorage.ir: Tabriz Iran (Shahriar)
|
||||
location_constraint:
|
||||
ir-thr-at1: Tehran Iran (Simin)
|
||||
ir-tbz-sh1: Tabriz Iran (Shahriar)
|
||||
acl: {}
|
||||
storage_class:
|
||||
STANDARD: Standard storage class
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
list_version: 1
|
||||
force_path_style: true
|
||||
list_url_encode: false
|
||||
use_already_exists: false
|
||||
20
backend/s3/provider/Ceph.yaml
Normal file
20
backend/s3/provider/Ceph.yaml
Normal file
@@ -0,0 +1,20 @@
|
||||
name: Ceph
|
||||
description: Ceph Object Storage
|
||||
region: {}
|
||||
endpoint: {}
|
||||
location_constraint: {}
|
||||
acl: {}
|
||||
server_side_encryption:
|
||||
'': None
|
||||
AES256: AES256
|
||||
aws:kms: aws:kms
|
||||
bucket_acl: true
|
||||
sse_customer_algorithm: true
|
||||
sse_customer_key: true
|
||||
sse_customer_key_base64: true
|
||||
sse_customer_key_md5: true
|
||||
sse_kms_key_id: true
|
||||
quirks:
|
||||
list_version: 1
|
||||
force_path_style: true
|
||||
list_url_encode: false
|
||||
98
backend/s3/provider/ChinaMobile.yaml
Normal file
98
backend/s3/provider/ChinaMobile.yaml
Normal file
@@ -0,0 +1,98 @@
|
||||
name: ChinaMobile
|
||||
description: China Mobile Ecloud Elastic Object Storage (EOS)
|
||||
endpoint:
|
||||
eos-wuxi-1.cmecloud.cn: |-
|
||||
The default endpoint - a good choice if you are unsure.
|
||||
East China (Suzhou)
|
||||
eos-jinan-1.cmecloud.cn: East China (Jinan)
|
||||
eos-ningbo-1.cmecloud.cn: East China (Hangzhou)
|
||||
eos-shanghai-1.cmecloud.cn: East China (Shanghai-1)
|
||||
eos-zhengzhou-1.cmecloud.cn: Central China (Zhengzhou)
|
||||
eos-hunan-1.cmecloud.cn: Central China (Changsha-1)
|
||||
eos-zhuzhou-1.cmecloud.cn: Central China (Changsha-2)
|
||||
eos-guangzhou-1.cmecloud.cn: South China (Guangzhou-2)
|
||||
eos-dongguan-1.cmecloud.cn: South China (Guangzhou-3)
|
||||
eos-beijing-1.cmecloud.cn: North China (Beijing-1)
|
||||
eos-beijing-2.cmecloud.cn: North China (Beijing-2)
|
||||
eos-beijing-4.cmecloud.cn: North China (Beijing-3)
|
||||
eos-huhehaote-1.cmecloud.cn: North China (Huhehaote)
|
||||
eos-chengdu-1.cmecloud.cn: Southwest China (Chengdu)
|
||||
eos-chongqing-1.cmecloud.cn: Southwest China (Chongqing)
|
||||
eos-guiyang-1.cmecloud.cn: Southwest China (Guiyang)
|
||||
eos-xian-1.cmecloud.cn: Northwest China (Xian)
|
||||
eos-yunnan.cmecloud.cn: Yunnan China (Kunming)
|
||||
eos-yunnan-2.cmecloud.cn: Yunnan China (Kunming-2)
|
||||
eos-tianjin-1.cmecloud.cn: Tianjin China (Tianjin)
|
||||
eos-jilin-1.cmecloud.cn: Jilin China (Changchun)
|
||||
eos-hubei-1.cmecloud.cn: Hubei China (Xiangyan)
|
||||
eos-jiangxi-1.cmecloud.cn: Jiangxi China (Nanchang)
|
||||
eos-gansu-1.cmecloud.cn: Gansu China (Lanzhou)
|
||||
eos-shanxi-1.cmecloud.cn: Shanxi China (Taiyuan)
|
||||
eos-liaoning-1.cmecloud.cn: Liaoning China (Shenyang)
|
||||
eos-hebei-1.cmecloud.cn: Hebei China (Shijiazhuang)
|
||||
eos-fujian-1.cmecloud.cn: Fujian China (Xiamen)
|
||||
eos-guangxi-1.cmecloud.cn: Guangxi China (Nanning)
|
||||
eos-anhui-1.cmecloud.cn: Anhui China (Huainan)
|
||||
location_constraint:
|
||||
wuxi1: East China (Suzhou)
|
||||
jinan1: East China (Jinan)
|
||||
ningbo1: East China (Hangzhou)
|
||||
shanghai1: East China (Shanghai-1)
|
||||
zhengzhou1: Central China (Zhengzhou)
|
||||
hunan1: Central China (Changsha-1)
|
||||
zhuzhou1: Central China (Changsha-2)
|
||||
guangzhou1: South China (Guangzhou-2)
|
||||
dongguan1: South China (Guangzhou-3)
|
||||
beijing1: North China (Beijing-1)
|
||||
beijing2: North China (Beijing-2)
|
||||
beijing4: North China (Beijing-3)
|
||||
huhehaote1: North China (Huhehaote)
|
||||
chengdu1: Southwest China (Chengdu)
|
||||
chongqing1: Southwest China (Chongqing)
|
||||
guiyang1: Southwest China (Guiyang)
|
||||
xian1: Northwest China (Xian)
|
||||
yunnan: Yunnan China (Kunming)
|
||||
yunnan2: Yunnan China (Kunming-2)
|
||||
tianjin1: Tianjin China (Tianjin)
|
||||
jilin1: Jilin China (Changchun)
|
||||
hubei1: Hubei China (Xiangyan)
|
||||
jiangxi1: Jiangxi China (Nanchang)
|
||||
gansu1: Gansu China (Lanzhou)
|
||||
shanxi1: Shanxi China (Taiyuan)
|
||||
liaoning1: Liaoning China (Shenyang)
|
||||
hebei1: Hebei China (Shijiazhuang)
|
||||
fujian1: Fujian China (Xiamen)
|
||||
guangxi1: Guangxi China (Nanning)
|
||||
anhui1: Anhui China (Huainan)
|
||||
acl:
|
||||
private: |-
|
||||
Owner gets FULL_CONTROL.
|
||||
No one else has access rights (default).
|
||||
public-read: |-
|
||||
Owner gets FULL_CONTROL.
|
||||
The AllUsers group gets READ access.
|
||||
public-read-write: |-
|
||||
Owner gets FULL_CONTROL.
|
||||
The AllUsers group gets READ and WRITE access.
|
||||
Granting this on a bucket is generally not recommended.
|
||||
authenticated-read: |-
|
||||
Owner gets FULL_CONTROL.
|
||||
The AuthenticatedUsers group gets READ access.
|
||||
storage_class:
|
||||
'': Default
|
||||
STANDARD: Standard storage class
|
||||
GLACIER: Archive storage mode
|
||||
STANDARD_IA: Infrequent access storage mode
|
||||
server_side_encryption:
|
||||
'': None
|
||||
AES256: AES256
|
||||
bucket_acl: true
|
||||
sse_customer_algorithm: true
|
||||
sse_customer_key: true
|
||||
sse_customer_key_base64: true
|
||||
sse_customer_key_md5: true
|
||||
quirks:
|
||||
list_version: 1
|
||||
force_path_style: true
|
||||
list_url_encode: false
|
||||
use_already_exists: false
|
||||
8
backend/s3/provider/Cloudflare.yaml
Normal file
8
backend/s3/provider/Cloudflare.yaml
Normal file
@@ -0,0 +1,8 @@
|
||||
name: Cloudflare
|
||||
description: Cloudflare R2 Storage
|
||||
region:
|
||||
auto: R2 buckets are automatically distributed across Cloudflare's data centers for low latency.
|
||||
endpoint: {}
|
||||
quirks:
|
||||
force_path_style: true
|
||||
use_multipart_etag: false # multipart ETags are random
|
||||
10
backend/s3/provider/Cubbit.yaml
Normal file
10
backend/s3/provider/Cubbit.yaml
Normal file
@@ -0,0 +1,10 @@
|
||||
name: Cubbit
|
||||
description: Cubbit DS3 Object Storage
|
||||
region:
|
||||
eu-west-1: Europe West
|
||||
endpoint:
|
||||
s3.cubbit.eu: Cubbit DS3 Object Storage endpoint
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
use_multipart_etag: false
|
||||
20
backend/s3/provider/DigitalOcean.yaml
Normal file
20
backend/s3/provider/DigitalOcean.yaml
Normal file
@@ -0,0 +1,20 @@
|
||||
name: DigitalOcean
|
||||
description: DigitalOcean Spaces
|
||||
region: {}
|
||||
endpoint:
|
||||
syd1.digitaloceanspaces.com: DigitalOcean Spaces Sydney 1
|
||||
sfo3.digitaloceanspaces.com: DigitalOcean Spaces San Francisco 3
|
||||
sfo2.digitaloceanspaces.com: DigitalOcean Spaces San Francisco 2
|
||||
fra1.digitaloceanspaces.com: DigitalOcean Spaces Frankfurt 1
|
||||
nyc3.digitaloceanspaces.com: DigitalOcean Spaces New York 3
|
||||
ams3.digitaloceanspaces.com: DigitalOcean Spaces Amsterdam 3
|
||||
sgp1.digitaloceanspaces.com: DigitalOcean Spaces Singapore 1
|
||||
lon1.digitaloceanspaces.com: DigitalOcean Spaces London 1
|
||||
tor1.digitaloceanspaces.com: DigitalOcean Spaces Toronto 1
|
||||
blr1.digitaloceanspaces.com: DigitalOcean Spaces Bangalore 1
|
||||
location_constraint: {}
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
list_url_encode: false
|
||||
use_already_exists: false
|
||||
11
backend/s3/provider/Dreamhost.yaml
Normal file
11
backend/s3/provider/Dreamhost.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
name: Dreamhost
|
||||
description: Dreamhost DreamObjects
|
||||
region: {}
|
||||
endpoint:
|
||||
objects-us-east-1.dream.io: Dream Objects endpoint
|
||||
location_constraint: {}
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
list_url_encode: false
|
||||
use_already_exists: false
|
||||
9
backend/s3/provider/Exaba.yaml
Normal file
9
backend/s3/provider/Exaba.yaml
Normal file
@@ -0,0 +1,9 @@
|
||||
name: Exaba
|
||||
description: Exaba Object Storage
|
||||
region: {}
|
||||
endpoint: {}
|
||||
location_constraint: {}
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
force_path_style: true
|
||||
21
backend/s3/provider/FileLu.yaml
Normal file
21
backend/s3/provider/FileLu.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
name: FileLu
|
||||
description: FileLu S5 (S3-Compatible Object Storage)
|
||||
region:
|
||||
global: Global
|
||||
us-east: North America (US-East)
|
||||
eu-central: Europe (EU-Central)
|
||||
ap-southeast: Asia Pacific (AP-Southeast)
|
||||
me-central: Middle East (ME-Central)
|
||||
endpoint:
|
||||
s5lu.com: Global FileLu S5 endpoint
|
||||
us.s5lu.com: North America (US-East) region endpoint
|
||||
eu.s5lu.com: Europe (EU-Central) region endpoint
|
||||
ap.s5lu.com: Asia Pacific (AP-Southeast) region endpoint
|
||||
me.s5lu.com: Middle East (ME-Central) region endpoint
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
list_version: 1
|
||||
force_path_style: true
|
||||
list_url_encode: false
|
||||
use_multipart_etag: false
|
||||
6
backend/s3/provider/FlashBlade.yaml
Normal file
6
backend/s3/provider/FlashBlade.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
name: FlashBlade
|
||||
description: Pure Storage FlashBlade Object Storage
|
||||
endpoint: {}
|
||||
quirks:
|
||||
might_gzip: false # never auto-gzip
|
||||
force_path_style: true # supports vhost but defaults to path-style
|
||||
20
backend/s3/provider/GCS.yaml
Normal file
20
backend/s3/provider/GCS.yaml
Normal file
@@ -0,0 +1,20 @@
|
||||
name: GCS
|
||||
description: Google Cloud Storage
|
||||
region: {}
|
||||
endpoint:
|
||||
https://storage.googleapis.com: Google Cloud Storage endpoint
|
||||
location_constraint: {}
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
# Google breaks the request signature by mutating the accept-encoding HTTP header
|
||||
# https://github.com/rclone/rclone/issues/6670
|
||||
use_accept_encoding_gzip: false
|
||||
sign_accept_encoding: false
|
||||
use_already_exists: true # returns BucketNameUnavailable instead of BucketAlreadyExists but good enough!
|
||||
# GCS doesn't like the x-id URL parameter the SDKv2 inserts
|
||||
use_x_id: false
|
||||
# GCS S3 doesn't support multi-part server side copy:
|
||||
# See: https://issuetracker.google.com/issues/323465186
|
||||
# So make cutoff very large which it does seem to support
|
||||
copy_cutoff: 9223372036854775807
|
||||
15
backend/s3/provider/Hetzner.yaml
Normal file
15
backend/s3/provider/Hetzner.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
name: Hetzner
|
||||
description: Hetzner Object Storage
|
||||
region:
|
||||
hel1: Helsinki
|
||||
fsn1: Falkenstein
|
||||
nbg1: Nuremberg
|
||||
endpoint:
|
||||
hel1.your-objectstorage.com: Helsinki
|
||||
fsn1.your-objectstorage.com: Falkenstein
|
||||
nbg1.your-objectstorage.com: Nuremberg
|
||||
location_constraint: {}
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
use_already_exists: false
|
||||
41
backend/s3/provider/HuaweiOBS.yaml
Normal file
41
backend/s3/provider/HuaweiOBS.yaml
Normal file
@@ -0,0 +1,41 @@
|
||||
name: HuaweiOBS
|
||||
description: Huawei Object Storage Service
|
||||
region:
|
||||
af-south-1: AF-Johannesburg
|
||||
ap-southeast-2: AP-Bangkok
|
||||
ap-southeast-3: AP-Singapore
|
||||
cn-east-3: CN East-Shanghai1
|
||||
cn-east-2: CN East-Shanghai2
|
||||
cn-north-1: CN North-Beijing1
|
||||
cn-north-4: CN North-Beijing4
|
||||
cn-south-1: CN South-Guangzhou
|
||||
ap-southeast-1: CN-Hong Kong
|
||||
sa-argentina-1: LA-Buenos Aires1
|
||||
sa-peru-1: LA-Lima1
|
||||
na-mexico-1: LA-Mexico City1
|
||||
sa-chile-1: LA-Santiago2
|
||||
sa-brazil-1: LA-Sao Paulo1
|
||||
ru-northwest-2: RU-Moscow2
|
||||
endpoint:
|
||||
obs.af-south-1.myhuaweicloud.com: AF-Johannesburg
|
||||
obs.ap-southeast-2.myhuaweicloud.com: AP-Bangkok
|
||||
obs.ap-southeast-3.myhuaweicloud.com: AP-Singapore
|
||||
obs.cn-east-3.myhuaweicloud.com: CN East-Shanghai1
|
||||
obs.cn-east-2.myhuaweicloud.com: CN East-Shanghai2
|
||||
obs.cn-north-1.myhuaweicloud.com: CN North-Beijing1
|
||||
obs.cn-north-4.myhuaweicloud.com: CN North-Beijing4
|
||||
obs.cn-south-1.myhuaweicloud.com: CN South-Guangzhou
|
||||
obs.ap-southeast-1.myhuaweicloud.com: CN-Hong Kong
|
||||
obs.sa-argentina-1.myhuaweicloud.com: LA-Buenos Aires1
|
||||
obs.sa-peru-1.myhuaweicloud.com: LA-Lima1
|
||||
obs.na-mexico-1.myhuaweicloud.com: LA-Mexico City1
|
||||
obs.sa-chile-1.myhuaweicloud.com: LA-Santiago2
|
||||
obs.sa-brazil-1.myhuaweicloud.com: LA-Sao Paulo1
|
||||
obs.ru-northwest-2.myhuaweicloud.com: RU-Moscow2
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
# Huawei OBS PFS does not support listObjectsV2, and if urlEncodeListing is turned on, the marker will not work and it keeps listing the same page forever.
|
||||
list_url_encode: false
|
||||
list_version: 1
|
||||
use_already_exists: false
|
||||
126
backend/s3/provider/IBMCOS.yaml
Normal file
126
backend/s3/provider/IBMCOS.yaml
Normal file
@@ -0,0 +1,126 @@
|
||||
name: IBMCOS
|
||||
description: IBM COS S3
|
||||
region: {}
|
||||
endpoint:
|
||||
s3.us.cloud-object-storage.appdomain.cloud: US Cross Region Endpoint
|
||||
s3.dal.us.cloud-object-storage.appdomain.cloud: US Cross Region Dallas Endpoint
|
||||
s3.wdc.us.cloud-object-storage.appdomain.cloud: US Cross Region Washington DC Endpoint
|
||||
s3.sjc.us.cloud-object-storage.appdomain.cloud: US Cross Region San Jose Endpoint
|
||||
s3.private.us.cloud-object-storage.appdomain.cloud: US Cross Region Private Endpoint
|
||||
s3.private.dal.us.cloud-object-storage.appdomain.cloud: US Cross Region Dallas Private Endpoint
|
||||
s3.private.wdc.us.cloud-object-storage.appdomain.cloud: US Cross Region Washington DC Private Endpoint
|
||||
s3.private.sjc.us.cloud-object-storage.appdomain.cloud: US Cross Region San Jose Private Endpoint
|
||||
s3.us-east.cloud-object-storage.appdomain.cloud: US Region East Endpoint
|
||||
s3.private.us-east.cloud-object-storage.appdomain.cloud: US Region East Private Endpoint
|
||||
s3.us-south.cloud-object-storage.appdomain.cloud: US Region South Endpoint
|
||||
s3.private.us-south.cloud-object-storage.appdomain.cloud: US Region South Private Endpoint
|
||||
s3.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Endpoint
|
||||
s3.fra.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Frankfurt Endpoint
|
||||
s3.mil.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Milan Endpoint
|
||||
s3.ams.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Amsterdam Endpoint
|
||||
s3.private.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Private Endpoint
|
||||
s3.private.fra.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Frankfurt Private Endpoint
|
||||
s3.private.mil.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Milan Private Endpoint
|
||||
s3.private.ams.eu.cloud-object-storage.appdomain.cloud: EU Cross Region Amsterdam Private Endpoint
|
||||
s3.eu-gb.cloud-object-storage.appdomain.cloud: Great Britain Endpoint
|
||||
s3.private.eu-gb.cloud-object-storage.appdomain.cloud: Great Britain Private Endpoint
|
||||
s3.eu-de.cloud-object-storage.appdomain.cloud: EU Region DE Endpoint
|
||||
s3.private.eu-de.cloud-object-storage.appdomain.cloud: EU Region DE Private Endpoint
|
||||
s3.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Endpoint
|
||||
s3.tok.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Tokyo Endpoint
|
||||
s3.hkg.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Hong Kong Endpoint
|
||||
s3.seo.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Seoul Endpoint
|
||||
s3.private.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Private Endpoint
|
||||
s3.private.tok.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Tokyo Private Endpoint
|
||||
s3.private.hkg.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Hong Kong Private Endpoint
|
||||
s3.private.seo.ap.cloud-object-storage.appdomain.cloud: APAC Cross Regional Seoul Private Endpoint
|
||||
s3.jp-tok.cloud-object-storage.appdomain.cloud: APAC Region Japan Endpoint
|
||||
s3.private.jp-tok.cloud-object-storage.appdomain.cloud: APAC Region Japan Private Endpoint
|
||||
s3.au-syd.cloud-object-storage.appdomain.cloud: APAC Region Australia Endpoint
|
||||
s3.private.au-syd.cloud-object-storage.appdomain.cloud: APAC Region Australia Private Endpoint
|
||||
s3.ams03.cloud-object-storage.appdomain.cloud: Amsterdam Single Site Endpoint
|
||||
s3.private.ams03.cloud-object-storage.appdomain.cloud: Amsterdam Single Site Private Endpoint
|
||||
s3.che01.cloud-object-storage.appdomain.cloud: Chennai Single Site Endpoint
|
||||
s3.private.che01.cloud-object-storage.appdomain.cloud: Chennai Single Site Private Endpoint
|
||||
s3.mel01.cloud-object-storage.appdomain.cloud: Melbourne Single Site Endpoint
|
||||
s3.private.mel01.cloud-object-storage.appdomain.cloud: Melbourne Single Site Private Endpoint
|
||||
s3.osl01.cloud-object-storage.appdomain.cloud: Oslo Single Site Endpoint
|
||||
s3.private.osl01.cloud-object-storage.appdomain.cloud: Oslo Single Site Private Endpoint
|
||||
s3.tor01.cloud-object-storage.appdomain.cloud: Toronto Single Site Endpoint
|
||||
s3.private.tor01.cloud-object-storage.appdomain.cloud: Toronto Single Site Private Endpoint
|
||||
s3.seo01.cloud-object-storage.appdomain.cloud: Seoul Single Site Endpoint
|
||||
s3.private.seo01.cloud-object-storage.appdomain.cloud: Seoul Single Site Private Endpoint
|
||||
s3.mon01.cloud-object-storage.appdomain.cloud: Montreal Single Site Endpoint
|
||||
s3.private.mon01.cloud-object-storage.appdomain.cloud: Montreal Single Site Private Endpoint
|
||||
s3.mex01.cloud-object-storage.appdomain.cloud: Mexico Single Site Endpoint
|
||||
s3.private.mex01.cloud-object-storage.appdomain.cloud: Mexico Single Site Private Endpoint
|
||||
s3.sjc04.cloud-object-storage.appdomain.cloud: San Jose Single Site Endpoint
|
||||
s3.private.sjc04.cloud-object-storage.appdomain.cloud: San Jose Single Site Private Endpoint
|
||||
s3.mil01.cloud-object-storage.appdomain.cloud: Milan Single Site Endpoint
|
||||
s3.private.mil01.cloud-object-storage.appdomain.cloud: Milan Single Site Private Endpoint
|
||||
s3.hkg02.cloud-object-storage.appdomain.cloud: Hong Kong Single Site Endpoint
|
||||
s3.private.hkg02.cloud-object-storage.appdomain.cloud: Hong Kong Single Site Private Endpoint
|
||||
s3.par01.cloud-object-storage.appdomain.cloud: Paris Single Site Endpoint
|
||||
s3.private.par01.cloud-object-storage.appdomain.cloud: Paris Single Site Private Endpoint
|
||||
s3.sng01.cloud-object-storage.appdomain.cloud: Singapore Single Site Endpoint
|
||||
s3.private.sng01.cloud-object-storage.appdomain.cloud: Singapore Single Site Private Endpoint
|
||||
location_constraint:
|
||||
us-standard: US Cross Region Standard
|
||||
us-vault: US Cross Region Vault
|
||||
us-cold: US Cross Region Cold
|
||||
us-flex: US Cross Region Flex
|
||||
us-east-standard: US East Region Standard
|
||||
us-east-vault: US East Region Vault
|
||||
us-east-cold: US East Region Cold
|
||||
us-east-flex: US East Region Flex
|
||||
us-south-standard: US South Region Standard
|
||||
us-south-vault: US South Region Vault
|
||||
us-south-cold: US South Region Cold
|
||||
us-south-flex: US South Region Flex
|
||||
eu-standard: EU Cross Region Standard
|
||||
eu-vault: EU Cross Region Vault
|
||||
eu-cold: EU Cross Region Cold
|
||||
eu-flex: EU Cross Region Flex
|
||||
eu-gb-standard: Great Britain Standard
|
||||
eu-gb-vault: Great Britain Vault
|
||||
eu-gb-cold: Great Britain Cold
|
||||
eu-gb-flex: Great Britain Flex
|
||||
ap-standard: APAC Standard
|
||||
ap-vault: APAC Vault
|
||||
ap-cold: APAC Cold
|
||||
ap-flex: APAC Flex
|
||||
mel01-standard: Melbourne Standard
|
||||
mel01-vault: Melbourne Vault
|
||||
mel01-cold: Melbourne Cold
|
||||
mel01-flex: Melbourne Flex
|
||||
tor01-standard: Toronto Standard
|
||||
tor01-vault: Toronto Vault
|
||||
tor01-cold: Toronto Cold
|
||||
tor01-flex: Toronto Flex
|
||||
acl:
|
||||
private: |-
|
||||
Owner gets FULL_CONTROL.
|
||||
No one else has access rights (default).
|
||||
This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS.
|
||||
public-read: |-
|
||||
Owner gets FULL_CONTROL.
|
||||
The AllUsers group gets READ access.
|
||||
This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS.
|
||||
public-read-write: |-
|
||||
Owner gets FULL_CONTROL.
|
||||
The AllUsers group gets READ and WRITE access.
|
||||
This acl is available on IBM Cloud (Infra), On-Premise IBM COS.
|
||||
authenticated-read: |-
|
||||
Owner gets FULL_CONTROL.
|
||||
The AuthenticatedUsers group gets READ access.
|
||||
Not supported on Buckets.
|
||||
This acl is available on IBM Cloud (Infra) and On-Premise IBM COS.
|
||||
ibm_api_key: true
|
||||
ibm_resource_instance_id: true
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
list_version: 1
|
||||
force_path_style: true
|
||||
list_url_encode: false
|
||||
use_multipart_etag: false
|
||||
use_already_exists: false # returns BucketAlreadyExists
|
||||
7
backend/s3/provider/IDrive.yaml
Normal file
7
backend/s3/provider/IDrive.yaml
Normal file
@@ -0,0 +1,7 @@
|
||||
name: IDrive
|
||||
description: IDrive e2
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
force_path_style: true
|
||||
use_already_exists: false
|
||||
17
backend/s3/provider/IONOS.yaml
Normal file
17
backend/s3/provider/IONOS.yaml
Normal file
@@ -0,0 +1,17 @@
|
||||
name: IONOS
|
||||
description: IONOS Cloud
|
||||
region:
|
||||
de: Frankfurt, Germany
|
||||
eu-central-2: Berlin, Germany
|
||||
eu-south-2: Logrono, Spain
|
||||
endpoint:
|
||||
s3-eu-central-1.ionoscloud.com: Frankfurt, Germany
|
||||
s3-eu-central-2.ionoscloud.com: Berlin, Germany
|
||||
s3-eu-south-2.ionoscloud.com: Logrono, Spain
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
# listObjectsV2 supported - https://api.ionos.com/docs/s3/#Basic-Operations-get-Bucket-list-type-2
|
||||
force_path_style: true
|
||||
list_url_encode: false
|
||||
use_already_exists: false
|
||||
10
backend/s3/provider/Intercolo.yaml
Normal file
10
backend/s3/provider/Intercolo.yaml
Normal file
@@ -0,0 +1,10 @@
|
||||
name: Intercolo
|
||||
description: Intercolo Object Storage
|
||||
region:
|
||||
de-fra: Frankfurt, Germany
|
||||
endpoint:
|
||||
de-fra.i3storage.com: Frankfurt, Germany
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
use_unsigned_payload: false # has trailer support
|
||||
11
backend/s3/provider/Leviia.yaml
Normal file
11
backend/s3/provider/Leviia.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
name: Leviia
|
||||
description: Leviia Object Storage
|
||||
region: {}
|
||||
endpoint:
|
||||
s3.leviia.com: |-
|
||||
The default endpoint
|
||||
Leviia
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
use_already_exists: false
|
||||
15
backend/s3/provider/Liara.yaml
Normal file
15
backend/s3/provider/Liara.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
name: Liara
|
||||
description: Liara Object Storage
|
||||
endpoint:
|
||||
storage.iran.liara.space: |-
|
||||
The default endpoint
|
||||
Iran
|
||||
acl: {}
|
||||
storage_class:
|
||||
STANDARD: Standard storage class
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
force_path_style: true
|
||||
list_url_encode: false
|
||||
use_multipart_etag: false # multipart ETags differ from AWS
|
||||
use_already_exists: false
|
||||
26
backend/s3/provider/Linode.yaml
Normal file
26
backend/s3/provider/Linode.yaml
Normal file
@@ -0,0 +1,26 @@
|
||||
name: Linode
|
||||
description: Linode Object Storage
|
||||
endpoint:
|
||||
nl-ams-1.linodeobjects.com: Amsterdam (Netherlands), nl-ams-1
|
||||
us-southeast-1.linodeobjects.com: Atlanta, GA (USA), us-southeast-1
|
||||
in-maa-1.linodeobjects.com: Chennai (India), in-maa-1
|
||||
us-ord-1.linodeobjects.com: Chicago, IL (USA), us-ord-1
|
||||
eu-central-1.linodeobjects.com: Frankfurt (Germany), eu-central-1
|
||||
id-cgk-1.linodeobjects.com: Jakarta (Indonesia), id-cgk-1
|
||||
gb-lon-1.linodeobjects.com: London 2 (Great Britain), gb-lon-1
|
||||
us-lax-1.linodeobjects.com: Los Angeles, CA (USA), us-lax-1
|
||||
es-mad-1.linodeobjects.com: Madrid (Spain), es-mad-1
|
||||
au-mel-1.linodeobjects.com: Melbourne (Australia), au-mel-1
|
||||
us-mia-1.linodeobjects.com: Miami, FL (USA), us-mia-1
|
||||
it-mil-1.linodeobjects.com: Milan (Italy), it-mil-1
|
||||
us-east-1.linodeobjects.com: Newark, NJ (USA), us-east-1
|
||||
jp-osa-1.linodeobjects.com: Osaka (Japan), jp-osa-1
|
||||
fr-par-1.linodeobjects.com: Paris (France), fr-par-1
|
||||
br-gru-1.linodeobjects.com: São Paulo (Brazil), br-gru-1
|
||||
us-sea-1.linodeobjects.com: Seattle, WA (USA), us-sea-1
|
||||
ap-south-1.linodeobjects.com: Singapore, ap-south-1
|
||||
sg-sin-1.linodeobjects.com: Singapore 2, sg-sin-1
|
||||
se-sto-1.linodeobjects.com: Stockholm (Sweden), se-sto-1
|
||||
us-iad-1.linodeobjects.com: Washington, DC, (USA), us-iad-1
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
12
backend/s3/provider/LyveCloud.yaml
Normal file
12
backend/s3/provider/LyveCloud.yaml
Normal file
@@ -0,0 +1,12 @@
|
||||
name: LyveCloud
|
||||
description: Seagate Lyve Cloud
|
||||
region: {}
|
||||
endpoint:
|
||||
's3.us-west-1.{account_name}.lyve.seagate.com': US West 1 - California
|
||||
's3.eu-west-1.{account_name}.lyve.seagate.com': EU West 1 - Ireland
|
||||
location_constraint: {}
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
use_multipart_etag: false # multipart ETags differ from AWS
|
||||
use_already_exists: false
|
||||
16
backend/s3/provider/Magalu.yaml
Normal file
16
backend/s3/provider/Magalu.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
name: Magalu
|
||||
description: Magalu Object Storage
|
||||
endpoint:
|
||||
br-se1.magaluobjects.com: São Paulo, SP (BR), br-se1
|
||||
br-ne1.magaluobjects.com: Fortaleza, CE (BR), br-ne1
|
||||
acl: {}
|
||||
storage_class:
|
||||
STANDARD: Standard storage class
|
||||
GLACIER_IR: Glacier Instant Retrieval storage class
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
list_version: 1
|
||||
force_path_style: true
|
||||
list_url_encode: false
|
||||
use_multipart_etag: false
|
||||
use_already_exists: false
|
||||
15
backend/s3/provider/Mega.yaml
Normal file
15
backend/s3/provider/Mega.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
name: Mega
|
||||
description: MEGA S4 Object Storage
|
||||
endpoint:
|
||||
s3.eu-central-1.s4.mega.io: Mega S4 eu-central-1 (Amsterdam)
|
||||
s3.eu-central-2.s4.mega.io: Mega S4 eu-central-2 (Bettembourg)
|
||||
s3.ca-central-1.s4.mega.io: Mega S4 ca-central-1 (Montreal)
|
||||
s3.ca-west-1.s4.mega.io: Mega S4 ca-west-1 (Vancouver)
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
list_version: 2
|
||||
force_path_style: true
|
||||
list_url_encode: true
|
||||
use_multipart_etag: false
|
||||
use_already_exists: false
|
||||
copy_cutoff: 9223372036854775807
|
||||
18
backend/s3/provider/Minio.yaml
Normal file
18
backend/s3/provider/Minio.yaml
Normal file
@@ -0,0 +1,18 @@
|
||||
name: Minio
|
||||
description: Minio Object Storage
|
||||
region: {}
|
||||
endpoint: {}
|
||||
location_constraint: {}
|
||||
acl: {}
|
||||
server_side_encryption:
|
||||
'': None
|
||||
AES256: AES256
|
||||
aws:kms: aws:kms
|
||||
bucket_acl: true
|
||||
sse_customer_algorithm: true
|
||||
sse_customer_key: true
|
||||
sse_customer_key_base64: true
|
||||
sse_customer_key_md5: true
|
||||
sse_kms_key_id: true
|
||||
quirks:
|
||||
force_path_style: true
|
||||
12
backend/s3/provider/Netease.yaml
Normal file
12
backend/s3/provider/Netease.yaml
Normal file
@@ -0,0 +1,12 @@
|
||||
name: Netease
|
||||
description: Netease Object Storage (NOS)
|
||||
region: {}
|
||||
endpoint: {}
|
||||
location_constraint: {}
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
list_version: 1
|
||||
list_url_encode: false
|
||||
use_multipart_etag: false
|
||||
use_already_exists: false
|
||||
36
backend/s3/provider/OVHcloud.yaml
Normal file
36
backend/s3/provider/OVHcloud.yaml
Normal file
@@ -0,0 +1,36 @@
|
||||
name: OVHcloud
|
||||
description: OVHcloud Object Storage
|
||||
region:
|
||||
gra: Gravelines, France
|
||||
rbx: Roubaix, France
|
||||
sbg: Strasbourg, France
|
||||
eu-west-par: Paris, France (3AZ)
|
||||
de: Frankfurt, Germany
|
||||
uk: London, United Kingdom
|
||||
waw: Warsaw, Poland
|
||||
bhs: Beauharnois, Canada
|
||||
ca-east-tor: Toronto, Canada
|
||||
sgp: Singapore
|
||||
ap-southeast-syd: Sydney, Australia
|
||||
ap-south-mum: Mumbai, India
|
||||
us-east-va: Vint Hill, Virginia, USA
|
||||
us-west-or: Hillsboro, Oregon, USA
|
||||
rbx-archive: Roubaix, France (Cold Archive)
|
||||
endpoint:
|
||||
s3.gra.io.cloud.ovh.net: OVHcloud Gravelines, France
|
||||
s3.rbx.io.cloud.ovh.net: OVHcloud Roubaix, France
|
||||
s3.sbg.io.cloud.ovh.net: OVHcloud Strasbourg, France
|
||||
s3.eu-west-par.io.cloud.ovh.net: OVHcloud Paris, France (3AZ)
|
||||
s3.de.io.cloud.ovh.net: OVHcloud Frankfurt, Germany
|
||||
s3.uk.io.cloud.ovh.net: OVHcloud London, United Kingdom
|
||||
s3.waw.io.cloud.ovh.net: OVHcloud Warsaw, Poland
|
||||
s3.bhs.io.cloud.ovh.net: OVHcloud Beauharnois, Canada
|
||||
s3.ca-east-tor.io.cloud.ovh.net: OVHcloud Toronto, Canada
|
||||
s3.sgp.io.cloud.ovh.net: OVHcloud Singapore
|
||||
s3.ap-southeast-syd.io.cloud.ovh.net: OVHcloud Sydney, Australia
|
||||
s3.ap-south-mum.io.cloud.ovh.net: OVHcloud Mumbai, India
|
||||
s3.us-east-va.io.cloud.ovh.us: OVHcloud Vint Hill, Virginia, USA
|
||||
s3.us-west-or.io.cloud.ovh.us: OVHcloud Hillsboro, Oregon, USA
|
||||
s3.rbx-archive.io.cloud.ovh.net: OVHcloud Roubaix, France (Cold Archive)
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
39
backend/s3/provider/Other.yaml
Normal file
39
backend/s3/provider/Other.yaml
Normal file
@@ -0,0 +1,39 @@
|
||||
name: Other
|
||||
description: Any other S3 compatible provider
|
||||
region:
|
||||
'': |-
|
||||
Use this if unsure.
|
||||
Will use v4 signatures and an empty region.
|
||||
other-v2-signature: |-
|
||||
Use this only if v4 signatures don't work.
|
||||
E.g. pre Jewel/v10 CEPH.
|
||||
endpoint: {}
|
||||
location_constraint: {}
|
||||
acl:
|
||||
private: |-
|
||||
Owner gets FULL_CONTROL.
|
||||
No one else has access rights (default).
|
||||
public-read: |-
|
||||
Owner gets FULL_CONTROL.
|
||||
The AllUsers group gets READ access.
|
||||
public-read-write: |-
|
||||
Owner gets FULL_CONTROL.
|
||||
The AllUsers group gets READ and WRITE access.
|
||||
Granting this on a bucket is generally not recommended.
|
||||
authenticated-read: |-
|
||||
Owner gets FULL_CONTROL.
|
||||
The AuthenticatedUsers group gets READ access.
|
||||
bucket-owner-read: |-
|
||||
Object owner gets FULL_CONTROL.
|
||||
Bucket owner gets READ access.
|
||||
If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
|
||||
bucket-owner-full-control: |-
|
||||
Both the object owner and the bucket owner get FULL_CONTROL over the object.
|
||||
If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
list_version: 1
|
||||
force_path_style: true
|
||||
list_url_encode: false
|
||||
use_multipart_etag: false
|
||||
use_already_exists: false
|
||||
18
backend/s3/provider/Outscale.yaml
Normal file
18
backend/s3/provider/Outscale.yaml
Normal file
@@ -0,0 +1,18 @@
|
||||
name: Outscale
|
||||
description: OUTSCALE Object Storage (OOS)
|
||||
region:
|
||||
eu-west-2: Paris, France
|
||||
us-east-2: New Jersey, USA
|
||||
us-west-1: California, USA
|
||||
cloudgouv-eu-west-1: SecNumCloud, Paris, France
|
||||
ap-northeast-1: Tokyo, Japan
|
||||
endpoint:
|
||||
oos.eu-west-2.outscale.com: Outscale EU West 2 (Paris)
|
||||
oos.us-east-2.outscale.com: Outscale US east 2 (New Jersey)
|
||||
oos.us-west-1.outscale.com: Outscale EU West 1 (California)
|
||||
oos.cloudgouv-eu-west-1.outscale.com: Outscale SecNumCloud (Paris)
|
||||
oos.ap-northeast-1.outscale.com: Outscale AP Northeast 1 (Japan)
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
force_path_style: true
|
||||
19
backend/s3/provider/Petabox.yaml
Normal file
19
backend/s3/provider/Petabox.yaml
Normal file
@@ -0,0 +1,19 @@
|
||||
name: Petabox
|
||||
description: Petabox Object Storage
|
||||
region:
|
||||
us-east-1: US East (N. Virginia)
|
||||
eu-central-1: Europe (Frankfurt)
|
||||
ap-southeast-1: Asia Pacific (Singapore)
|
||||
me-south-1: Middle East (Bahrain)
|
||||
sa-east-1: South America (São Paulo)
|
||||
endpoint:
|
||||
s3.petabox.io: US East (N. Virginia)
|
||||
s3.us-east-1.petabox.io: US East (N. Virginia)
|
||||
s3.eu-central-1.petabox.io: Europe (Frankfurt)
|
||||
s3.ap-southeast-1.petabox.io: Asia Pacific (Singapore)
|
||||
s3.me-south-1.petabox.io: Middle East (Bahrain)
|
||||
s3.sa-east-1.petabox.io: South America (São Paulo)
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
use_already_exists: false
|
||||
53
backend/s3/provider/Qiniu.yaml
Normal file
53
backend/s3/provider/Qiniu.yaml
Normal file
@@ -0,0 +1,53 @@
|
||||
name: Qiniu
|
||||
description: Qiniu Object Storage (Kodo)
|
||||
region:
|
||||
cn-east-1: |-
|
||||
The default endpoint - a good choice if you are unsure.
|
||||
East China Region 1.
|
||||
Needs location constraint cn-east-1.
|
||||
cn-east-2: |-
|
||||
East China Region 2.
|
||||
Needs location constraint cn-east-2.
|
||||
cn-north-1: |-
|
||||
North China Region 1.
|
||||
Needs location constraint cn-north-1.
|
||||
cn-south-1: |-
|
||||
South China Region 1.
|
||||
Needs location constraint cn-south-1.
|
||||
us-north-1: |-
|
||||
North America Region.
|
||||
Needs location constraint us-north-1.
|
||||
ap-southeast-1: |-
|
||||
Southeast Asia Region 1.
|
||||
Needs location constraint ap-southeast-1.
|
||||
ap-northeast-1: |-
|
||||
Northeast Asia Region 1.
|
||||
Needs location constraint ap-northeast-1.
|
||||
endpoint:
|
||||
s3-cn-east-1.qiniucs.com: East China Endpoint 1
|
||||
s3-cn-east-2.qiniucs.com: East China Endpoint 2
|
||||
s3-cn-north-1.qiniucs.com: North China Endpoint 1
|
||||
s3-cn-south-1.qiniucs.com: South China Endpoint 1
|
||||
s3-us-north-1.qiniucs.com: North America Endpoint 1
|
||||
s3-ap-southeast-1.qiniucs.com: Southeast Asia Endpoint 1
|
||||
s3-ap-northeast-1.qiniucs.com: Northeast Asia Endpoint 1
|
||||
location_constraint:
|
||||
cn-east-1: East China Region 1
|
||||
cn-east-2: East China Region 2
|
||||
cn-north-1: North China Region 1
|
||||
cn-south-1: South China Region 1
|
||||
us-north-1: North America Region 1
|
||||
ap-southeast-1: Southeast Asia Region 1
|
||||
ap-northeast-1: Northeast Asia Region 1
|
||||
acl: {}
|
||||
storage_class:
|
||||
STANDARD: Standard storage class
|
||||
LINE: Infrequent access storage mode
|
||||
GLACIER: Archive storage mode
|
||||
DEEP_ARCHIVE: Deep archive storage mode
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
use_multipart_etag: false
|
||||
list_url_encode: false
|
||||
force_path_style: true
|
||||
use_already_exists: false
|
||||
15
backend/s3/provider/Rabata.yaml
Normal file
15
backend/s3/provider/Rabata.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
name: Rabata
|
||||
description: Rabata Cloud Storage
|
||||
region:
|
||||
us-east-1: US East (N. Virginia)
|
||||
eu-west-1: EU (Ireland)
|
||||
eu-west-2: EU (London)
|
||||
endpoint:
|
||||
s3.us-east-1.rabata.io: US East (N. Virginia)
|
||||
s3.eu-west-1.rabata.io: EU West (Ireland)
|
||||
s3.eu-west-2.rabata.io: EU West (London)
|
||||
location_constraint:
|
||||
us-east-1: US East (N. Virginia)
|
||||
eu-west-1: EU (Ireland)
|
||||
eu-west-2: EU (London)
|
||||
# server side copy not supported
|
||||
67
backend/s3/provider/RackCorp.yaml
Normal file
67
backend/s3/provider/RackCorp.yaml
Normal file
@@ -0,0 +1,67 @@
|
||||
name: RackCorp
|
||||
description: RackCorp Object Storage
|
||||
region:
|
||||
global: Global CDN (All locations) Region
|
||||
au: Australia (All states)
|
||||
au-nsw: NSW (Australia) Region
|
||||
au-qld: QLD (Australia) Region
|
||||
au-vic: VIC (Australia) Region
|
||||
au-wa: Perth (Australia) Region
|
||||
ph: Manila (Philippines) Region
|
||||
th: Bangkok (Thailand) Region
|
||||
hk: HK (Hong Kong) Region
|
||||
mn: Ulaanbaatar (Mongolia) Region
|
||||
kg: Bishkek (Kyrgyzstan) Region
|
||||
id: Jakarta (Indonesia) Region
|
||||
jp: Tokyo (Japan) Region
|
||||
sg: SG (Singapore) Region
|
||||
de: Frankfurt (Germany) Region
|
||||
us: USA (AnyCast) Region
|
||||
us-east-1: New York (USA) Region
|
||||
us-west-1: Freemont (USA) Region
|
||||
nz: Auckland (New Zealand) Region
|
||||
endpoint:
|
||||
s3.rackcorp.com: Global (AnyCast) Endpoint
|
||||
au.s3.rackcorp.com: Australia (Anycast) Endpoint
|
||||
au-nsw.s3.rackcorp.com: Sydney (Australia) Endpoint
|
||||
au-qld.s3.rackcorp.com: Brisbane (Australia) Endpoint
|
||||
au-vic.s3.rackcorp.com: Melbourne (Australia) Endpoint
|
||||
au-wa.s3.rackcorp.com: Perth (Australia) Endpoint
|
||||
ph.s3.rackcorp.com: Manila (Philippines) Endpoint
|
||||
th.s3.rackcorp.com: Bangkok (Thailand) Endpoint
|
||||
hk.s3.rackcorp.com: HK (Hong Kong) Endpoint
|
||||
mn.s3.rackcorp.com: Ulaanbaatar (Mongolia) Endpoint
|
||||
kg.s3.rackcorp.com: Bishkek (Kyrgyzstan) Endpoint
|
||||
id.s3.rackcorp.com: Jakarta (Indonesia) Endpoint
|
||||
jp.s3.rackcorp.com: Tokyo (Japan) Endpoint
|
||||
sg.s3.rackcorp.com: SG (Singapore) Endpoint
|
||||
de.s3.rackcorp.com: Frankfurt (Germany) Endpoint
|
||||
us.s3.rackcorp.com: USA (AnyCast) Endpoint
|
||||
us-east-1.s3.rackcorp.com: New York (USA) Endpoint
|
||||
us-west-1.s3.rackcorp.com: Freemont (USA) Endpoint
|
||||
nz.s3.rackcorp.com: Auckland (New Zealand) Endpoint
|
||||
location_constraint:
|
||||
global: Global CDN Region
|
||||
au: Australia (All locations)
|
||||
au-nsw: NSW (Australia) Region
|
||||
au-qld: QLD (Australia) Region
|
||||
au-vic: VIC (Australia) Region
|
||||
au-wa: Perth (Australia) Region
|
||||
ph: Manila (Philippines) Region
|
||||
th: Bangkok (Thailand) Region
|
||||
hk: HK (Hong Kong) Region
|
||||
mn: Ulaanbaatar (Mongolia) Region
|
||||
kg: Bishkek (Kyrgyzstan) Region
|
||||
id: Jakarta (Indonesia) Region
|
||||
jp: Tokyo (Japan) Region
|
||||
sg: SG (Singapore) Region
|
||||
de: Frankfurt (Germany) Region
|
||||
us: USA (AnyCast) Region
|
||||
us-east-1: New York (USA) Region
|
||||
us-west-1: Fremont (USA) Region
|
||||
nz: Auckland (New Zealand) Region
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
use_multipart_etag: false
|
||||
use_already_exists: false
|
||||
11
backend/s3/provider/Rclone.yaml
Normal file
11
backend/s3/provider/Rclone.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
name: Rclone
|
||||
description: Rclone S3 Server
|
||||
endpoint: {}
|
||||
quirks:
|
||||
force_path_style: true
|
||||
use_multipart_etag: false
|
||||
use_already_exists: false
|
||||
# rclone serve doesn't support multi-part server side copy:
|
||||
# See: https://github.com/rclone/rclone/issues/7454
|
||||
# So make cutoff very large which it does support
|
||||
copy_cutoff: 9223372036854775807
|
||||
28
backend/s3/provider/Scaleway.yaml
Normal file
28
backend/s3/provider/Scaleway.yaml
Normal file
@@ -0,0 +1,28 @@
|
||||
name: Scaleway
|
||||
description: Scaleway Object Storage
|
||||
region:
|
||||
nl-ams: Amsterdam, The Netherlands
|
||||
fr-par: Paris, France
|
||||
pl-waw: Warsaw, Poland
|
||||
endpoint:
|
||||
s3.nl-ams.scw.cloud: Amsterdam Endpoint
|
||||
s3.fr-par.scw.cloud: Paris Endpoint
|
||||
s3.pl-waw.scw.cloud: Warsaw Endpoint
|
||||
acl: {}
|
||||
storage_class:
|
||||
'': Default.
|
||||
STANDARD: |-
|
||||
The Standard class for any upload.
|
||||
Suitable for on-demand content like streaming or CDN.
|
||||
Available in all regions.
|
||||
GLACIER: |-
|
||||
Archived storage.
|
||||
Prices are lower, but it needs to be restored first to be accessed.
|
||||
Available in FR-PAR and NL-AMS regions.
|
||||
ONEZONE_IA: |-
|
||||
One Zone - Infrequent Access.
|
||||
A good choice for storing secondary backup copies or easily re-creatable data.
|
||||
Available in the FR-PAR region only.
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
max_upload_parts: 1000
|
||||
14
backend/s3/provider/SeaweedFS.yaml
Normal file
14
backend/s3/provider/SeaweedFS.yaml
Normal file
@@ -0,0 +1,14 @@
|
||||
name: SeaweedFS
|
||||
description: SeaweedFS S3
|
||||
region: {}
|
||||
endpoint:
|
||||
localhost:8333: SeaweedFS S3 localhost
|
||||
location_constraint: {}
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
list_version: 1
|
||||
force_path_style: true
|
||||
list_url_encode: false
|
||||
use_multipart_etag: false
|
||||
use_already_exists: false
|
||||
8
backend/s3/provider/Selectel.yaml
Normal file
8
backend/s3/provider/Selectel.yaml
Normal file
@@ -0,0 +1,8 @@
|
||||
name: Selectel
|
||||
description: Selectel Object Storage
|
||||
region:
|
||||
ru-1: St. Petersburg
|
||||
endpoint:
|
||||
s3.ru-1.storage.selcloud.ru: Saint Petersburg
|
||||
quirks:
|
||||
list_url_encode: false
|
||||
17
backend/s3/provider/Servercore.yaml
Normal file
17
backend/s3/provider/Servercore.yaml
Normal file
@@ -0,0 +1,17 @@
|
||||
name: Servercore
|
||||
description: Servercore Object Storage
|
||||
region:
|
||||
ru-1: St. Petersburg
|
||||
gis-1: Moscow
|
||||
ru-7: Moscow
|
||||
uz-2: Tashkent, Uzbekistan
|
||||
kz-1: Almaty, Kazakhstan
|
||||
endpoint:
|
||||
s3.ru-1.storage.selcloud.ru: Saint Petersburg
|
||||
s3.gis-1.storage.selcloud.ru: Moscow
|
||||
s3.ru-7.storage.selcloud.ru: Moscow
|
||||
s3.uz-2.srvstorage.uz: Tashkent, Uzbekistan
|
||||
s3.kz-1.srvstorage.kz: Almaty, Kazakhstan
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
list_url_encode: false
|
||||
5
backend/s3/provider/SpectraLogic.yaml
Normal file
5
backend/s3/provider/SpectraLogic.yaml
Normal file
@@ -0,0 +1,5 @@
|
||||
name: SpectraLogic
|
||||
description: Spectra Logic Black Pearl
|
||||
endpoint: {}
|
||||
quirks:
|
||||
force_path_style: true # path-style required
|
||||
14
backend/s3/provider/StackPath.yaml
Normal file
14
backend/s3/provider/StackPath.yaml
Normal file
@@ -0,0 +1,14 @@
|
||||
name: StackPath
|
||||
description: StackPath Object Storage
|
||||
region: {}
|
||||
endpoint:
|
||||
s3.us-east-2.stackpathstorage.com: US East Endpoint
|
||||
s3.us-west-1.stackpathstorage.com: US West Endpoint
|
||||
s3.eu-central-1.stackpathstorage.com: EU Endpoint
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
list_version: 1
|
||||
force_path_style: true
|
||||
list_url_encode: false
|
||||
use_already_exists: false
|
||||
11
backend/s3/provider/Storj.yaml
Normal file
11
backend/s3/provider/Storj.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
name: Storj
|
||||
description: Storj (S3 Compatible Gateway)
|
||||
endpoint:
|
||||
gateway.storjshare.io: Global Hosted Gateway
|
||||
quirks:
|
||||
use_already_exists: false # returns BucketAlreadyExists
|
||||
# Storj doesn't support multi-part server side copy:
|
||||
# https://github.com/storj/roadmap/issues/40
|
||||
# So make cutoff very large which it does support
|
||||
copy_cutoff: 9223372036854775807
|
||||
min_chunk_size: 67108864
|
||||
18
backend/s3/provider/Synology.yaml
Normal file
18
backend/s3/provider/Synology.yaml
Normal file
@@ -0,0 +1,18 @@
|
||||
name: Synology
|
||||
description: Synology C2 Object Storage
|
||||
region:
|
||||
eu-001: Europe Region 1
|
||||
eu-002: Europe Region 2
|
||||
us-001: US Region 1
|
||||
us-002: US Region 2
|
||||
tw-001: Asia (Taiwan)
|
||||
endpoint:
|
||||
eu-001.s3.synologyc2.net: EU Endpoint 1
|
||||
eu-002.s3.synologyc2.net: EU Endpoint 2
|
||||
us-001.s3.synologyc2.net: US Endpoint 1
|
||||
us-002.s3.synologyc2.net: US Endpoint 2
|
||||
tw-001.s3.synologyc2.net: TW Endpoint 1
|
||||
location_constraint: {}
|
||||
quirks:
|
||||
use_multipart_etag: false
|
||||
use_already_exists: false
|
||||
52
backend/s3/provider/TencentCOS.yaml
Normal file
52
backend/s3/provider/TencentCOS.yaml
Normal file
@@ -0,0 +1,52 @@
|
||||
name: TencentCOS
|
||||
description: Tencent Cloud Object Storage (COS)
|
||||
endpoint:
|
||||
cos.ap-beijing.myqcloud.com: Beijing Region
|
||||
cos.ap-nanjing.myqcloud.com: Nanjing Region
|
||||
cos.ap-shanghai.myqcloud.com: Shanghai Region
|
||||
cos.ap-guangzhou.myqcloud.com: Guangzhou Region
|
||||
cos.ap-chengdu.myqcloud.com: Chengdu Region
|
||||
cos.ap-chongqing.myqcloud.com: Chongqing Region
|
||||
cos.ap-hongkong.myqcloud.com: Hong Kong (China) Region
|
||||
cos.ap-singapore.myqcloud.com: Singapore Region
|
||||
cos.ap-mumbai.myqcloud.com: Mumbai Region
|
||||
cos.ap-seoul.myqcloud.com: Seoul Region
|
||||
cos.ap-bangkok.myqcloud.com: Bangkok Region
|
||||
cos.ap-tokyo.myqcloud.com: Tokyo Region
|
||||
cos.na-siliconvalley.myqcloud.com: Silicon Valley Region
|
||||
cos.na-ashburn.myqcloud.com: Virginia Region
|
||||
cos.na-toronto.myqcloud.com: Toronto Region
|
||||
cos.eu-frankfurt.myqcloud.com: Frankfurt Region
|
||||
cos.eu-moscow.myqcloud.com: Moscow Region
|
||||
cos.accelerate.myqcloud.com: Use Tencent COS Accelerate Endpoint
|
||||
acl:
|
||||
default: |-
|
||||
Owner gets Full_CONTROL.
|
||||
No one else has access rights (default).
|
||||
public-read: |-
|
||||
Owner gets FULL_CONTROL.
|
||||
The AllUsers group gets READ access.
|
||||
public-read-write: |-
|
||||
Owner gets FULL_CONTROL.
|
||||
The AllUsers group gets READ and WRITE access.
|
||||
Granting this on a bucket is generally not recommended.
|
||||
authenticated-read: |-
|
||||
Owner gets FULL_CONTROL.
|
||||
The AuthenticatedUsers group gets READ access.
|
||||
bucket-owner-read: |-
|
||||
Object owner gets FULL_CONTROL.
|
||||
Bucket owner gets READ access.
|
||||
If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
|
||||
bucket-owner-full-control: |-
|
||||
Both the object owner and the bucket owner get FULL_CONTROL over the object.
|
||||
If you specify this canned ACL when creating a bucket, Amazon S3 ignores it.
|
||||
storage_class:
|
||||
'': Default
|
||||
STANDARD: Standard storage class
|
||||
ARCHIVE: Archive storage mode
|
||||
STANDARD_IA: Infrequent access storage mode
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
list_version: 1
|
||||
use_multipart_etag: false
|
||||
use_already_exists: false
|
||||
21
backend/s3/provider/Wasabi.yaml
Normal file
21
backend/s3/provider/Wasabi.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
name: Wasabi
|
||||
description: Wasabi Object Storage
|
||||
region: {}
|
||||
endpoint:
|
||||
s3.wasabisys.com: Wasabi US East 1 (N. Virginia)
|
||||
s3.us-east-2.wasabisys.com: Wasabi US East 2 (N. Virginia)
|
||||
s3.us-central-1.wasabisys.com: Wasabi US Central 1 (Texas)
|
||||
s3.us-west-1.wasabisys.com: Wasabi US West 1 (Oregon)
|
||||
s3.ca-central-1.wasabisys.com: Wasabi CA Central 1 (Toronto)
|
||||
s3.eu-central-1.wasabisys.com: Wasabi EU Central 1 (Amsterdam)
|
||||
s3.eu-central-2.wasabisys.com: Wasabi EU Central 2 (Frankfurt)
|
||||
s3.eu-west-1.wasabisys.com: Wasabi EU West 1 (London)
|
||||
s3.eu-west-2.wasabisys.com: Wasabi EU West 2 (Paris)
|
||||
s3.eu-south-1.wasabisys.com: Wasabi EU South 1 (Milan)
|
||||
s3.ap-northeast-1.wasabisys.com: Wasabi AP Northeast 1 (Tokyo) endpoint
|
||||
s3.ap-northeast-2.wasabisys.com: Wasabi AP Northeast 2 (Osaka) endpoint
|
||||
s3.ap-southeast-1.wasabisys.com: Wasabi AP Southeast 1 (Singapore)
|
||||
s3.ap-southeast-2.wasabisys.com: Wasabi AP Southeast 2 (Sydney)
|
||||
location_constraint: {}
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
14
backend/s3/provider/Zata.yaml
Normal file
14
backend/s3/provider/Zata.yaml
Normal file
@@ -0,0 +1,14 @@
|
||||
name: Zata
|
||||
description: Zata (S3 compatible Gateway)
|
||||
region:
|
||||
us-east-1: Indore, Madhya Pradesh, India
|
||||
endpoint:
|
||||
idr01.zata.ai: South Asia Endpoint
|
||||
location_constraint: {}
|
||||
acl: {}
|
||||
bucket_acl: true
|
||||
quirks:
|
||||
use_multipart_etag: false
|
||||
might_gzip: false
|
||||
use_unsigned_payload: false
|
||||
use_already_exists: false
|
||||
236
backend/s3/providers.go
Normal file
236
backend/s3/providers.go
Normal file
@@ -0,0 +1,236 @@
|
||||
package s3
|
||||
|
||||
import (
|
||||
"embed"
|
||||
stdfs "io/fs"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
orderedmap "github.com/wk8/go-ordered-map/v2"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// YamlMap is converted to YAML in the correct order
|
||||
type YamlMap = *orderedmap.OrderedMap[string, string]
|
||||
|
||||
// NewYamlMap creates a new ordered map
|
||||
var NewYamlMap = orderedmap.New[string, string]
|
||||
|
||||
// Quirks defines all the S3 provider quirks
|
||||
type Quirks struct {
|
||||
ListVersion *int `yaml:"list_version,omitempty"` // 1 or 2
|
||||
ForcePathStyle *bool `yaml:"force_path_style,omitempty"` // true = path-style
|
||||
ListURLEncode *bool `yaml:"list_url_encode,omitempty"`
|
||||
UseMultipartEtag *bool `yaml:"use_multipart_etag,omitempty"`
|
||||
UseAlreadyExists *bool `yaml:"use_already_exists,omitempty"`
|
||||
UseAcceptEncodingGzip *bool `yaml:"use_accept_encoding_gzip,omitempty"`
|
||||
MightGzip *bool `yaml:"might_gzip,omitempty"`
|
||||
UseMultipartUploads *bool `yaml:"use_multipart_uploads,omitempty"`
|
||||
UseUnsignedPayload *bool `yaml:"use_unsigned_payload,omitempty"`
|
||||
UseXID *bool `yaml:"use_x_id,omitempty"`
|
||||
SignAcceptEncoding *bool `yaml:"sign_accept_encoding,omitempty"`
|
||||
CopyCutoff *int64 `yaml:"copy_cutoff,omitempty"`
|
||||
MaxUploadParts *int `yaml:"max_upload_parts,omitempty"`
|
||||
MinChunkSize *int64 `yaml:"min_chunk_size,omitempty"`
|
||||
}
|
||||
|
||||
// Provider defines the configurable data in each provider.yaml
|
||||
type Provider struct {
|
||||
Name string `yaml:"name,omitempty"`
|
||||
Description string `yaml:"description,omitempty"`
|
||||
Region YamlMap `yaml:"region,omitempty"`
|
||||
Endpoint YamlMap `yaml:"endpoint,omitempty"`
|
||||
LocationConstraint YamlMap `yaml:"location_constraint,omitempty"`
|
||||
ACL YamlMap `yaml:"acl,omitempty"`
|
||||
StorageClass YamlMap `yaml:"storage_class,omitempty"`
|
||||
ServerSideEncryption YamlMap `yaml:"server_side_encryption,omitempty"`
|
||||
|
||||
// other
|
||||
IBMApiKey bool `yaml:"ibm_api_key,omitempty"`
|
||||
IBMResourceInstanceID bool `yaml:"ibm_resource_instance_id,omitempty"`
|
||||
|
||||
// advanced
|
||||
BucketACL bool `yaml:"bucket_acl,omitempty"`
|
||||
DirectoryBucket bool `yaml:"directory_bucket,omitempty"`
|
||||
LeavePartsOnError bool `yaml:"leave_parts_on_error,omitempty"`
|
||||
RequesterPays bool `yaml:"requester_pays,omitempty"`
|
||||
SSECustomerAlgorithm bool `yaml:"sse_customer_algorithm,omitempty"`
|
||||
SSECustomerKey bool `yaml:"sse_customer_key,omitempty"`
|
||||
SSECustomerKeyBase64 bool `yaml:"sse_customer_key_base64,omitempty"`
|
||||
SSECustomerKeyMd5 bool `yaml:"sse_customer_key_md5,omitempty"`
|
||||
SSEKmsKeyID bool `yaml:"sse_kms_key_id,omitempty"`
|
||||
STSEndpoint bool `yaml:"sts_endpoint,omitempty"`
|
||||
UseAccelerateEndpoint bool `yaml:"use_accelerate_endpoint,omitempty"`
|
||||
|
||||
Quirks Quirks `yaml:"quirks,omitempty"`
|
||||
}
|
||||
|
||||
//go:embed provider/*.yaml
|
||||
var providerFS embed.FS
|
||||
|
||||
// addProvidersToInfo adds provider information to the fs.RegInfo
|
||||
func addProvidersToInfo(info *fs.RegInfo) *fs.RegInfo {
|
||||
providerMap := loadProviders()
|
||||
providerList := constructProviders(info.Options, providerMap)
|
||||
info.Description += strings.TrimSuffix(providerList, ", ")
|
||||
return info
|
||||
}
|
||||
|
||||
// loadProvider loads a single provider
|
||||
//
|
||||
// It returns nil if it could not be found except if "Other" which is a fatal error.
|
||||
func loadProvider(name string) *Provider {
|
||||
data, err := stdfs.ReadFile(providerFS, "provider/"+name+".yaml")
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) && name != "Other" {
|
||||
return nil
|
||||
}
|
||||
fs.Fatalf(nil, "internal error: failed to load provider %q: %v", name, err)
|
||||
}
|
||||
var p Provider
|
||||
err = yaml.Unmarshal(data, &p)
|
||||
if err != nil {
|
||||
fs.Fatalf(nil, "internal error: failed to unmarshal provider %q: %v", name, err)
|
||||
}
|
||||
return &p
|
||||
}
|
||||
|
||||
// loadProviders loads provider definitions from embedded YAML files
|
||||
func loadProviders() map[string]*Provider {
|
||||
providers, err := stdfs.ReadDir(providerFS, "provider")
|
||||
if err != nil {
|
||||
fs.Fatalf(nil, "internal error: failed to read embedded providers: %v", err)
|
||||
}
|
||||
providerMap := make(map[string]*Provider, len(providers))
|
||||
|
||||
for _, provider := range providers {
|
||||
name, _ := strings.CutSuffix(provider.Name(), ".yaml")
|
||||
p := loadProvider(name)
|
||||
providerMap[p.Name] = p
|
||||
}
|
||||
return providerMap
|
||||
}
|
||||
|
||||
// constructProviders populates fs.Options with provider-specific examples and information
|
||||
func constructProviders(options fs.Options, providerMap map[string]*Provider) string {
|
||||
// Defaults for map options set to {}
|
||||
defaults := providerMap["Other"]
|
||||
|
||||
// sort providers: AWS first, Other last, rest alphabetically
|
||||
providers := make([]*Provider, 0, len(providerMap))
|
||||
for _, p := range providerMap {
|
||||
providers = append(providers, p)
|
||||
}
|
||||
sort.Slice(providers, func(i, j int) bool {
|
||||
if providers[i].Name == "AWS" {
|
||||
return true
|
||||
}
|
||||
if providers[j].Name == "AWS" {
|
||||
return false
|
||||
}
|
||||
if providers[i].Name == "Other" {
|
||||
return false
|
||||
}
|
||||
if providers[j].Name == "Other" {
|
||||
return true
|
||||
}
|
||||
return strings.ToLower(providers[i].Name) < strings.ToLower(providers[j].Name)
|
||||
})
|
||||
|
||||
addProvider := func(sp *string, name string) {
|
||||
if *sp != "" {
|
||||
*sp += ","
|
||||
}
|
||||
*sp += name
|
||||
}
|
||||
|
||||
addBool := func(opt *fs.Option, p *Provider, flag bool) {
|
||||
if flag {
|
||||
addProvider(&opt.Provider, p.Name)
|
||||
}
|
||||
}
|
||||
|
||||
addExample := func(opt *fs.Option, p *Provider, examples, defaultExamples YamlMap) {
|
||||
if examples == nil {
|
||||
return
|
||||
}
|
||||
if examples.Len() == 0 {
|
||||
examples = defaultExamples
|
||||
}
|
||||
addProvider(&opt.Provider, p.Name)
|
||||
OUTER:
|
||||
for pair := examples.Oldest(); pair != nil; pair = pair.Next() {
|
||||
// Find an existing example to add to if possible
|
||||
for i, example := range opt.Examples {
|
||||
if example.Value == pair.Key && example.Help == pair.Value {
|
||||
addProvider(&opt.Examples[i].Provider, p.Name)
|
||||
continue OUTER
|
||||
}
|
||||
}
|
||||
// Otherwise add a new one
|
||||
opt.Examples = append(opt.Examples, fs.OptionExample{
|
||||
Value: pair.Key,
|
||||
Help: pair.Value,
|
||||
Provider: p.Name,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
var providerList strings.Builder
|
||||
|
||||
for _, p := range providers {
|
||||
for i := range options {
|
||||
opt := &options[i]
|
||||
switch opt.Name {
|
||||
case "provider":
|
||||
opt.Examples = append(opt.Examples, fs.OptionExample{
|
||||
Value: p.Name,
|
||||
Help: p.Description,
|
||||
})
|
||||
providerList.WriteString(p.Name + ", ")
|
||||
case "region":
|
||||
addExample(opt, p, p.Region, defaults.Region)
|
||||
case "endpoint":
|
||||
addExample(opt, p, p.Endpoint, defaults.Endpoint)
|
||||
case "location_constraint":
|
||||
addExample(opt, p, p.LocationConstraint, defaults.LocationConstraint)
|
||||
case "acl":
|
||||
addExample(opt, p, p.ACL, defaults.ACL)
|
||||
case "storage_class":
|
||||
addExample(opt, p, p.StorageClass, defaults.StorageClass)
|
||||
case "server_side_encryption":
|
||||
addExample(opt, p, p.ServerSideEncryption, defaults.ServerSideEncryption)
|
||||
case "bucket_acl":
|
||||
addBool(opt, p, p.BucketACL)
|
||||
case "requester_pays":
|
||||
addBool(opt, p, p.RequesterPays)
|
||||
case "sse_customer_algorithm":
|
||||
addBool(opt, p, p.SSECustomerAlgorithm)
|
||||
case "sse_kms_key_id":
|
||||
addBool(opt, p, p.SSEKmsKeyID)
|
||||
case "sse_customer_key":
|
||||
addBool(opt, p, p.SSECustomerKey)
|
||||
case "sse_customer_key_base64":
|
||||
addBool(opt, p, p.SSECustomerKeyBase64)
|
||||
case "sse_customer_key_md5":
|
||||
addBool(opt, p, p.SSECustomerKeyMd5)
|
||||
case "directory_bucket":
|
||||
addBool(opt, p, p.DirectoryBucket)
|
||||
case "ibm_api_key":
|
||||
addBool(opt, p, p.IBMApiKey)
|
||||
case "ibm_resource_instance_id":
|
||||
addBool(opt, p, p.IBMResourceInstanceID)
|
||||
case "leave_parts_on_error":
|
||||
addBool(opt, p, p.LeavePartsOnError)
|
||||
case "sts_endpoint":
|
||||
addBool(opt, p, p.STSEndpoint)
|
||||
case "use_accelerate_endpoint":
|
||||
addBool(opt, p, p.UseAccelerateEndpoint)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return strings.TrimSuffix(providerList.String(), ", ")
|
||||
}
|
||||
2587
backend/s3/s3.go
2587
backend/s3/s3.go
File diff suppressed because it is too large
Load Diff
@@ -62,14 +62,14 @@ func TestAWSDualStackOption(t *testing.T) {
|
||||
// test enabled
|
||||
ctx, opt, client := SetupS3Test(t)
|
||||
opt.UseDualStack = true
|
||||
s3Conn, err := s3Connection(ctx, opt, client)
|
||||
s3Conn, _, err := s3Connection(ctx, opt, client)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, aws.DualStackEndpointStateEnabled, s3Conn.Options().EndpointOptions.UseDualStackEndpoint)
|
||||
}
|
||||
{
|
||||
// test default case
|
||||
ctx, opt, client := SetupS3Test(t)
|
||||
s3Conn, err := s3Connection(ctx, opt, client)
|
||||
s3Conn, _, err := s3Connection(ctx, opt, client)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, aws.DualStackEndpointStateDisabled, s3Conn.Options().EndpointOptions.UseDualStackEndpoint)
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"os/exec"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
@@ -50,6 +51,9 @@ func (s *sshClientExternal) Close() error {
|
||||
func (s *sshClientExternal) NewSession() (sshSession, error) {
|
||||
session := s.f.newSSHSessionExternal()
|
||||
if s.session == nil {
|
||||
// Store the first session so Wait() and Close() can use it
|
||||
s.session = session
|
||||
} else {
|
||||
fs.Debugf(s.f, "ssh external: creating additional session")
|
||||
}
|
||||
return session, nil
|
||||
@@ -76,6 +80,8 @@ type sshSessionExternal struct {
|
||||
cancel func()
|
||||
startCalled bool
|
||||
runningSFTP bool
|
||||
waitOnce sync.Once // ensure Wait() is only called once
|
||||
waitErr error // result of the Wait() call
|
||||
}
|
||||
|
||||
func (f *Fs) newSSHSessionExternal() *sshSessionExternal {
|
||||
@@ -175,16 +181,17 @@ func (s *sshSessionExternal) exited() bool {
|
||||
|
||||
// Wait for the command to exit
|
||||
func (s *sshSessionExternal) Wait() error {
|
||||
if s.exited() {
|
||||
return nil
|
||||
}
|
||||
err := s.cmd.Wait()
|
||||
if err == nil {
|
||||
fs.Debugf(s.f, "ssh external: command exited OK")
|
||||
} else {
|
||||
fs.Debugf(s.f, "ssh external: command exited with error: %v", err)
|
||||
}
|
||||
return err
|
||||
// Use sync.Once to ensure we only wait for the process once.
|
||||
// This is safe even if Wait() is called from multiple goroutines.
|
||||
s.waitOnce.Do(func() {
|
||||
s.waitErr = s.cmd.Wait()
|
||||
if s.waitErr == nil {
|
||||
fs.Debugf(s.f, "ssh external: command exited OK")
|
||||
} else {
|
||||
fs.Debugf(s.f, "ssh external: command exited with error: %v", s.waitErr)
|
||||
}
|
||||
})
|
||||
return s.waitErr
|
||||
}
|
||||
|
||||
// Run runs cmd on the remote host. Typically, the remote
|
||||
|
||||
84
backend/sftp/ssh_external_test.go
Normal file
84
backend/sftp/ssh_external_test.go
Normal file
@@ -0,0 +1,84 @@
|
||||
//go:build !plan9
|
||||
|
||||
package sftp
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestSSHExternalWaitMultipleCalls verifies that calling Wait() multiple times
|
||||
// doesn't cause zombie processes
|
||||
func TestSSHExternalWaitMultipleCalls(t *testing.T) {
|
||||
// Create a minimal Fs object for testing
|
||||
opt := &Options{
|
||||
SSH: fs.SpaceSepList{"echo", "test"},
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
opt: *opt,
|
||||
}
|
||||
|
||||
// Create a new SSH session
|
||||
session := f.newSSHSessionExternal()
|
||||
|
||||
// Start a simple command that exits quickly
|
||||
err := session.Start("exit 0")
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Give the command time to complete
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Call Wait() multiple times - this should not cause issues
|
||||
err1 := session.Wait()
|
||||
err2 := session.Wait()
|
||||
err3 := session.Wait()
|
||||
|
||||
// All calls should return the same result (no error in this case)
|
||||
assert.NoError(t, err1)
|
||||
assert.NoError(t, err2)
|
||||
assert.NoError(t, err3)
|
||||
|
||||
// Verify the process has exited
|
||||
assert.True(t, session.exited())
|
||||
}
|
||||
|
||||
// TestSSHExternalCloseMultipleCalls verifies that calling Close() multiple times
|
||||
// followed by Wait() calls doesn't cause zombie processes
|
||||
func TestSSHExternalCloseMultipleCalls(t *testing.T) {
|
||||
// Create a minimal Fs object for testing
|
||||
opt := &Options{
|
||||
SSH: fs.SpaceSepList{"sleep", "10"},
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
opt: *opt,
|
||||
}
|
||||
|
||||
// Create a new SSH session
|
||||
session := f.newSSHSessionExternal()
|
||||
|
||||
// Start a long-running command
|
||||
err := session.Start("sleep 10")
|
||||
if err != nil {
|
||||
t.Skip("Cannot start sleep command:", err)
|
||||
}
|
||||
|
||||
// Close should cancel and wait for the process
|
||||
_ = session.Close()
|
||||
|
||||
// Additional Wait() calls should return the same error
|
||||
err2 := session.Wait()
|
||||
err3 := session.Wait()
|
||||
|
||||
// All should complete without panicking
|
||||
// err1 could be nil or an error depending on how the process was killed
|
||||
// err2 and err3 should be the same
|
||||
assert.Equal(t, err2, err3, "Subsequent Wait() calls should return same result")
|
||||
|
||||
// Verify the process has exited
|
||||
assert.True(t, session.exited())
|
||||
}
|
||||
@@ -1,7 +1,6 @@
|
||||
// Code generated by vfsgen; DO NOT EDIT.
|
||||
|
||||
//go:build !dev
|
||||
// +build !dev
|
||||
|
||||
package sharefile
|
||||
|
||||
|
||||
@@ -192,6 +192,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// if root is empty or ends with / (must be a directory)
|
||||
isRootDir := isPathDir(root)
|
||||
|
||||
root = strings.Trim(root, "/")
|
||||
|
||||
f := &Fs{
|
||||
@@ -218,6 +221,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if share == "" || dir == "" {
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Skip stat check if root is already a directory
|
||||
if isRootDir {
|
||||
return f, nil
|
||||
}
|
||||
cn, err := f.getConnection(ctx, share)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -894,6 +902,11 @@ func ensureSuffix(s, suffix string) string {
|
||||
return s + suffix
|
||||
}
|
||||
|
||||
// isPathDir determines if a path represents a directory based on trailing slash
|
||||
func isPathDir(path string) bool {
|
||||
return path == "" || strings.HasSuffix(path, "/")
|
||||
}
|
||||
|
||||
func trimPathPrefix(s, prefix string) string {
|
||||
// we need to clean the paths to make tests pass!
|
||||
s = betterPathClean(s)
|
||||
|
||||
41
backend/smb/smb_internal_test.go
Normal file
41
backend/smb/smb_internal_test.go
Normal file
@@ -0,0 +1,41 @@
|
||||
// Unit tests for internal SMB functions
|
||||
package smb
|
||||
|
||||
import "testing"
|
||||
|
||||
// TestIsPathDir tests the isPathDir function logic
|
||||
func TestIsPathDir(t *testing.T) {
|
||||
tests := []struct {
|
||||
path string
|
||||
expected bool
|
||||
}{
|
||||
// Empty path should be considered a directory
|
||||
{"", true},
|
||||
|
||||
// Paths with trailing slash should be directories
|
||||
{"/", true},
|
||||
{"share/", true},
|
||||
{"share/dir/", true},
|
||||
{"share/dir/subdir/", true},
|
||||
|
||||
// Paths without trailing slash should not be directories
|
||||
{"share", false},
|
||||
{"share/dir", false},
|
||||
{"share/dir/file", false},
|
||||
{"share/dir/subdir/file", false},
|
||||
|
||||
// Edge cases
|
||||
{"share//", true},
|
||||
{"share///", true},
|
||||
{"share/dir//", true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.path, func(t *testing.T) {
|
||||
result := isPathDir(tt.path)
|
||||
if result != tt.expected {
|
||||
t.Errorf("isPathDir(%q) = %v, want %v", tt.path, result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -561,6 +561,21 @@ func (f *Fs) setRoot(root string) {
|
||||
f.rootContainer, f.rootDirectory = bucket.Split(f.root)
|
||||
}
|
||||
|
||||
// Fetch the base container's policy to be used if/when we need to create a
|
||||
// segments container to ensure we use the same policy.
|
||||
func (f *Fs) fetchStoragePolicy(ctx context.Context, container string) (fs.Fs, error) {
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
var rxHeaders swift.Headers
|
||||
_, rxHeaders, err := f.c.Container(ctx, container)
|
||||
|
||||
f.opt.StoragePolicy = rxHeaders["X-Storage-Policy"]
|
||||
fs.Debugf(f, "Auto set StoragePolicy to %s", f.opt.StoragePolicy)
|
||||
|
||||
return shouldRetryHeaders(ctx, rxHeaders, err)
|
||||
})
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// NewFsWithConnection constructs an Fs from the path, container:path
|
||||
// and authenticated connection.
|
||||
//
|
||||
@@ -590,6 +605,7 @@ func NewFsWithConnection(ctx context.Context, opt *Options, name, root string, c
|
||||
f.opt.UseSegmentsContainer.Valid = true
|
||||
fs.Debugf(f, "Auto set use_segments_container to %v", f.opt.UseSegmentsContainer.Value)
|
||||
}
|
||||
|
||||
if f.rootContainer != "" && f.rootDirectory != "" {
|
||||
// Check to see if the object exists - ignoring directory markers
|
||||
var info swift.Object
|
||||
@@ -927,6 +943,20 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
||||
used = container.Bytes
|
||||
objects = container.Count
|
||||
total = container.QuotaBytes
|
||||
|
||||
if f.opt.UseSegmentsContainer.Value {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
segmentsContainer := f.rootContainer + segmentsContainerSuffix
|
||||
container, _, err = f.c.Container(ctx, segmentsContainer)
|
||||
return shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil && err != swift.ContainerNotFound {
|
||||
return nil, fmt.Errorf("container info failed: %w", err)
|
||||
}
|
||||
if err == nil {
|
||||
used += container.Bytes
|
||||
}
|
||||
}
|
||||
} else {
|
||||
var containers []swift.Container
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
@@ -1132,6 +1162,13 @@ func (f *Fs) newSegmentedUpload(ctx context.Context, dstContainer string, dstPat
|
||||
container: dstContainer,
|
||||
}
|
||||
if f.opt.UseSegmentsContainer.Value {
|
||||
if f.opt.StoragePolicy == "" {
|
||||
_, err = f.fetchStoragePolicy(ctx, dstContainer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
su.container += segmentsContainerSuffix
|
||||
err = f.makeContainer(ctx, su.container)
|
||||
if err != nil {
|
||||
|
||||
@@ -56,6 +56,11 @@ func (f *Fs) testNoChunk(t *testing.T) {
|
||||
uploadHash := hash.NewMultiHasher()
|
||||
in := io.TeeReader(buf, uploadHash)
|
||||
|
||||
// Track how much space is used before we put our object.
|
||||
usage, err := f.About(ctx)
|
||||
require.NoError(t, err)
|
||||
usedBeforePut := *usage.Used
|
||||
|
||||
file.Size = -1
|
||||
obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
|
||||
obj, err := f.Features().PutStream(ctx, in, obji)
|
||||
@@ -70,12 +75,20 @@ func (f *Fs) testNoChunk(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
file.Check(t, obj, f.Precision())
|
||||
|
||||
// Check how much space is used after the upload, should match the amount we
|
||||
// uploaded..
|
||||
usage, err = f.About(ctx)
|
||||
require.NoError(t, err)
|
||||
expectedUsed := usedBeforePut + obj.Size()
|
||||
require.EqualValues(t, expectedUsed, *usage.Used)
|
||||
|
||||
// Delete the object
|
||||
assert.NoError(t, obj.Remove(ctx))
|
||||
}
|
||||
|
||||
// Additional tests that aren't in the framework
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
t.Run("PolicyDiscovery", f.testPolicyDiscovery)
|
||||
t.Run("NoChunk", f.testNoChunk)
|
||||
t.Run("WithChunk", f.testWithChunk)
|
||||
t.Run("WithChunkFail", f.testWithChunkFail)
|
||||
@@ -104,12 +117,24 @@ func (f *Fs) testWithChunk(t *testing.T) {
|
||||
uploadHash := hash.NewMultiHasher()
|
||||
in := io.TeeReader(buf, uploadHash)
|
||||
|
||||
// Track how much space is used before we put our object.
|
||||
ctx := context.TODO()
|
||||
usage, err := f.About(ctx)
|
||||
require.NoError(t, err)
|
||||
usedBeforePut := *usage.Used
|
||||
|
||||
file.Size = -1
|
||||
obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
|
||||
ctx := context.TODO()
|
||||
obj, err := f.Features().PutStream(ctx, in, obji)
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, obj)
|
||||
|
||||
// Check how much space is used after the upload, should match the amount we
|
||||
// uploaded..
|
||||
usage, err = f.About(ctx)
|
||||
require.NoError(t, err)
|
||||
expectedUsed := usedBeforePut + obj.Size()
|
||||
require.EqualValues(t, expectedUsed, *usage.Used)
|
||||
}
|
||||
|
||||
func (f *Fs) testWithChunkFail(t *testing.T) {
|
||||
@@ -182,9 +207,14 @@ func (f *Fs) testCopyLargeObject(t *testing.T) {
|
||||
uploadHash := hash.NewMultiHasher()
|
||||
in := io.TeeReader(buf, uploadHash)
|
||||
|
||||
// Track how much space is used before we put our object.
|
||||
ctx := context.TODO()
|
||||
usage, err := f.About(ctx)
|
||||
require.NoError(t, err)
|
||||
usedBeforePut := *usage.Used
|
||||
|
||||
file.Size = -1
|
||||
obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
|
||||
ctx := context.TODO()
|
||||
obj, err := f.Features().PutStream(ctx, in, obji)
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, obj)
|
||||
@@ -193,6 +223,59 @@ func (f *Fs) testCopyLargeObject(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, objTarget)
|
||||
require.Equal(t, obj.Size(), objTarget.Size())
|
||||
|
||||
// Check how much space is used after the upload, should match the amount we
|
||||
// uploaded *and* the copy.
|
||||
usage, err = f.About(ctx)
|
||||
require.NoError(t, err)
|
||||
expectedUsed := usedBeforePut + obj.Size() + objTarget.Size()
|
||||
require.EqualValues(t, expectedUsed, *usage.Used)
|
||||
}
|
||||
|
||||
func (f *Fs) testPolicyDiscovery(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
container := "testPolicyDiscovery-1"
|
||||
// Reset the policy so we can test if it is populated.
|
||||
f.opt.StoragePolicy = ""
|
||||
err := f.makeContainer(ctx, container)
|
||||
require.NoError(t, err)
|
||||
_, err = f.fetchStoragePolicy(ctx, container)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Default policy for SAIO image is 1replica.
|
||||
assert.Equal(t, "1replica", f.opt.StoragePolicy)
|
||||
|
||||
// Create a container using a non-default policy, and check to ensure
|
||||
// that the created segments container uses the same non-default policy.
|
||||
policy := "Policy-1"
|
||||
container = "testPolicyDiscovery-2"
|
||||
|
||||
f.opt.StoragePolicy = policy
|
||||
err = f.makeContainer(ctx, container)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Reset the policy so we can test if it is populated, and set to the
|
||||
// non-default policy.
|
||||
f.opt.StoragePolicy = ""
|
||||
_, err = f.fetchStoragePolicy(ctx, container)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, policy, f.opt.StoragePolicy)
|
||||
|
||||
// Test that when a segmented upload container is made, the newly
|
||||
// created container inherits the non-default policy of the base
|
||||
// container.
|
||||
f.opt.StoragePolicy = ""
|
||||
f.opt.UseSegmentsContainer.Value = true
|
||||
su, err := f.newSegmentedUpload(ctx, container, "")
|
||||
require.NoError(t, err)
|
||||
// The container name we expected?
|
||||
segmentsContainer := container + segmentsContainerSuffix
|
||||
assert.Equal(t, segmentsContainer, su.container)
|
||||
// The policy we expected?
|
||||
f.opt.StoragePolicy = ""
|
||||
_, err = f.fetchStoragePolicy(ctx, su.container)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, policy, f.opt.StoragePolicy)
|
||||
}
|
||||
|
||||
var _ fstests.InternalTester = (*Fs)(nil)
|
||||
|
||||
@@ -104,6 +104,19 @@ type File struct {
|
||||
} `json:"processing"`
|
||||
}
|
||||
|
||||
// FolderSize represents the API object describing the sizes of a files and subfolders of a folder.
|
||||
type FolderSize struct {
|
||||
FilesSize int64 `json:"files_size"`
|
||||
FilesCount int64 `json:"files_count"`
|
||||
FoldersCount int64 `json:"folders_count"`
|
||||
}
|
||||
|
||||
// FolderSizes describes the subfolder sizes of a single folder.
|
||||
type FolderSizes struct {
|
||||
Direct FolderSize `json:"direct"`
|
||||
Recursive FolderSize `json:"recursive"`
|
||||
}
|
||||
|
||||
// CreateFolderRequest represents the JSON API object
|
||||
// that's sent to the create folder API endpoint.
|
||||
type CreateFolderRequest struct {
|
||||
@@ -126,6 +139,9 @@ type ListFilesResponse struct {
|
||||
Items []File `json:"items"`
|
||||
}
|
||||
|
||||
// FolderSizesResponse represents the response from the folder-sizes endpoint.
|
||||
type FolderSizesResponse map[string]FolderSizes
|
||||
|
||||
// DeleteFoldersRequest represents the JSON API object
|
||||
// that's sent to the delete folders API endpoint.
|
||||
type DeleteFoldersRequest struct {
|
||||
|
||||
@@ -97,7 +97,8 @@ any root slug set.`,
|
||||
Advanced: true,
|
||||
Default: encoder.Display | encoder.EncodeInvalidUtf8 | encoder.EncodeBackSlash,
|
||||
},
|
||||
}})
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Fs represents a remote uloz.to storage
|
||||
@@ -143,7 +144,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
f.rest.SetHeader("X-Auth-Token", f.opt.AppToken)
|
||||
|
||||
auth, err := f.authenticate(ctx)
|
||||
|
||||
if err != nil {
|
||||
return f, err
|
||||
}
|
||||
@@ -178,6 +178,20 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return f, err
|
||||
}
|
||||
|
||||
// About implements the Abouter interface for Uloz.to.
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
used, err := f.getUsedSize(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
usage := fs.Usage{
|
||||
Used: &used,
|
||||
}
|
||||
|
||||
return &usage, nil
|
||||
}
|
||||
|
||||
// errorHandler parses a non 2xx error response into an error
|
||||
func errorHandler(resp *http.Response) error {
|
||||
// Decode error response
|
||||
@@ -253,7 +267,6 @@ func (f *Fs) authenticate(ctx context.Context) (response *api.AuthenticateRespon
|
||||
httpResp, err := f.rest.CallJSON(ctx, &opts, &authRequest, &response)
|
||||
return f.shouldRetry(ctx, httpResp, err, false)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -263,6 +276,32 @@ func (f *Fs) authenticate(ctx context.Context) (response *api.AuthenticateRespon
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (f *Fs) getUsedSize(ctx context.Context) (int64, error) {
|
||||
rootID, err := f.dirCache.RootID(ctx, false)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: fmt.Sprintf("/v6/user/%s/folder/%s/folder-sizes", f.opt.Username, rootID),
|
||||
Parameters: url.Values{
|
||||
"recursive": []string{"true"},
|
||||
},
|
||||
}
|
||||
|
||||
folderSizes := api.FolderSizesResponse{}
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.rest.CallJSON(ctx, &opts, nil, &folderSizes)
|
||||
return f.shouldRetry(ctx, resp, err, true)
|
||||
})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return folderSizes[rootID].Recursive.FilesSize, nil
|
||||
}
|
||||
|
||||
// UploadSession represents a single Uloz.to upload session.
|
||||
//
|
||||
// Uloz.to supports uploading multiple files at once and committing them atomically. This functionality isn't being used
|
||||
@@ -310,7 +349,6 @@ func (session *UploadSession) renewUploadSession(ctx context.Context) error {
|
||||
httpResp, err := session.Filesystem.rest.CallJSON(ctx, &opts, &createUploadURLReq, &response)
|
||||
return session.Filesystem.shouldRetry(ctx, httpResp, err, true)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -324,14 +362,12 @@ func (session *UploadSession) renewUploadSession(ctx context.Context) error {
|
||||
|
||||
func (f *Fs) uploadUnchecked(ctx context.Context, name, parentSlug string, info fs.ObjectInfo, payload io.Reader) (fs.Object, error) {
|
||||
session, err := f.createUploadSession(ctx)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hashes := hash.NewHashSet(hash.MD5, hash.SHA256)
|
||||
hasher, err := hash.NewMultiHasherTypes(hashes)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -360,7 +396,6 @@ func (f *Fs) uploadUnchecked(ctx context.Context, name, parentSlug string, info
|
||||
httpResp, err := f.cdn.CallJSON(ctx, &opts, nil, &uploadResponse)
|
||||
return f.shouldRetry(ctx, httpResp, err, true)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -386,7 +421,6 @@ func (f *Fs) uploadUnchecked(ctx context.Context, name, parentSlug string, info
|
||||
}
|
||||
|
||||
encodedMetadata, err := metadata.encode()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -412,7 +446,6 @@ func (f *Fs) uploadUnchecked(ctx context.Context, name, parentSlug string, info
|
||||
httpResp, err := session.Filesystem.rest.CallJSON(ctx, &opts, &updateReq, &updateResponse)
|
||||
return f.shouldRetry(ctx, httpResp, err, true)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -438,7 +471,6 @@ func (f *Fs) uploadUnchecked(ctx context.Context, name, parentSlug string, info
|
||||
httpResp, err := session.Filesystem.rest.CallJSON(ctx, &opts, &commitRequest, &commitResponse)
|
||||
return f.shouldRetry(ctx, httpResp, err, true)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -468,7 +500,6 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
|
||||
// Uloz.to allows to have multiple files of the same name in the same folder.
|
||||
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
filename, folderSlug, err := f.dirCache.FindPath(ctx, src.Remote(), true)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -484,7 +515,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
||||
|
||||
func (f *Fs) isDirEmpty(ctx context.Context, slug string) (empty bool, err error) {
|
||||
folders, err := f.fetchListFolderPage(ctx, slug, "", 1, 0)
|
||||
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -494,7 +524,6 @@ func (f *Fs) isDirEmpty(ctx context.Context, slug string) (empty bool, err error
|
||||
}
|
||||
|
||||
files, err := f.fetchListFilePage(ctx, slug, "", 1, 0)
|
||||
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -509,13 +538,11 @@ func (f *Fs) isDirEmpty(ctx context.Context, slug string) (empty bool, err error
|
||||
// Rmdir implements the mandatory method fs.Fs.Rmdir.
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
slug, err := f.dirCache.FindDir(ctx, dir, false)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
empty, err := f.isDirEmpty(ctx, slug)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -534,7 +561,6 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
httpResp, err := f.rest.CallJSON(ctx, &opts, req, nil)
|
||||
return f.shouldRetry(ctx, httpResp, err, true)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -558,7 +584,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
}
|
||||
|
||||
filename, folderSlug, err := f.dirCache.FindPath(ctx, remote, true)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -600,7 +625,6 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
httpResp, err := f.rest.CallJSON(ctx, &opts, &req, nil)
|
||||
return f.shouldRetry(ctx, httpResp, err, true)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -741,7 +765,6 @@ func (o *Object) updateFileProperties(ctx context.Context, req any) (err error)
|
||||
httpResp, err := o.fs.rest.CallJSON(ctx, &opts, &req, &resp)
|
||||
return o.fs.shouldRetry(ctx, httpResp, err, true)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -870,7 +893,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
remote: o.Remote(),
|
||||
}
|
||||
newo, err := o.fs.PutUnchecked(ctx, in, info, options...)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -914,7 +936,6 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
|
||||
// The time the object was last modified on the server - a handwavy guess, but we don't have any better
|
||||
return o.remoteFsMtime
|
||||
|
||||
}
|
||||
|
||||
// Fs implements the mandatory method fs.Object.Fs
|
||||
@@ -1053,7 +1074,6 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Fi
|
||||
}
|
||||
|
||||
files, err := f.listFiles(ctx, folderSlug, filename)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1065,7 +1085,6 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Fi
|
||||
}
|
||||
|
||||
folders, err := f.listFolders(ctx, folderSlug, filename)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1136,8 +1155,8 @@ func (f *Fs) fetchListFolderPage(
|
||||
folderSlug string,
|
||||
searchQuery string,
|
||||
limit int,
|
||||
offset int) (folders []api.Folder, err error) {
|
||||
|
||||
offset int,
|
||||
) (folders []api.Folder, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/v9/user/" + f.opt.Username + "/folder/" + folderSlug + "/folder-list",
|
||||
@@ -1160,7 +1179,6 @@ func (f *Fs) fetchListFolderPage(
|
||||
httpResp, err := f.rest.CallJSON(ctx, &opts, nil, &respBody)
|
||||
return f.shouldRetry(ctx, httpResp, err, true)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1175,8 +1193,8 @@ func (f *Fs) fetchListFolderPage(
|
||||
func (f *Fs) listFolders(
|
||||
ctx context.Context,
|
||||
folderSlug string,
|
||||
searchQuery string) (folders []api.Folder, err error) {
|
||||
|
||||
searchQuery string,
|
||||
) (folders []api.Folder, err error) {
|
||||
targetPageSize := f.opt.ListPageSize
|
||||
lastPageSize := targetPageSize
|
||||
offset := 0
|
||||
@@ -1204,8 +1222,8 @@ func (f *Fs) fetchListFilePage(
|
||||
folderSlug string,
|
||||
searchQuery string,
|
||||
limit int,
|
||||
offset int) (folders []api.File, err error) {
|
||||
|
||||
offset int,
|
||||
) (folders []api.File, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/v8/user/" + f.opt.Username + "/folder/" + folderSlug + "/file-list",
|
||||
@@ -1227,7 +1245,6 @@ func (f *Fs) fetchListFilePage(
|
||||
httpResp, err := f.rest.CallJSON(ctx, &opts, nil, &respBody)
|
||||
return f.shouldRetry(ctx, httpResp, err, true)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't list files: %w", err)
|
||||
}
|
||||
@@ -1242,8 +1259,8 @@ func (f *Fs) fetchListFilePage(
|
||||
func (f *Fs) listFiles(
|
||||
ctx context.Context,
|
||||
folderSlug string,
|
||||
searchQuery string) (folders []api.File, err error) {
|
||||
|
||||
searchQuery string,
|
||||
) (folders []api.File, err error) {
|
||||
targetPageSize := f.opt.ListPageSize
|
||||
lastPageSize := targetPageSize
|
||||
offset := 0
|
||||
|
||||
@@ -25,6 +25,9 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/Azure/go-ntlmssp"
|
||||
"golang.org/x/sync/singleflight"
|
||||
|
||||
"github.com/rclone/rclone/backend/webdav/api"
|
||||
"github.com/rclone/rclone/backend/webdav/odrvcookie"
|
||||
"github.com/rclone/rclone/fs"
|
||||
@@ -35,11 +38,10 @@ import (
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/list"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
|
||||
ntlmssp "github.com/Azure/go-ntlmssp"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -192,7 +194,7 @@ type Options struct {
|
||||
User string `config:"user"`
|
||||
Pass string `config:"pass"`
|
||||
BearerToken string `config:"bearer_token"`
|
||||
BearerTokenCommand string `config:"bearer_token_command"`
|
||||
BearerTokenCommand fs.SpaceSepList `config:"bearer_token_command"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
Headers fs.CommaSepList `config:"headers"`
|
||||
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
|
||||
@@ -226,6 +228,7 @@ type Fs struct {
|
||||
ntlmAuthMu sync.Mutex // mutex to serialize NTLM auth roundtrips
|
||||
chunksUploadURL string // upload URL for nextcloud chunked
|
||||
canChunk bool // set if nextcloud and nextcloud_chunk_size is set
|
||||
authSingleflight *singleflight.Group
|
||||
}
|
||||
|
||||
// Object describes a webdav object
|
||||
@@ -282,7 +285,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
|
||||
return false, err
|
||||
}
|
||||
// If we have a bearer token command and it has expired then refresh it
|
||||
if f.opt.BearerTokenCommand != "" && resp != nil && resp.StatusCode == 401 {
|
||||
if len(f.opt.BearerTokenCommand) != 0 && resp != nil && resp.StatusCode == 401 {
|
||||
fs.Debugf(f, "Bearer token expired: %v", err)
|
||||
authErr := f.fetchAndSetBearerToken()
|
||||
if authErr != nil {
|
||||
@@ -476,13 +479,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
endpoint: u,
|
||||
endpointURL: u.String(),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
precision: fs.ModTimeNotSupported,
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
endpoint: u,
|
||||
endpointURL: u.String(),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
precision: fs.ModTimeNotSupported,
|
||||
authSingleflight: new(singleflight.Group),
|
||||
}
|
||||
|
||||
var client *http.Client
|
||||
@@ -515,7 +519,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
f.srv.SetUserPass(opt.User, opt.Pass)
|
||||
} else if opt.BearerToken != "" {
|
||||
f.setBearerToken(opt.BearerToken)
|
||||
} else if f.opt.BearerTokenCommand != "" {
|
||||
} else if len(f.opt.BearerTokenCommand) != 0 {
|
||||
err = f.fetchAndSetBearerToken()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -562,12 +566,11 @@ func (f *Fs) setBearerToken(token string) {
|
||||
}
|
||||
|
||||
// fetch the bearer token using the command
|
||||
func (f *Fs) fetchBearerToken(cmd string) (string, error) {
|
||||
func (f *Fs) fetchBearerToken(cmd fs.SpaceSepList) (string, error) {
|
||||
var (
|
||||
args = strings.Split(cmd, " ")
|
||||
stdout bytes.Buffer
|
||||
stderr bytes.Buffer
|
||||
c = exec.Command(args[0], args[1:]...)
|
||||
c = exec.Command(cmd[0], cmd[1:]...)
|
||||
)
|
||||
c.Stdout = &stdout
|
||||
c.Stderr = &stderr
|
||||
@@ -607,15 +610,18 @@ func (f *Fs) findHeader(headers fs.CommaSepList, find string) bool {
|
||||
|
||||
// fetch the bearer token and set it if successful
|
||||
func (f *Fs) fetchAndSetBearerToken() error {
|
||||
if f.opt.BearerTokenCommand == "" {
|
||||
return nil
|
||||
}
|
||||
token, err := f.fetchBearerToken(f.opt.BearerTokenCommand)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.setBearerToken(token)
|
||||
return nil
|
||||
_, err, _ := f.authSingleflight.Do("bearerToken", func() (interface{}, error) {
|
||||
if len(f.opt.BearerTokenCommand) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
token, err := f.fetchBearerToken(f.opt.BearerTokenCommand)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.setBearerToken(token)
|
||||
return nil, nil
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// The WebDAV url can optionally be suffixed with a path. This suffix needs to be ignored for determining the temporary upload directory of chunks.
|
||||
@@ -882,30 +888,56 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
return list.WithListP(ctx, dir, f)
|
||||
}
|
||||
|
||||
// ListP lists the objects and directories of the Fs starting
|
||||
// from dir non recursively into out.
|
||||
//
|
||||
// dir should be "" to start from the root, and should not
|
||||
// have trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
//
|
||||
// It should call callback for each tranche of entries read.
|
||||
// These need not be returned in any particular order. If
|
||||
// callback returns an error then the listing will stop
|
||||
// immediately.
|
||||
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
|
||||
list := list.NewHelper(callback)
|
||||
var iErr error
|
||||
_, err = f.listAll(ctx, dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool {
|
||||
_, err := f.listAll(ctx, dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool {
|
||||
if isDir {
|
||||
d := fs.NewDir(remote, time.Time(info.Modified))
|
||||
// .SetID(info.ID)
|
||||
// FIXME more info from dir? can set size, items?
|
||||
entries = append(entries, d)
|
||||
err := list.Add(d)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
}
|
||||
} else {
|
||||
o, err := f.newObjectWithInfo(ctx, remote, info)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
}
|
||||
entries = append(entries, o)
|
||||
err = list.Add(o)
|
||||
if err != nil {
|
||||
iErr = err
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if iErr != nil {
|
||||
return nil, iErr
|
||||
return iErr
|
||||
}
|
||||
return entries, nil
|
||||
return list.Flush()
|
||||
}
|
||||
|
||||
// Creates from the parameters passed in a half finished Object which
|
||||
@@ -1628,6 +1660,7 @@ var (
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.ListPer = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
)
|
||||
|
||||
@@ -514,11 +514,12 @@ func (f *Fs) mkDirs(ctx context.Context, path string) (err error) {
|
||||
if apiErr.ErrorName != "DiskPathPointsToExistentDirectoryError" {
|
||||
// 2 if it fails then create all directories in the path from root.
|
||||
dirs := strings.Split(dirString, "/") //path separator
|
||||
var mkdirpath = "/" //path separator /
|
||||
var mkdirpath strings.Builder
|
||||
mkdirpath.WriteString("/") //path separator /
|
||||
for _, element := range dirs {
|
||||
if element != "" {
|
||||
mkdirpath += element + "/" //path separator /
|
||||
_ = f.CreateDir(ctx, mkdirpath) // ignore errors while creating dirs
|
||||
mkdirpath.WriteString(element + "/") //path separator /
|
||||
_ = f.CreateDir(ctx, mkdirpath.String()) // ignore errors while creating dirs
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -74,6 +74,7 @@ var osarches = []string{
|
||||
"plan9/amd64",
|
||||
"solaris/amd64",
|
||||
// "js/wasm", // Rclone is too big for js/wasm until https://github.com/golang/go/issues/64856 is fixed
|
||||
"aix/ppc64",
|
||||
}
|
||||
|
||||
// Special environment flags for a given arch
|
||||
|
||||
@@ -32,6 +32,7 @@ docs = [
|
||||
"fichier.md",
|
||||
"alias.md",
|
||||
"s3.md",
|
||||
"archive.md",
|
||||
"b2.md",
|
||||
"box.md",
|
||||
"cache.md",
|
||||
|
||||
@@ -14,4 +14,4 @@ if [ -z "$globs" ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
docker run -v $PWD:/workdir --user $(id -u):$(id -g) davidanson/markdownlint-cli2 $globs
|
||||
docker run --rm -v $PWD:/workdir --user $(id -u):$(id -g) davidanson/markdownlint-cli2 $globs
|
||||
|
||||
@@ -13,7 +13,6 @@
|
||||
// https://github.com/quasilyte/go-ruleguard?tab=readme-ov-file#troubleshooting
|
||||
|
||||
//go:build ruleguard
|
||||
// +build ruleguard
|
||||
|
||||
// Package gorules implementing custom linting rules using ruleguard
|
||||
package gorules
|
||||
|
||||
@@ -5,6 +5,10 @@ import (
|
||||
// Active commands
|
||||
_ "github.com/rclone/rclone/cmd"
|
||||
_ "github.com/rclone/rclone/cmd/about"
|
||||
_ "github.com/rclone/rclone/cmd/archive"
|
||||
_ "github.com/rclone/rclone/cmd/archive/create"
|
||||
_ "github.com/rclone/rclone/cmd/archive/extract"
|
||||
_ "github.com/rclone/rclone/cmd/archive/list"
|
||||
_ "github.com/rclone/rclone/cmd/authorize"
|
||||
_ "github.com/rclone/rclone/cmd/backend"
|
||||
_ "github.com/rclone/rclone/cmd/bisync"
|
||||
|
||||
40
cmd/archive/archive.go
Normal file
40
cmd/archive/archive.go
Normal file
@@ -0,0 +1,40 @@
|
||||
//go:build !plan9
|
||||
|
||||
// Package archive implements 'rclone archive'.
|
||||
package archive
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(Command)
|
||||
}
|
||||
|
||||
// Command - archive command
|
||||
var Command = &cobra.Command{
|
||||
Use: "archive <action> [opts] <source> [<destination>]",
|
||||
Short: `Perform an action on an archive.`,
|
||||
Long: `Perform an action on an archive. Requires the use of a
|
||||
subcommand to specify the protocol, e.g.
|
||||
|
||||
rclone archive list remote:file.zip
|
||||
|
||||
Each subcommand has its own options which you can see in their help.
|
||||
|
||||
See [rclone archive create](/commands/rclone_archive_create/) for the
|
||||
archive formats supported.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.72",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
if len(args) == 0 {
|
||||
return errors.New("archive requires an action, e.g. 'rclone archive list remote:'")
|
||||
}
|
||||
return errors.New("unknown action")
|
||||
},
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user