mirror of
https://github.com/rclone/rclone.git
synced 2026-01-22 12:23:15 +00:00
Compare commits
69 Commits
fix-8815-b
...
cluster
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ab60a77aba | ||
|
|
09535a06f7 | ||
|
|
173b720173 | ||
|
|
6660e6ec7c | ||
|
|
14c604335e | ||
|
|
bfcb23b7b2 | ||
|
|
46dbdb8cb7 | ||
|
|
17932fcc38 | ||
|
|
77faa787e1 | ||
|
|
0701dd55cd | ||
|
|
1903b4c1a2 | ||
|
|
f7cbcf556f | ||
|
|
3581e628c0 | ||
|
|
62c41bf449 | ||
|
|
c5864e113b | ||
|
|
39259a5bd1 | ||
|
|
2e376eb3b9 | ||
|
|
de8e9d4693 | ||
|
|
710cf49bc6 | ||
|
|
8dacac60ea | ||
|
|
3a80d4d4b4 | ||
|
|
a531f987a8 | ||
|
|
e906b8d0c4 | ||
|
|
a5932ef91a | ||
|
|
3afa563eaf | ||
|
|
9d9654b31f | ||
|
|
cfe257f13d | ||
|
|
0375efbd35 | ||
|
|
cad1954213 | ||
|
|
604e37caa5 | ||
|
|
b249d384b9 | ||
|
|
04e91838db | ||
|
|
94829aaec5 | ||
|
|
f574e3395c | ||
|
|
2bc155a96a | ||
|
|
adc8ea3427 | ||
|
|
068eea025c | ||
|
|
4510aa679a | ||
|
|
79281354c7 | ||
|
|
f57a178719 | ||
|
|
44f2e2ed39 | ||
|
|
13e1752d94 | ||
|
|
bb82c0e43b | ||
|
|
1af7151e73 | ||
|
|
fd63478ed6 | ||
|
|
5133b05c74 | ||
|
|
6ba96ede4b | ||
|
|
2896973964 | ||
|
|
be123d85ff | ||
|
|
b1b9562ab7 | ||
|
|
5146b66569 | ||
|
|
8898372d5a | ||
|
|
091fe9e453 | ||
|
|
8fdb68e41a | ||
|
|
c124aa2ed3 | ||
|
|
54e8bb89f7 | ||
|
|
50c1b594ab | ||
|
|
72437a9ca2 | ||
|
|
8ed55c61e1 | ||
|
|
bd598c1ceb | ||
|
|
7e30665102 | ||
|
|
d44957a09c | ||
|
|
37524e2dea | ||
|
|
2f6a6c8233 | ||
|
|
4ad40b6554 | ||
|
|
4f33d64f25 | ||
|
|
519623d9f1 | ||
|
|
913278327b | ||
|
|
a9b05e4c7a |
2
.github/workflows/build.yml
vendored
2
.github/workflows/build.yml
vendored
@@ -291,7 +291,7 @@ jobs:
|
||||
README.md
|
||||
RELEASE.md
|
||||
CODE_OF_CONDUCT.md
|
||||
docs/content/{authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md
|
||||
docs/content/{authors,bugs,changelog,cluster,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md
|
||||
|
||||
- name: Scan edits of autogenerated files
|
||||
run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
|
||||
|
||||
@@ -628,7 +628,7 @@ You'll need to modify the following files
|
||||
- `backend/s3/s3.go`
|
||||
- Add the provider to `providerOption` at the top of the file
|
||||
- Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
|
||||
- Exclude your provider from generic config questions (eg `region` and `endpoint).
|
||||
- Exclude your provider from generic config questions (eg `region` and `endpoint`).
|
||||
- Add the provider to the `setQuirks` function - see the documentation there.
|
||||
- `docs/content/s3.md`
|
||||
- Add the provider at the top of the page.
|
||||
|
||||
12
Makefile
12
Makefile
@@ -114,21 +114,21 @@ release_dep_linux:
|
||||
# Update dependencies
|
||||
showupdates:
|
||||
@echo "*** Direct dependencies that could be updated ***"
|
||||
@GO111MODULE=on go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
|
||||
@go list -u -f '{{if (and (not (or .Main .Indirect)) .Update)}}{{.Path}}: {{.Version}} -> {{.Update.Version}}{{end}}' -m all 2> /dev/null
|
||||
|
||||
# Update direct dependencies only
|
||||
updatedirect:
|
||||
GO111MODULE=on go get -d $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
|
||||
GO111MODULE=on go mod tidy
|
||||
go get $$(go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all)
|
||||
go mod tidy
|
||||
|
||||
# Update direct and indirect dependencies and test dependencies
|
||||
update:
|
||||
GO111MODULE=on go get -d -u -t ./...
|
||||
GO111MODULE=on go mod tidy
|
||||
go get -u -t ./...
|
||||
go mod tidy
|
||||
|
||||
# Tidy the module dependencies
|
||||
tidy:
|
||||
GO111MODULE=on go mod tidy
|
||||
go mod tidy
|
||||
|
||||
doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
|
||||
|
||||
|
||||
@@ -50,6 +50,7 @@ directories to and from different cloud storage providers.
|
||||
- Google Drive [:page_facing_up:](https://rclone.org/drive/)
|
||||
- Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
|
||||
- HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
|
||||
- Hetzner Object Storage [:page_facing_up:](https://rclone.org/s3/#hetzner)
|
||||
- Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
|
||||
- HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
|
||||
- HTTP [:page_facing_up:](https://rclone.org/http/)
|
||||
@@ -95,6 +96,7 @@ directories to and from different cloud storage providers.
|
||||
- Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
|
||||
- QingStor [:page_facing_up:](https://rclone.org/qingstor/)
|
||||
- Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
|
||||
- Rabata Cloud Storage [:page_facing_up:](https://rclone.org/s3/#Rabata)
|
||||
- Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
|
||||
- Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
|
||||
- RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
|
||||
|
||||
@@ -1313,10 +1313,29 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
}
|
||||
srcURL := srcObj.fileClient().URL()
|
||||
fc := f.fileClient(remote)
|
||||
_, err = fc.StartCopyFromURL(ctx, srcURL, &opt)
|
||||
startCopy, err := fc.StartCopyFromURL(ctx, srcURL, &opt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Copy failed: %w", err)
|
||||
}
|
||||
|
||||
// Poll for completion if necessary
|
||||
//
|
||||
// The for loop is never executed for same storage account copies.
|
||||
copyStatus := startCopy.CopyStatus
|
||||
var properties file.GetPropertiesResponse
|
||||
pollTime := 100 * time.Millisecond
|
||||
|
||||
for copyStatus != nil && string(*copyStatus) == string(file.CopyStatusTypePending) {
|
||||
time.Sleep(pollTime)
|
||||
|
||||
properties, err = fc.GetProperties(ctx, &file.GetPropertiesOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copyStatus = properties.CopyStatus
|
||||
pollTime = min(2*pollTime, time.Second)
|
||||
}
|
||||
|
||||
dstObj, err := f.NewObject(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Copy: NewObject failed: %w", err)
|
||||
|
||||
@@ -252,6 +252,9 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
|
||||
}, {
|
||||
Value: "us-east4",
|
||||
Help: "Northern Virginia",
|
||||
}, {
|
||||
Value: "us-east5",
|
||||
Help: "Ohio",
|
||||
}, {
|
||||
Value: "us-west1",
|
||||
Help: "Oregon",
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -59,31 +60,43 @@ const (
|
||||
configVersion = 1
|
||||
|
||||
defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
|
||||
defaultClientID = "jottacli"
|
||||
defaultClientID = "jottacli" // Identified as "Jottacloud CLI" in "My logged in devices"
|
||||
|
||||
legacyTokenURL = "https://api.jottacloud.com/auth/v1/token"
|
||||
legacyRegisterURL = "https://api.jottacloud.com/auth/v1/register"
|
||||
legacyClientID = "nibfk8biu12ju7hpqomr8b1e40"
|
||||
legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
|
||||
legacyConfigVersion = 0
|
||||
|
||||
teliaseCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
|
||||
teliaseCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
|
||||
teliaseCloudClientID = "desktop"
|
||||
|
||||
telianoCloudTokenURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/token"
|
||||
telianoCloudAuthURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/auth"
|
||||
telianoCloudClientID = "desktop"
|
||||
|
||||
tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
|
||||
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
|
||||
tele2CloudClientID = "desktop"
|
||||
|
||||
onlimeCloudTokenURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/token"
|
||||
onlimeCloudAuthURL = "https://cloud-auth.onlime.dk/auth/realms/onlime_wl/protocol/openid-connect/auth"
|
||||
onlimeCloudClientID = "desktop"
|
||||
)
|
||||
|
||||
type service struct {
|
||||
key string
|
||||
name string
|
||||
domain string
|
||||
realm string
|
||||
clientID string
|
||||
scopes []string
|
||||
}
|
||||
|
||||
// The list of services and their settings for supporting traditional OAuth.
|
||||
// Please keep these in alphabetical order, but with jottacloud first.
|
||||
func getServices() []service {
|
||||
return []service{
|
||||
{"jottacloud", "Jottacloud", "id.jottacloud.com", "jottacloud", "desktop", []string{"openid", "jotta-default", "offline_access"}}, // Chose client id "desktop" here, will be identified as "Jottacloud for Desktop" in "My logged in devices", but could have used "jottacli" here as well.
|
||||
{"elgiganten_dk", "Elgiganten Cloud (Denmark)", "cloud.elgiganten.dk", "elgiganten", "desktop", []string{"openid", "jotta-default", "offline_access"}},
|
||||
{"elgiganten_se", "Elgiganten Cloud (Sweden)", "cloud.elgiganten.se", "elgiganten", "desktop", []string{"openid", "jotta-default", "offline_access"}},
|
||||
{"elkjop", "Elkjøp Cloud (Norway)", "cloud.elkjop.no", "elkjop", "desktop", []string{"openid", "jotta-default", "offline_access"}},
|
||||
{"elko", "ELKO Cloud (Iceland)", "cloud.elko.is", "elko", "desktop", []string{"openid", "jotta-default", "offline_access"}},
|
||||
{"gigantti", "Gigantti Cloud (Finland)", "cloud.gigantti.fi", "gigantti", "desktop", []string{"openid", "jotta-default", "offline_access"}},
|
||||
{"letsgo", "Let's Go Cloud (Germany)", "letsgo.jotta.cloud", "letsgo", "desktop-win", []string{"openid", "offline_access"}},
|
||||
{"mediamarkt", "MediaMarkt Cloud (Multiregional)", "mediamarkt.jottacloud.com", "mediamarkt", "desktop", []string{"openid", "jotta-default", "offline_access"}},
|
||||
{"onlime", "Onlime (Denmark)", "cloud-auth.onlime.dk", "onlime_wl", "desktop", []string{"openid", "jotta-default", "offline_access"}},
|
||||
{"tele2", "Tele2 Cloud (Sweden)", "mittcloud-auth.tele2.se", "comhem", "desktop", []string{"openid", "jotta-default", "offline_access"}},
|
||||
{"telia_no", "Telia Sky (Norway)", "sky-auth.telia.no", "get", "desktop", []string{"openid", "jotta-default", "offline_access"}},
|
||||
{"telia_se", "Telia Cloud (Sweden)", "cloud-auth.telia.se", "telia_se", "desktop", []string{"openid", "jotta-default", "offline_access"}},
|
||||
}
|
||||
}
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
// needs to be done early so we can use oauth during config
|
||||
@@ -159,36 +172,44 @@ func init() {
|
||||
}
|
||||
|
||||
// Config runs the backend configuration protocol
|
||||
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
switch config.State {
|
||||
func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
switch conf.State {
|
||||
case "":
|
||||
return fs.ConfigChooseExclusiveFixed("auth_type_done", "config_type", `Select authentication type.`, []fs.OptionExample{{
|
||||
if isAuthorize, _ := m.Get(config.ConfigAuthorize); isAuthorize == "true" {
|
||||
return nil, errors.New("not supported by this backend")
|
||||
}
|
||||
return fs.ConfigChooseExclusiveFixed("auth_type_done", "config_type", `Type of authentication.`, []fs.OptionExample{{
|
||||
Value: "standard",
|
||||
Help: "Standard authentication.\nUse this if you're a normal Jottacloud user.",
|
||||
Help: `Standard authentication.
|
||||
This is primarily supported by the official service, but may also be
|
||||
supported by some white-label services. It is designed for command-line
|
||||
applications, and you will be asked to enter a single-use personal login
|
||||
token which you must manually generate from the account security settings
|
||||
in the web interface of your service.`,
|
||||
}, {
|
||||
Value: "traditional",
|
||||
Help: `Traditional authentication.
|
||||
This is supported by the official service and all white-label services
|
||||
that rclone knows about. You will be asked which service to connect to.
|
||||
It has a limitation of only a single active authentication at a time. You
|
||||
need to be on, or have access to, a machine with an internet-connected
|
||||
web browser.`,
|
||||
}, {
|
||||
Value: "legacy",
|
||||
Help: "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
|
||||
}, {
|
||||
Value: "telia_se",
|
||||
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud (Sweden).",
|
||||
}, {
|
||||
Value: "telia_no",
|
||||
Help: "Telia Sky authentication.\nUse this if you are using Telia Sky (Norway).",
|
||||
}, {
|
||||
Value: "tele2",
|
||||
Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
|
||||
}, {
|
||||
Value: "onlime",
|
||||
Help: "Onlime Cloud authentication.\nUse this if you are using Onlime Cloud.",
|
||||
Help: `Legacy authentication.
|
||||
This is no longer supported by any known services and not recommended
|
||||
used. You will be asked for your account's username and password.`,
|
||||
}})
|
||||
case "auth_type_done":
|
||||
// Jump to next state according to config chosen
|
||||
return fs.ConfigGoto(config.Result)
|
||||
return fs.ConfigGoto(conf.Result)
|
||||
case "standard": // configure a jottacloud backend using the modern JottaCli token based authentication
|
||||
m.Set("configVersion", fmt.Sprint(configVersion))
|
||||
return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\nGenerate here: https://www.jottacloud.com/web/secure")
|
||||
return fs.ConfigInput("standard_token", "config_login_token", `Personal login token.
|
||||
Generate it from the account security settings in the web interface of your
|
||||
service, for the official service on https://www.jottacloud.com/web/secure.`)
|
||||
case "standard_token":
|
||||
loginToken := config.Result
|
||||
loginToken := conf.Result
|
||||
m.Set(configClientID, defaultClientID)
|
||||
m.Set(configClientSecret, "")
|
||||
|
||||
@@ -203,10 +224,50 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
|
||||
return nil, fmt.Errorf("error while saving token: %w", err)
|
||||
}
|
||||
return fs.ConfigGoto("choose_device")
|
||||
case "traditional":
|
||||
services := getServices()
|
||||
options := make([]fs.OptionExample, 0, len(services))
|
||||
for _, service := range services {
|
||||
options = append(options, fs.OptionExample{
|
||||
Value: service.key,
|
||||
Help: service.name,
|
||||
})
|
||||
}
|
||||
return fs.ConfigChooseExclusiveFixed("traditional_type", "config_traditional",
|
||||
"White-label service. This decides the domain name to connect to and\nthe authentication configuration to use.",
|
||||
options)
|
||||
case "traditional_type":
|
||||
services := getServices()
|
||||
i := slices.IndexFunc(services, func(s service) bool { return s.key == conf.Result })
|
||||
if i == -1 {
|
||||
return nil, fmt.Errorf("unexpected service %q", conf.Result)
|
||||
}
|
||||
service := services[i]
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: "https://" + service.domain + "/auth/realms/" + service.realm + "/.well-known/openid-configuration",
|
||||
}
|
||||
var wellKnown api.WellKnown
|
||||
srv := rest.NewClient(fshttp.NewClient(ctx))
|
||||
_, err := srv.CallJSON(ctx, &opts, nil, &wellKnown)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get authentication provider configuration: %w", err)
|
||||
}
|
||||
m.Set("configVersion", fmt.Sprint(configVersion))
|
||||
m.Set(configClientID, service.clientID)
|
||||
m.Set(configTokenURL, wellKnown.TokenEndpoint)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauthutil.Config{
|
||||
AuthURL: wellKnown.AuthorizationEndpoint,
|
||||
TokenURL: wellKnown.TokenEndpoint,
|
||||
ClientID: service.clientID,
|
||||
Scopes: service.scopes,
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
},
|
||||
})
|
||||
case "legacy": // configure a jottacloud backend using legacy authentication
|
||||
m.Set("configVersion", fmt.Sprint(legacyConfigVersion))
|
||||
return fs.ConfigConfirm("legacy_api", false, "config_machine_specific", `Do you want to create a machine specific API key?
|
||||
|
||||
Rclone has it's own Jottacloud API KEY which works fine as long as one
|
||||
only uses rclone on a single machine. When you want to use rclone with
|
||||
this account on more than one machine it's recommended to create a
|
||||
@@ -214,7 +275,7 @@ machine specific API key. These keys can NOT be shared between
|
||||
machines.`)
|
||||
case "legacy_api":
|
||||
srv := rest.NewClient(fshttp.NewClient(ctx))
|
||||
if config.Result == "true" {
|
||||
if conf.Result == "true" {
|
||||
deviceRegistration, err := registerDevice(ctx, srv)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to register device: %w", err)
|
||||
@@ -223,16 +284,16 @@ machines.`)
|
||||
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
|
||||
fs.Debugf(nil, "Got clientID %q and clientSecret %q", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
|
||||
}
|
||||
return fs.ConfigInput("legacy_username", "config_username", "Username (e-mail address)")
|
||||
return fs.ConfigInput("legacy_username", "config_username", "Username (e-mail address) of your account.")
|
||||
case "legacy_username":
|
||||
m.Set(configUsername, config.Result)
|
||||
return fs.ConfigPassword("legacy_password", "config_password", "Password (only used in setup, will not be stored)")
|
||||
m.Set(configUsername, conf.Result)
|
||||
return fs.ConfigPassword("legacy_password", "config_password", "Password of your account. This is only used in setup, it will not be stored.")
|
||||
case "legacy_password":
|
||||
m.Set("password", config.Result)
|
||||
m.Set("password", conf.Result)
|
||||
m.Set("auth_code", "")
|
||||
return fs.ConfigGoto("legacy_do_auth")
|
||||
case "legacy_auth_code":
|
||||
authCode := strings.ReplaceAll(config.Result, "-", "") // remove any "-" contained in the code so we have a 6 digit number
|
||||
authCode := strings.ReplaceAll(conf.Result, "-", "") // remove any "-" contained in the code so we have a 6 digit number
|
||||
m.Set("auth_code", authCode)
|
||||
return fs.ConfigGoto("legacy_do_auth")
|
||||
case "legacy_do_auth":
|
||||
@@ -242,12 +303,12 @@ machines.`)
|
||||
authCode, _ := m.Get("auth_code")
|
||||
|
||||
srv := rest.NewClient(fshttp.NewClient(ctx))
|
||||
clientID, ok := m.Get(configClientID)
|
||||
if !ok {
|
||||
clientID, _ := m.Get(configClientID)
|
||||
if clientID == "" {
|
||||
clientID = legacyClientID
|
||||
}
|
||||
clientSecret, ok := m.Get(configClientSecret)
|
||||
if !ok {
|
||||
clientSecret, _ := m.Get(configClientSecret)
|
||||
if clientSecret == "" {
|
||||
clientSecret = legacyEncryptedClientSecret
|
||||
}
|
||||
|
||||
@@ -260,7 +321,7 @@ machines.`)
|
||||
}
|
||||
token, err := doLegacyAuth(ctx, srv, oauthConfig, username, password, authCode)
|
||||
if err == errAuthCodeRequired {
|
||||
return fs.ConfigInput("legacy_auth_code", "config_auth_code", "Verification Code\nThis account uses 2 factor authentication you will receive a verification code via SMS.")
|
||||
return fs.ConfigInput("legacy_auth_code", "config_auth_code", "Verification code.\nThis account uses 2 factor authentication you will receive a verification code via SMS.")
|
||||
}
|
||||
m.Set("password", "")
|
||||
m.Set("auth_code", "")
|
||||
@@ -272,58 +333,6 @@ machines.`)
|
||||
return nil, fmt.Errorf("error while saving token: %w", err)
|
||||
}
|
||||
return fs.ConfigGoto("choose_device")
|
||||
case "telia_se": // telia_se cloud config
|
||||
m.Set("configVersion", fmt.Sprint(configVersion))
|
||||
m.Set(configClientID, teliaseCloudClientID)
|
||||
m.Set(configTokenURL, teliaseCloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauthutil.Config{
|
||||
AuthURL: teliaseCloudAuthURL,
|
||||
TokenURL: teliaseCloudTokenURL,
|
||||
ClientID: teliaseCloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
},
|
||||
})
|
||||
case "telia_no": // telia_no cloud config
|
||||
m.Set("configVersion", fmt.Sprint(configVersion))
|
||||
m.Set(configClientID, telianoCloudClientID)
|
||||
m.Set(configTokenURL, telianoCloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauthutil.Config{
|
||||
AuthURL: telianoCloudAuthURL,
|
||||
TokenURL: telianoCloudTokenURL,
|
||||
ClientID: telianoCloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
},
|
||||
})
|
||||
case "tele2": // tele2 cloud config
|
||||
m.Set("configVersion", fmt.Sprint(configVersion))
|
||||
m.Set(configClientID, tele2CloudClientID)
|
||||
m.Set(configTokenURL, tele2CloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauthutil.Config{
|
||||
AuthURL: tele2CloudAuthURL,
|
||||
TokenURL: tele2CloudTokenURL,
|
||||
ClientID: tele2CloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
},
|
||||
})
|
||||
case "onlime": // onlime cloud config
|
||||
m.Set("configVersion", fmt.Sprint(configVersion))
|
||||
m.Set(configClientID, onlimeCloudClientID)
|
||||
m.Set(configTokenURL, onlimeCloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauthutil.Config{
|
||||
AuthURL: onlimeCloudAuthURL,
|
||||
TokenURL: onlimeCloudTokenURL,
|
||||
ClientID: onlimeCloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
},
|
||||
})
|
||||
case "choose_device":
|
||||
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", `Use a non-standard device/mountpoint?
|
||||
Choosing no, the default, will let you access the storage used for the archive
|
||||
@@ -331,7 +340,7 @@ section of the official Jottacloud client. If you instead want to access the
|
||||
sync or the backup section, for example, you must choose yes.`)
|
||||
|
||||
case "choose_device_query":
|
||||
if config.Result != "true" {
|
||||
if conf.Result != "true" {
|
||||
m.Set(configDevice, "")
|
||||
m.Set(configMountpoint, "")
|
||||
return fs.ConfigGoto("end")
|
||||
@@ -372,7 +381,7 @@ a new by entering a unique name.`, defaultDevice)
|
||||
return deviceNames[i], ""
|
||||
})
|
||||
case "choose_device_result":
|
||||
device := config.Result
|
||||
device := conf.Result
|
||||
|
||||
oAuthClient, _, err := getOAuthClient(ctx, name, m)
|
||||
if err != nil {
|
||||
@@ -432,7 +441,7 @@ You may create a new by entering a unique name.`, device)
|
||||
return dev.MountPoints[i].Name, ""
|
||||
})
|
||||
case "choose_device_mountpoint":
|
||||
mountpoint := config.Result
|
||||
mountpoint := conf.Result
|
||||
|
||||
oAuthClient, _, err := getOAuthClient(ctx, name, m)
|
||||
if err != nil {
|
||||
@@ -463,7 +472,7 @@ You may create a new by entering a unique name.`, device)
|
||||
|
||||
if isNew {
|
||||
if device == defaultDevice {
|
||||
return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device: %w", defaultDevice, err)
|
||||
return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device", defaultDevice)
|
||||
}
|
||||
fs.Debugf(nil, "Creating new mountpoint: %s", mountpoint)
|
||||
_, err := createMountPoint(ctx, jfsSrv, path.Join(cust.Username, device, mountpoint))
|
||||
@@ -478,7 +487,7 @@ You may create a new by entering a unique name.`, device)
|
||||
// All the config flows end up here in case we need to carry on with something
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("unknown state %q", config.State)
|
||||
return nil, fmt.Errorf("unknown state %q", conf.State)
|
||||
}
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
@@ -929,12 +938,12 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
|
||||
oauthConfig.AuthURL = tokenURL
|
||||
}
|
||||
} else if ver == legacyConfigVersion {
|
||||
clientID, ok := m.Get(configClientID)
|
||||
if !ok {
|
||||
clientID, _ := m.Get(configClientID)
|
||||
if clientID == "" {
|
||||
clientID = legacyClientID
|
||||
}
|
||||
clientSecret, ok := m.Get(configClientSecret)
|
||||
if !ok {
|
||||
clientSecret, _ := m.Get(configClientSecret)
|
||||
if clientSecret == "" {
|
||||
clientSecret = legacyEncryptedClientSecret
|
||||
}
|
||||
oauthConfig.ClientID = clientID
|
||||
@@ -1000,6 +1009,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
f.features.ListR = nil
|
||||
}
|
||||
|
||||
cust, err := getCustomerInfo(ctx, f.apiSrv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.user = cust.Username
|
||||
f.setEndpoints()
|
||||
|
||||
// Renew the token in the background
|
||||
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
||||
_, err := f.readMetaDataForPath(ctx, "")
|
||||
@@ -1009,13 +1025,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return err
|
||||
})
|
||||
|
||||
cust, err := getCustomerInfo(ctx, f.apiSrv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.user = cust.Username
|
||||
f.setEndpoints()
|
||||
|
||||
if root != "" && !rootIsDir {
|
||||
// Check to see if the root actually an existing file
|
||||
remote := path.Base(root)
|
||||
|
||||
@@ -13,6 +13,8 @@ import (
|
||||
protonDriveAPI "github.com/henrybear327/Proton-API-Bridge"
|
||||
"github.com/henrybear327/go-proton-api"
|
||||
|
||||
"github.com/pquerna/otp/totp"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
@@ -87,6 +89,17 @@ The value can also be provided with --protondrive-2fa=000000
|
||||
The 2FA code of your proton drive account if the account is set up with
|
||||
two-factor authentication`,
|
||||
Required: false,
|
||||
}, {
|
||||
Name: "otp_secret_key",
|
||||
Help: `The OTP secret key
|
||||
|
||||
The value can also be provided with --protondrive-otp-secret-key=ABCDEFGHIJKLMNOPQRSTUVWXYZ234567
|
||||
|
||||
The OTP secret key of your proton drive account if the account is set up with
|
||||
two-factor authentication`,
|
||||
Required: false,
|
||||
Sensitive: true,
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: clientUIDKey,
|
||||
Help: "Client uid key (internal use only)",
|
||||
@@ -191,6 +204,7 @@ type Options struct {
|
||||
Password string `config:"password"`
|
||||
MailboxPassword string `config:"mailbox_password"`
|
||||
TwoFA string `config:"2fa"`
|
||||
OtpSecretKey string `config:"otp_secret_key"`
|
||||
|
||||
// advanced
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
@@ -356,7 +370,15 @@ func newProtonDrive(ctx context.Context, f *Fs, opt *Options, m configmap.Mapper
|
||||
config.FirstLoginCredential.Username = opt.Username
|
||||
config.FirstLoginCredential.Password = opt.Password
|
||||
config.FirstLoginCredential.MailboxPassword = opt.MailboxPassword
|
||||
// if 2FA code is provided, use it; otherwise, generate one using the OTP secret key if provided
|
||||
config.FirstLoginCredential.TwoFA = opt.TwoFA
|
||||
if opt.TwoFA == "" && opt.OtpSecretKey != "" {
|
||||
code, err := totp.GenerateCode(opt.OtpSecretKey, time.Now())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't generate 2FA code: %w", err)
|
||||
}
|
||||
config.FirstLoginCredential.TwoFA = code
|
||||
}
|
||||
protonDrive, auth, err := protonDriveAPI.NewProtonDrive(ctx, config, authHandler, deAuthHandler)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't initialize a new proton drive instance: %w", err)
|
||||
@@ -395,6 +417,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
}
|
||||
|
||||
if opt.OtpSecretKey != "" {
|
||||
var err error
|
||||
opt.OtpSecretKey, err = obscure.Reveal(opt.OtpSecretKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't decrypt OtpSecretKey: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
ci := fs.GetConfig(ctx)
|
||||
|
||||
root = strings.Trim(root, "/")
|
||||
|
||||
1036
backend/s3/s3.go
1036
backend/s3/s3.go
File diff suppressed because it is too large
Load Diff
@@ -561,6 +561,21 @@ func (f *Fs) setRoot(root string) {
|
||||
f.rootContainer, f.rootDirectory = bucket.Split(f.root)
|
||||
}
|
||||
|
||||
// Fetch the base container's policy to be used if/when we need to create a
|
||||
// segments container to ensure we use the same policy.
|
||||
func (f *Fs) fetchStoragePolicy(ctx context.Context, container string) (fs.Fs, error) {
|
||||
err := f.pacer.Call(func() (bool, error) {
|
||||
var rxHeaders swift.Headers
|
||||
_, rxHeaders, err := f.c.Container(ctx, container)
|
||||
|
||||
f.opt.StoragePolicy = rxHeaders["X-Storage-Policy"]
|
||||
fs.Debugf(f, "Auto set StoragePolicy to %s", f.opt.StoragePolicy)
|
||||
|
||||
return shouldRetryHeaders(ctx, rxHeaders, err)
|
||||
})
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// NewFsWithConnection constructs an Fs from the path, container:path
|
||||
// and authenticated connection.
|
||||
//
|
||||
@@ -590,6 +605,7 @@ func NewFsWithConnection(ctx context.Context, opt *Options, name, root string, c
|
||||
f.opt.UseSegmentsContainer.Valid = true
|
||||
fs.Debugf(f, "Auto set use_segments_container to %v", f.opt.UseSegmentsContainer.Value)
|
||||
}
|
||||
|
||||
if f.rootContainer != "" && f.rootDirectory != "" {
|
||||
// Check to see if the object exists - ignoring directory markers
|
||||
var info swift.Object
|
||||
@@ -1132,6 +1148,13 @@ func (f *Fs) newSegmentedUpload(ctx context.Context, dstContainer string, dstPat
|
||||
container: dstContainer,
|
||||
}
|
||||
if f.opt.UseSegmentsContainer.Value {
|
||||
if f.opt.StoragePolicy == "" {
|
||||
_, err = f.fetchStoragePolicy(ctx, dstContainer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
su.container += segmentsContainerSuffix
|
||||
err = f.makeContainer(ctx, su.container)
|
||||
if err != nil {
|
||||
|
||||
@@ -76,6 +76,7 @@ func (f *Fs) testNoChunk(t *testing.T) {
|
||||
|
||||
// Additional tests that aren't in the framework
|
||||
func (f *Fs) InternalTest(t *testing.T) {
|
||||
t.Run("PolicyDiscovery", f.testPolicyDiscovery)
|
||||
t.Run("NoChunk", f.testNoChunk)
|
||||
t.Run("WithChunk", f.testWithChunk)
|
||||
t.Run("WithChunkFail", f.testWithChunkFail)
|
||||
@@ -195,4 +196,50 @@ func (f *Fs) testCopyLargeObject(t *testing.T) {
|
||||
require.Equal(t, obj.Size(), objTarget.Size())
|
||||
}
|
||||
|
||||
func (f *Fs) testPolicyDiscovery(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
container := "testPolicyDiscovery-1"
|
||||
// Reset the policy so we can test if it is populated.
|
||||
f.opt.StoragePolicy = ""
|
||||
err := f.makeContainer(ctx, container)
|
||||
require.NoError(t, err)
|
||||
_, err = f.fetchStoragePolicy(ctx, container)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Default policy for SAIO image is 1replica.
|
||||
assert.Equal(t, "1replica", f.opt.StoragePolicy)
|
||||
|
||||
// Create a container using a non-default policy, and check to ensure
|
||||
// that the created segments container uses the same non-default policy.
|
||||
policy := "Policy-1"
|
||||
container = "testPolicyDiscovery-2"
|
||||
|
||||
f.opt.StoragePolicy = policy
|
||||
err = f.makeContainer(ctx, container)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Reset the policy so we can test if it is populated, and set to the
|
||||
// non-default policy.
|
||||
f.opt.StoragePolicy = ""
|
||||
_, err = f.fetchStoragePolicy(ctx, container)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, policy, f.opt.StoragePolicy)
|
||||
|
||||
// Test that when a segmented upload container is made, the newly
|
||||
// created container inherits the non-default policy of the base
|
||||
// container.
|
||||
f.opt.StoragePolicy = ""
|
||||
f.opt.UseSegmentsContainer.Value = true
|
||||
su, err := f.newSegmentedUpload(ctx, container, "")
|
||||
require.NoError(t, err)
|
||||
// The container name we expected?
|
||||
segmentsContainer := container + segmentsContainerSuffix
|
||||
assert.Equal(t, segmentsContainer, su.container)
|
||||
// The policy we expected?
|
||||
f.opt.StoragePolicy = ""
|
||||
_, err = f.fetchStoragePolicy(ctx, su.container)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, policy, f.opt.StoragePolicy)
|
||||
}
|
||||
|
||||
var _ fstests.InternalTester = (*Fs)(nil)
|
||||
|
||||
@@ -23,15 +23,15 @@ func init() {
|
||||
}
|
||||
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "authorize <fs name> [base64_json_blob | client_id client_secret]",
|
||||
Use: "authorize <backendname> [base64_json_blob | client_id client_secret]",
|
||||
Short: `Remote authorization.`,
|
||||
Long: `Remote authorization. Used to authorize a remote or headless
|
||||
rclone from a machine with a browser - use as instructed by
|
||||
rclone config.
|
||||
rclone from a machine with a browser. Use as instructed by rclone config.
|
||||
See also the [remote setup documentation](/remote_setup).
|
||||
|
||||
The command requires 1-3 arguments:
|
||||
|
||||
- fs name (e.g., "drive", "s3", etc.)
|
||||
- Name of a backend (e.g. "drive", "s3")
|
||||
- Either a base64 encoded JSON blob obtained from a previous rclone config session
|
||||
- Or a client_id and client_secret pair obtained from the remote service
|
||||
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
|
||||
func TestAuthorizeCommand(t *testing.T) {
|
||||
// Test that the Use string is correctly formatted
|
||||
if commandDefinition.Use != "authorize <fs name> [base64_json_blob | client_id client_secret]" {
|
||||
if commandDefinition.Use != "authorize <backendname> [base64_json_blob | client_id client_secret]" {
|
||||
t.Errorf("Command Use string doesn't match expected format: %s", commandDefinition.Use)
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ func TestAuthorizeCommand(t *testing.T) {
|
||||
}
|
||||
|
||||
helpOutput := buf.String()
|
||||
if !strings.Contains(helpOutput, "authorize <fs name>") {
|
||||
if !strings.Contains(helpOutput, "authorize <backendname>") {
|
||||
t.Errorf("Help output doesn't contain correct usage information")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,15 +4,19 @@ package bilib
|
||||
import (
|
||||
"bytes"
|
||||
"log/slog"
|
||||
"sync"
|
||||
|
||||
"github.com/rclone/rclone/fs/log"
|
||||
)
|
||||
|
||||
// CaptureOutput runs a function capturing its output at log level INFO.
|
||||
func CaptureOutput(fun func()) []byte {
|
||||
var mu sync.Mutex
|
||||
buf := &bytes.Buffer{}
|
||||
oldLevel := log.Handler.SetLevel(slog.LevelInfo)
|
||||
log.Handler.SetOutput(func(level slog.Level, text string) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
buf.WriteString(text)
|
||||
})
|
||||
defer func() {
|
||||
@@ -20,5 +24,7 @@ func CaptureOutput(fun func()) []byte {
|
||||
log.Handler.SetLevel(oldLevel)
|
||||
}()
|
||||
fun()
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
@@ -707,8 +707,7 @@ func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, res
|
||||
prettyprint(dstList.list, "dstList", fs.LogLevelDebug)
|
||||
|
||||
// clear stats so we only do this once
|
||||
accounting.MaxCompletedTransfers = 0
|
||||
accounting.Stats(ctx).PruneTransfers()
|
||||
accounting.Stats(ctx).RemoveDoneTransfers()
|
||||
}
|
||||
|
||||
if b.DebugName != "" {
|
||||
|
||||
@@ -245,10 +245,8 @@ func (b *bisyncRun) fastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.
|
||||
}
|
||||
}
|
||||
|
||||
b.SyncCI = fs.GetConfig(ctxCopy) // allows us to request graceful shutdown
|
||||
if accounting.MaxCompletedTransfers != -1 {
|
||||
accounting.MaxCompletedTransfers = -1 // we need a complete list in the event of graceful shutdown
|
||||
}
|
||||
b.SyncCI = fs.GetConfig(ctxCopy) // allows us to request graceful shutdown
|
||||
accounting.Stats(ctxCopy).SetMaxCompletedTransfers(-1) // we need a complete list in the event of graceful shutdown
|
||||
ctxCopy, b.CancelSync = context.WithCancel(ctxCopy)
|
||||
b.testFn()
|
||||
err := sync.Sync(ctxCopy, fdst, fsrc, b.opt.CreateEmptySrcDirs)
|
||||
|
||||
17
cmd/cmd.go
17
cmd/cmd.go
@@ -23,6 +23,7 @@ import (
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/cluster"
|
||||
"github.com/rclone/rclone/fs/config/configfile"
|
||||
"github.com/rclone/rclone/fs/config/configflags"
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
@@ -481,6 +482,22 @@ func initConfig() {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Run as a cluster worker if configured, otherwise ignoring
|
||||
// the command given on the command line
|
||||
if ci.Cluster != "" {
|
||||
if ci.ClusterID == "" || ci.ClusterID == "0" {
|
||||
fs.Infof(nil, "Running in cluster mode %q as controller", ci.ClusterID)
|
||||
} else {
|
||||
fs.Infof(nil, "Running in cluster mode %q as worker with id %q", ci.ClusterID, ci.ClusterID)
|
||||
worker, err := cluster.NewWorker(ctx)
|
||||
if err != nil || worker == nil {
|
||||
fs.Fatalf(nil, "Failed to start cluster worker: %v", err)
|
||||
}
|
||||
// Do not continue with the main thread
|
||||
select {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func resolveExitCode(err error) {
|
||||
|
||||
@@ -37,6 +37,7 @@ func init() {
|
||||
configCommand.AddCommand(configDisconnectCommand)
|
||||
configCommand.AddCommand(configUserInfoCommand)
|
||||
configCommand.AddCommand(configEncryptionCommand)
|
||||
configCommand.AddCommand(configStringCommand)
|
||||
}
|
||||
|
||||
var configCommand = &cobra.Command{
|
||||
@@ -613,3 +614,55 @@ If the config file is not encrypted it will return a non zero exit code.`, "|",
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var configStringCommand = &cobra.Command{
|
||||
Use: "string <remote>",
|
||||
Short: `Print connection string for a single remote.`,
|
||||
Long: strings.ReplaceAll(`Print a connection string for a single remote.
|
||||
|
||||
The [connection strings](/docs/#connection-strings) can be used
|
||||
wherever a remote is needed and can be more convenient than using the
|
||||
config file, especially if using the RC API.
|
||||
|
||||
Backend parameters may be provided to the command also.
|
||||
|
||||
Example:
|
||||
|
||||
|||sh
|
||||
$ rclone config string s3:rclone --s3-no-check-bucket
|
||||
:s3,access_key_id=XXX,no_check_bucket,provider=AWS,region=eu-west-2,secret_access_key=YYY:rclone
|
||||
|||
|
||||
|
||||
**NB** the strings are not quoted for use in shells (eg bash,
|
||||
powershell, windows cmd). Most will work if enclosed in "double
|
||||
quotes", however connection strings that contain double quotes will
|
||||
require further quoting which is very shell dependent.
|
||||
|
||||
`, "|", "`"),
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.72",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
remote := args[0]
|
||||
fsInfo, _, fsPath, m, err := fs.ConfigFs(remote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Find the overridden options and construct the string
|
||||
overridden := fsInfo.Options.NonDefault(m)
|
||||
var out strings.Builder
|
||||
out.WriteRune(':')
|
||||
out.WriteString(fsInfo.Name)
|
||||
config := overridden.Human()
|
||||
if config != "" {
|
||||
out.WriteRune(',')
|
||||
out.WriteString(config)
|
||||
}
|
||||
out.WriteRune(':')
|
||||
out.WriteString(fsPath)
|
||||
fmt.Println(out.String())
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/cluster"
|
||||
"github.com/rclone/rclone/fs/rc"
|
||||
"github.com/rclone/rclone/fs/rc/rcflags"
|
||||
"github.com/rclone/rclone/fs/rc/rcserver"
|
||||
@@ -38,6 +39,8 @@ See the [rc documentation](/rc/) for more info on the rc flags.
|
||||
"groups": "RC",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
ctx := context.Background()
|
||||
|
||||
cmd.CheckArgs(0, 1, command, args)
|
||||
if rc.Opt.Enabled {
|
||||
fs.Fatalf(nil, "Don't supply --rc flag when using rcd")
|
||||
@@ -49,6 +52,12 @@ See the [rc documentation](/rc/) for more info on the rc flags.
|
||||
rc.Opt.Files = args[0]
|
||||
}
|
||||
|
||||
// Start the cluster worker if configured
|
||||
_, err := cluster.NewWorker(ctx)
|
||||
if err != nil {
|
||||
fs.Fatalf(nil, "Failed to start cluster worker: %v", err)
|
||||
}
|
||||
|
||||
s, err := rcserver.Start(context.Background(), &rc.Opt)
|
||||
if err != nil {
|
||||
fs.Fatalf(nil, "Failed to start remote control: %v", err)
|
||||
|
||||
@@ -41,9 +41,10 @@ var OptionsInfo = fs.Options{}.
|
||||
|
||||
// Options required for http server
|
||||
type Options struct {
|
||||
Auth libhttp.AuthConfig
|
||||
HTTP libhttp.Config
|
||||
Template libhttp.TemplateConfig
|
||||
Auth libhttp.AuthConfig
|
||||
HTTP libhttp.Config
|
||||
Template libhttp.TemplateConfig
|
||||
DisableZip bool
|
||||
}
|
||||
|
||||
// DefaultOpt is the default values used for Options
|
||||
@@ -69,6 +70,7 @@ func init() {
|
||||
flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
|
||||
vfsflags.AddFlags(flagSet)
|
||||
proxyflags.AddFlags(flagSet)
|
||||
flagSet.BoolVar(&Opt.DisableZip, "disable-zip", false, "Disable zip download of directories")
|
||||
cmdserve.Command.AddCommand(Command)
|
||||
cmdserve.AddRc("http", func(ctx context.Context, f fs.Fs, in rc.Params) (cmdserve.Handle, error) {
|
||||
// Read VFS Opts
|
||||
@@ -257,6 +259,24 @@ func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string
|
||||
return
|
||||
}
|
||||
dir := node.(*vfs.Dir)
|
||||
|
||||
if r.URL.Query().Get("download") == "zip" && !s.opt.DisableZip {
|
||||
fs.Infof(dirRemote, "%s: Zipping directory", r.RemoteAddr)
|
||||
zipName := path.Base(dirRemote)
|
||||
if dirRemote == "" {
|
||||
zipName = "root"
|
||||
}
|
||||
w.Header().Set("Content-Disposition", "attachment; filename=\""+zipName+".zip\"")
|
||||
w.Header().Set("Content-Type", "application/zip")
|
||||
w.Header().Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat))
|
||||
err := vfs.CreateZip(ctx, dir, w)
|
||||
if err != nil {
|
||||
serve.Error(ctx, dirRemote, w, "Failed to create zip", err)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
dirEntries, err := dir.ReadDirAll()
|
||||
if err != nil {
|
||||
serve.Error(ctx, dirRemote, w, "Failed to list directory", err)
|
||||
@@ -280,6 +300,8 @@ func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string
|
||||
// Set the Last-Modified header to the timestamp
|
||||
w.Header().Set("Last-Modified", dir.ModTime().UTC().Format(http.TimeFormat))
|
||||
|
||||
directory.DisableZip = s.opt.DisableZip
|
||||
|
||||
directory.Serve(w, r)
|
||||
}
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"flag"
|
||||
"io"
|
||||
stdfs "io/fs"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -75,6 +76,16 @@ func start(ctx context.Context, t *testing.T, f fs.Fs) (s *HTTP, testURL string)
|
||||
return s, testURL
|
||||
}
|
||||
|
||||
// setAllModTimes walks root and sets atime/mtime to t for every file & directory.
|
||||
func setAllModTimes(root string, t time.Time) error {
|
||||
return filepath.WalkDir(root, func(path string, d stdfs.DirEntry, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Chtimes(path, t, t)
|
||||
})
|
||||
}
|
||||
|
||||
var (
|
||||
datedObject = "two.txt"
|
||||
expectedTime = time.Date(2000, 1, 2, 3, 4, 5, 0, time.UTC)
|
||||
@@ -123,6 +134,8 @@ func testGET(t *testing.T, useProxy bool) {
|
||||
|
||||
f = nil
|
||||
} else {
|
||||
// set all the mod times to expectedTime
|
||||
require.NoError(t, setAllModTimes("testdata/files", expectedTime))
|
||||
// Create a test Fs
|
||||
var err error
|
||||
f, err = fs.NewFs(context.Background(), "testdata/files")
|
||||
@@ -233,6 +246,16 @@ func testGET(t *testing.T, useProxy bool) {
|
||||
Range: "bytes=3-",
|
||||
Golden: "testdata/golden/two3-.txt",
|
||||
},
|
||||
{
|
||||
URL: "/?download=zip",
|
||||
Status: http.StatusOK,
|
||||
Golden: "testdata/golden/root.zip",
|
||||
},
|
||||
{
|
||||
URL: "/three/?download=zip",
|
||||
Status: http.StatusOK,
|
||||
Golden: "testdata/golden/three.zip",
|
||||
},
|
||||
} {
|
||||
method := test.Method
|
||||
if method == "" {
|
||||
|
||||
BIN
cmd/serve/http/testdata/golden/root.zip
vendored
Normal file
BIN
cmd/serve/http/testdata/golden/root.zip
vendored
Normal file
Binary file not shown.
BIN
cmd/serve/http/testdata/golden/three.zip
vendored
Normal file
BIN
cmd/serve/http/testdata/golden/three.zip
vendored
Normal file
Binary file not shown.
@@ -2,6 +2,7 @@ package s3
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/gofakes3"
|
||||
"github.com/rclone/rclone/fs"
|
||||
@@ -12,25 +13,23 @@ type logger struct{}
|
||||
|
||||
// print log message
|
||||
func (l logger) Print(level gofakes3.LogLevel, v ...any) {
|
||||
var s string
|
||||
if len(v) == 0 {
|
||||
s = ""
|
||||
} else {
|
||||
var ok bool
|
||||
s, ok = v[0].(string)
|
||||
if !ok {
|
||||
s = fmt.Sprint(v[0])
|
||||
var b strings.Builder
|
||||
for i := range v {
|
||||
if i > 0 {
|
||||
fmt.Fprintf(&b, " ")
|
||||
}
|
||||
v = v[1:]
|
||||
fmt.Fprint(&b, v[i])
|
||||
}
|
||||
s := b.String()
|
||||
|
||||
switch level {
|
||||
default:
|
||||
fallthrough
|
||||
case gofakes3.LogErr:
|
||||
fs.Errorf("serve s3", s, v...)
|
||||
fs.Errorf("serve s3", s)
|
||||
case gofakes3.LogWarn:
|
||||
fs.Infof("serve s3", s, v...)
|
||||
fs.Infof("serve s3", s)
|
||||
case gofakes3.LogInfo:
|
||||
fs.Debugf("serve s3", s, v...)
|
||||
fs.Debugf("serve s3", s)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -125,6 +125,7 @@ WebDAV or S3, that work out of the box.)
|
||||
{{< provider name="Exaba" home="https://exaba.com/" config="/s3/#exaba" >}}
|
||||
{{< provider name="Fastmail Files" home="https://www.fastmail.com/" config="/webdav/#fastmail-files" >}}
|
||||
{{< provider name="FileLu Cloud Storage" home="https://filelu.com/" config="/filelu/" >}}
|
||||
{{< provider name="FileLu S5 (S3-Compatible Object Storage)" home="https://s5lu.com/" config="/s3/#filelu-s5" >}}
|
||||
{{< provider name="Files.com" home="https://www.files.com/" config="/filescom/" >}}
|
||||
{{< provider name="FlashBlade" home="https://www.purestorage.com/products/unstructured-data-storage.html" config="/s3/#pure-storage-flashblade" >}}
|
||||
{{< provider name="FTP" home="https://en.wikipedia.org/wiki/File_Transfer_Protocol" config="/ftp/" >}}
|
||||
@@ -133,9 +134,11 @@ WebDAV or S3, that work out of the box.)
|
||||
{{< provider name="Google Drive" home="https://www.google.com/drive/" config="/drive/" >}}
|
||||
{{< provider name="Google Photos" home="https://www.google.com/photos/about/" config="/googlephotos/" >}}
|
||||
{{< provider name="HDFS" home="https://hadoop.apache.org/" config="/hdfs/" >}}
|
||||
{{< provider name="Hetzner Object Storage" home="https://www.hetzner.com/storage/object-storage/" config="/s3/#hetzner" >}}
|
||||
{{< provider name="Hetzner Storage Box" home="https://www.hetzner.com/storage/storage-box" config="/sftp/#hetzner-storage-box" >}}
|
||||
{{< provider name="HiDrive" home="https://www.strato.de/cloud-speicher/" config="/hidrive/" >}}
|
||||
{{< provider name="HTTP" home="https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol" config="/http/" >}}
|
||||
{{< provider name="Huawei OBS" home="https://www.huaweicloud.com/intl/en-us/product/obs.html" config="/s3/#huawei-obs" >}}
|
||||
{{< provider name="iCloud Drive" home="https://icloud.com/" config="/iclouddrive/" >}}
|
||||
{{< provider name="ImageKit" home="https://imagekit.io" config="/imagekit/" >}}
|
||||
{{< provider name="Internet Archive" home="https://archive.org/" config="/internetarchive/" >}}
|
||||
@@ -179,7 +182,10 @@ WebDAV or S3, that work out of the box.)
|
||||
{{< provider name="QingStor" home="https://www.qingcloud.com/products/storage" config="/qingstor/" >}}
|
||||
{{< provider name="Qiniu Cloud Object Storage (Kodo)" home="https://www.qiniu.com/en/products/kodo" config="/s3/#qiniu" >}}
|
||||
{{< provider name="Quatrix by Maytech" home="https://www.maytech.net/products/quatrix-business" config="/quatrix/" >}}
|
||||
{{< provider name="Rabata Cloud Storage" home="https://rabata.io" config="/s3/#Rabata" >}}
|
||||
{{< provider name="RackCorp Object Storage" home="https://www.rackcorp.com/" config="/s3/#RackCorp" >}}
|
||||
{{< provider name="Rackspace Cloud Files" home="https://www.rackspace.com/cloud/files" config="/swift/" >}}
|
||||
{{< provider name="Rclone Serve S3" home="/commands/rclone_serve_s3/" config="/s3/#rclone" >}}
|
||||
{{< provider name="rsync.net" home="https://rsync.net/products/rclone.html" config="/sftp/#rsync-net" >}}
|
||||
{{< provider name="Scaleway" home="https://www.scaleway.com/object-storage/" config="/s3/#scaleway" >}}
|
||||
{{< provider name="Seafile" home="https://www.seafile.com/" config="/seafile/" >}}
|
||||
|
||||
@@ -1015,3 +1015,9 @@ put them back in again.` >}}
|
||||
- russcoss <russcoss@outlook.com>
|
||||
- Matt LaPaglia <mlapaglia@gmail.com>
|
||||
- Youfu Zhang <1315097+zhangyoufu@users.noreply.github.com>
|
||||
- juejinyuxitu <juejinyuxitu@outlook.com>
|
||||
- iTrooz <hey@itrooz.fr>
|
||||
- Microscotch <github.com@microscotch.net>
|
||||
- Andrew Ruthven <andrew@etc.gen.nz>
|
||||
- spiffytech <git@spiffy.tech>
|
||||
- Dulani Woods <Dulani@gmail.com>
|
||||
|
||||
@@ -84,7 +84,7 @@ y/e/d> y
|
||||
```
|
||||
|
||||
See the [remote setup docs](/remote_setup/) for how to set it up on a
|
||||
machine with no Internet browser available.
|
||||
machine without an internet-connected web browser available.
|
||||
|
||||
Note that rclone runs a webserver on your local machine to collect the
|
||||
token as returned from Box. This only runs from the moment it opens
|
||||
|
||||
@@ -6,6 +6,31 @@ description: "Rclone Changelog"
|
||||
|
||||
# Changelog
|
||||
|
||||
## v1.71.1 - 2025-09-24
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.71.0...v1.71.1)
|
||||
|
||||
- Bug Fixes
|
||||
- bisync: Fix error handling for renamed conflicts (nielash)
|
||||
- march: Fix deadlock when using --fast-list on syncs (Nick Craig-Wood)
|
||||
- operations: Fix partial name collisions for non --inplace copies (Nick Craig-Wood)
|
||||
- pacer: Fix deadlock with --max-connections (Nick Craig-Wood)
|
||||
- doc fixes (albertony, anon-pradip, Claudius Ellsel, dougal, Jean-Christophe Cura, Nick Craig-Wood, nielash)
|
||||
- Mount
|
||||
- Do not log successful unmount as an error (Tilman Vogel)
|
||||
- VFS
|
||||
- Fix SIGHUP killing serve instead of flushing directory caches (dougal)
|
||||
- Local
|
||||
- Fix rmdir "Access is denied" on windows (nielash)
|
||||
- Box
|
||||
- Fix about after change in API return (Nick Craig-Wood)
|
||||
- Combine
|
||||
- Propagate SlowHash feature (skbeh)
|
||||
- Drive
|
||||
- Update making your own client ID instructions (Ed Craig-Wood)
|
||||
- Internet Archive
|
||||
- Fix server side copy files with spaces (Nick Craig-Wood)
|
||||
|
||||
## v1.71.0 - 2025-08-22
|
||||
|
||||
[See commits](https://github.com/rclone/rclone/compare/v1.70.0...v1.71.0)
|
||||
|
||||
217
docs/content/cluster.md
Normal file
217
docs/content/cluster.md
Normal file
@@ -0,0 +1,217 @@
|
||||
---
|
||||
title: "Cluster"
|
||||
description: "Clustering rclone"
|
||||
versionIntroduced: "v1.72"
|
||||
---
|
||||
|
||||
# Cluster
|
||||
|
||||
Rclone has a cluster mode invoked with the `--cluster` flag. This
|
||||
enables a group of rclone instances to work together on doing a sync.
|
||||
|
||||
This is controlled by a group of flags starting with `--cluster-` and
|
||||
enabled with the `--cluster` flag.
|
||||
|
||||
```text
|
||||
--cluster string Enable cluster mode with remote to use as shared storage
|
||||
--cluster-batch-files int Max number of files for a cluster batch (default 1000)
|
||||
--cluster-batch-size SizeSuffix Max size of files for a cluster batch (default 1Ti)
|
||||
--cluster-cleanup ClusterCleanup Control which cluster files get cleaned up (default full)
|
||||
--cluster-id string Set to an ID for the cluster. An ID of 0 or empty becomes the controller
|
||||
--cluster-quit-workers Set to cause the controller to quit the workers when it finished
|
||||
```
|
||||
|
||||
The command might look something like this which is a normal rclone
|
||||
command but with a new `--cluster` flag which points at an rclone
|
||||
remote defining the cluster storage. This is the signal to rclone that
|
||||
it should engage the cluster mode with a controller and workers.
|
||||
|
||||
```sh
|
||||
rclone copy source: destination: --flags --cluster /work
|
||||
rclone copy source: destination: --flags --cluster s3:bucket
|
||||
```
|
||||
|
||||
This works only with the `rclone sync`, `copy` and `move` commands.
|
||||
|
||||
If the remote specified by the `--cluster` command is inside the
|
||||
`source:` or `destination:` it must be excluded with the filter flags.
|
||||
|
||||
Any rclone remotes used in the transfer must be defined in all cluster
|
||||
nodes. Defining remotes with connection strings will get around that
|
||||
problem.
|
||||
|
||||
## Terminology
|
||||
|
||||
The cluster has two logical groups, the controller and the workers.
|
||||
There is one controller and many workers.
|
||||
|
||||
The controller and the workers will communicate with each other by
|
||||
creating files in the remote pointed to by the `--cluster` flag. This
|
||||
could be for example an S3 bucket or a Kubernetes PVC.
|
||||
|
||||
The files are JSON serialized rc commands. Multiple commands are sent
|
||||
using `rc/batch`. The commands flow `pending` →`processing` → `done` →
|
||||
`finished`
|
||||
|
||||
```text
|
||||
└── queue
|
||||
├── pending ← pending task files created by the controller
|
||||
├── processing ← claimed tasks being executed by a worker
|
||||
├── done ← finished tasks awaiting the controller to read the result
|
||||
└── finished ← completed task files
|
||||
```
|
||||
|
||||
The cluster can be set up in two ways as a persistent cluster or as a
|
||||
transient cluster.
|
||||
|
||||
### Persistent cluster
|
||||
|
||||
Run a cluster of workers using
|
||||
|
||||
```sh
|
||||
rclone rcd --cluster /work
|
||||
```
|
||||
|
||||
Then run rclone commands when required on the cluster:
|
||||
|
||||
```sh
|
||||
rclone copy source: destination: --flags --cluster /work
|
||||
```
|
||||
|
||||
In this mode there can be many rclone commands executing at once.
|
||||
|
||||
### Transient cluster
|
||||
|
||||
Run many copies of rclone simultaneously, for example in a Kubernetes
|
||||
indexed job.
|
||||
|
||||
The rclone with `--cluster-id 0` becomes the controller and the others
|
||||
become the workers. For a Kubernetes indexed job, setting
|
||||
`--cluster-id $(JOB_COMPLETION_INDEX)` would work well.
|
||||
|
||||
Add the `--cluster-quit-workers` flag - this will cause the controller
|
||||
to make sure the workers exit when it has finished.
|
||||
|
||||
All instances of rclone run a command like this so the whole cluster
|
||||
can only run one rclone command:
|
||||
|
||||
```sh
|
||||
rclone copy source: destination: --flags --cluster /work --cluster-id $(JOB_COMPLETION_INDEX) --cluster-quit-workers
|
||||
```
|
||||
|
||||
## Controller
|
||||
|
||||
The controller runs the sync and work distribution.
|
||||
|
||||
- It does the listing of the source and destination directories
|
||||
comparing files in order to find files which need to be transferred.
|
||||
- Files which need to be transferred are then batched into jobs of
|
||||
`--cluster-batch-files` files to transfer or `--cluster-batch-size`
|
||||
max size in `queue/pending` for the workers to pick up.
|
||||
- It watches `queue/done` for finished jobs and updates the transfer
|
||||
statistics and logs any errors, accordingly moving the job to
|
||||
`queue/finished`.
|
||||
|
||||
Once the sync is complete, if `--cluster-quit-workers` is set, then it
|
||||
sends the workers a special command which causes them all to exit.
|
||||
|
||||
The controller only sends transfer jobs to the workers. All the other
|
||||
tasks (eg listing, comparing) are done by the controller. The
|
||||
controller does not execute any transfer tasks itself.
|
||||
|
||||
The controller reads worker status as written to `queue/status` and
|
||||
will detect workers which have stopped. If it detects a failed worker
|
||||
then it will re-assign any outstanding work.
|
||||
|
||||
## Workers
|
||||
|
||||
The workers job is entirely to act as API endpoints that receive their
|
||||
work via files in `/work`. Then
|
||||
|
||||
- Read work in `queue/pending`
|
||||
- Attempt to rename into `queue/processing`
|
||||
- If the cluster work directory supports atomic renames, then use
|
||||
those, otherwise read the file, write the copy, delete the original.
|
||||
If the delete fails then the rename was not successful (possible on
|
||||
s3 backends).
|
||||
- If successful then do that item of work. If not successful another
|
||||
worker got there first and sleep for a bit then retry.
|
||||
- After the copy is complete then remove the `queue/processing` file
|
||||
or rename it into `queue/finished` if the `--cluster-cleanup` flag
|
||||
allows it.
|
||||
- Repeat
|
||||
|
||||
Every second the worker will write a status file in `queue/status` to
|
||||
be read by the controller.
|
||||
|
||||
## Layout of the work directory
|
||||
|
||||
The format of the files in this directory may change without notice
|
||||
but the layout is documented here as it can help debugging.
|
||||
|
||||
```text
|
||||
/work - root of the work directory
|
||||
└── queue - files to control the queue
|
||||
├── done - job files that are finished and read
|
||||
├── finished - job files that are finished but not yet read
|
||||
├── pending - job files that are not started yet
|
||||
├── processing - job files that are running
|
||||
└── status - worker status files
|
||||
```
|
||||
|
||||
If debugging use `--cluster-cleanup none` to leave the completed files
|
||||
in the directory layout.
|
||||
|
||||
## Flags
|
||||
|
||||
### --cluster string
|
||||
|
||||
This enables the cluster mode. Without this flag, all the other
|
||||
cluster flags are ignored. This should be given a remote which can be
|
||||
a local directory, eg `/work` or a remote directory, eg `s3:bucket`.
|
||||
|
||||
### --cluster-batch-files int
|
||||
|
||||
This controls the number of files copied in a cluster batch. Setting
|
||||
this larger may be more efficient but it means the statistics will be
|
||||
less accurate on the controller (default 1000).
|
||||
|
||||
### --cluster-batch-size SizeSuffix
|
||||
|
||||
This controls the total size of files in a cluster batch. If the size
|
||||
of the files in a batch exceeds this number then the batch will be
|
||||
sent to the workers. Setting this larger may be more efficient but it
|
||||
means the statistics will be less accurate on the controller. (default
|
||||
1TiB)
|
||||
|
||||
### --cluster-cleanup ClusterCleanup
|
||||
|
||||
Controls which cluster files get cleaned up.
|
||||
|
||||
- `full` - clean all work files (default)
|
||||
- `completed` - clean completed work files but leave the errors and status
|
||||
- `none` - leave all the file (useful for debugging)
|
||||
|
||||
### --cluster-id string
|
||||
|
||||
Set an ID for the rclone instance. This can be a string or a number.
|
||||
An ID of 0 will become the controller otherwise the instance will
|
||||
become a worker. If this flag isn't supplied or the value is empty,
|
||||
then a random string will be used instead.
|
||||
|
||||
### --cluster-quit-workers
|
||||
|
||||
If this flag is set, then when the controller finishes its sync task
|
||||
it will quit all the workers before it exits.
|
||||
|
||||
## Not implemented
|
||||
|
||||
Here are some features from the original design which are not
|
||||
implemented yet:
|
||||
|
||||
- the controller will not notice if workers die or fail to complete
|
||||
their tasks
|
||||
- the controller does not re-assign the workers work if necessary
|
||||
- the controller does not restart the sync
|
||||
- the workers do not write any status files (but the stats are
|
||||
correctly accounted)
|
||||
@@ -384,6 +384,9 @@ does not work on Windows.)
|
||||
rclone copy ':http,url="https://example.com":path/to/dir' /tmp/dir
|
||||
```
|
||||
|
||||
You can use [rclone config string](/commands/rclone_config_string/) to
|
||||
convert a remote into a connection string.
|
||||
|
||||
#### Connection strings, config and logging
|
||||
|
||||
If you supply extra configuration to a backend by command line flag,
|
||||
@@ -3312,6 +3315,15 @@ For the remote control options and for instructions on how to remote control rcl
|
||||
|
||||
See [the remote control section](/rc/).
|
||||
|
||||
## Cluster
|
||||
|
||||
For the cluster options and for instructions on how to cluster rclone:
|
||||
|
||||
- `--cluster`
|
||||
- Anything starting with `--cluster-`
|
||||
|
||||
See the [cluster section](/cluster/).
|
||||
|
||||
## Logging
|
||||
|
||||
rclone has 4 levels of logging, `ERROR`, `NOTICE`, `INFO` and `DEBUG`.
|
||||
|
||||
@@ -96,7 +96,7 @@ y/e/d> y
|
||||
```
|
||||
|
||||
See the [remote setup docs](/remote_setup/) for how to set it up on a
|
||||
machine with no Internet browser available.
|
||||
machine without an internet-connected web browser available.
|
||||
|
||||
Note that rclone runs a webserver on your local machine to collect the
|
||||
token as returned from Google if using web browser to automatically
|
||||
|
||||
@@ -60,7 +60,7 @@ y/e/d> y
|
||||
```
|
||||
|
||||
See the [remote setup docs](/remote_setup/) for how to set it up on a
|
||||
machine with no Internet browser available.
|
||||
machine without an internet-connected web browser available.
|
||||
|
||||
Note that rclone runs a webserver on your local machine to collect the
|
||||
token as returned from Dropbox. This only
|
||||
|
||||
@@ -100,7 +100,9 @@ Choose a number from below, or type in your own value
|
||||
\ "us-east1"
|
||||
13 / Northern Virginia.
|
||||
\ "us-east4"
|
||||
14 / Oregon.
|
||||
14 / Ohio.
|
||||
\ "us-east5"
|
||||
15 / Oregon.
|
||||
\ "us-west1"
|
||||
location> 12
|
||||
The storage class to use when storing objects in Google Cloud Storage.
|
||||
@@ -147,7 +149,7 @@ y/e/d> y
|
||||
```
|
||||
|
||||
See the [remote setup docs](/remote_setup/) for how to set it up on a
|
||||
machine with no Internet browser available.
|
||||
machine without an internet-connected web browser available.
|
||||
|
||||
Note that rclone runs a webserver on your local machine to collect the
|
||||
token as returned from Google if using web browser to automatically
|
||||
|
||||
@@ -97,7 +97,7 @@ y/e/d> y
|
||||
```
|
||||
|
||||
See the [remote setup docs](/remote_setup/) for how to set it up on a
|
||||
machine with no Internet browser available.
|
||||
machine without an internet-connected web browser available.
|
||||
|
||||
Note that rclone runs a webserver on your local machine to collect the
|
||||
token as returned from Google if using web browser to automatically
|
||||
|
||||
@@ -72,7 +72,7 @@ and hence should not be shared with other persons.**
|
||||
See the [below section](#keeping-your-tokens-safe) for more information.
|
||||
|
||||
See the [remote setup docs](/remote_setup/) for how to set it up on a
|
||||
machine with no Internet browser available.
|
||||
machine without an internet-connected web browser available.
|
||||
|
||||
Note that rclone runs a webserver on your local machine to collect the
|
||||
token as returned from HiDrive. This only runs from the moment it opens
|
||||
|
||||
@@ -7,106 +7,171 @@ versionIntroduced: "v1.43"
|
||||
# {{< icon "fa fa-cloud" >}} Jottacloud
|
||||
|
||||
Jottacloud is a cloud storage service provider from a Norwegian company, using
|
||||
its own datacenters in Norway. In addition to the official service at
|
||||
[jottacloud.com](https://www.jottacloud.com/), it also provides white-label
|
||||
solutions to different companies, such as:
|
||||
its own datacenters in Norway.
|
||||
|
||||
In addition to the official service at [jottacloud.com](https://www.jottacloud.com/),
|
||||
it also provides white-label solutions to different companies. The following
|
||||
are currently supported by this backend, using a different authentication setup
|
||||
as described [below](#whitelabel-authentication):
|
||||
|
||||
- Elkjøp (with subsidiaries):
|
||||
- Elkjøp Cloud (cloud.elkjop.no)
|
||||
- Elgiganten Cloud (cloud.elgiganten.dk)
|
||||
- Elgiganten Cloud (cloud.elgiganten.se)
|
||||
- ELKO Cloud (cloud.elko.is)
|
||||
- Gigantti Cloud (cloud.gigantti.fi)
|
||||
- Telia
|
||||
- Telia Cloud (cloud.telia.se)
|
||||
- Telia Sky (sky.telia.no)
|
||||
- Tele2
|
||||
- Tele2 Cloud (mittcloud.tele2.se)
|
||||
- Onlime
|
||||
- Onlime Cloud Storage (onlime.dk)
|
||||
- Elkjøp (with subsidiaries):
|
||||
- Elkjøp Cloud (cloud.elkjop.no)
|
||||
- Elgiganten Sweden (cloud.elgiganten.se)
|
||||
- Elgiganten Denmark (cloud.elgiganten.dk)
|
||||
- Giganti Cloud (cloud.gigantti.fi)
|
||||
- ELKO Cloud (cloud.elko.is)
|
||||
|
||||
Most of the white-label versions are supported by this backend, although may
|
||||
require different authentication setup - described below.
|
||||
- Onlime (onlime.dk)
|
||||
- MediaMarkt
|
||||
- MediaMarkt Cloud (mediamarkt.jottacloud.com)
|
||||
- Let's Go Cloud (letsgo.jotta.cloud)
|
||||
|
||||
Paths are specified as `remote:path`
|
||||
|
||||
Paths may be as deep as required, e.g. `remote:directory/subdirectory`.
|
||||
|
||||
## Authentication types
|
||||
## Authentication
|
||||
|
||||
Some of the whitelabel versions uses a different authentication method than the
|
||||
official service, and you have to choose the correct one when setting up the remote.
|
||||
Authentication in Jottacloud is in general based on OAuth and OpenID Connect
|
||||
(OIDC). There are different variants to choose from, depending on which service
|
||||
you are using, e.g. a white-label service may only support one of them. Note
|
||||
that there is no documentation to rely on, so the descriptions provided here
|
||||
are based on observations and may not be accurate.
|
||||
|
||||
### Standard authentication
|
||||
Jottacloud uses two optional OAuth security mechanisms, referred to as "Refresh
|
||||
Token Rotation" and "Automatic Reuse Detection", which has some implications.
|
||||
Access tokens normally have one hour expiry, after which they need to be
|
||||
refreshed (rotated), an operation that requires the refresh token to be
|
||||
supplied. Rclone does this automatically. This is standard OAuth. But in
|
||||
Jottacloud, such a refresh operation not only creates a new access token, but
|
||||
also refresh token, and invalidates the existing refresh token, the one that
|
||||
was supplied. It keeps track of the history of refresh tokens, sometimes
|
||||
referred to as a token family, descending from the original refresh token that
|
||||
was issued after the initial authentication. This is used to detect any
|
||||
attempts at reusing old refresh tokens, and trigger an immediate invalidation of
|
||||
the current refresh token, and effectively the entire refresh token family.
|
||||
|
||||
The standard authentication method used by the official service (jottacloud.com),
|
||||
as well as some of the whitelabel services, requires you to generate a single-use
|
||||
personal login token from the account security settings in the service's web
|
||||
interface. Log in to your account, go to "Settings" and then "Security", or use
|
||||
the direct link presented to you by rclone when configuring the remote:
|
||||
<https://www.jottacloud.com/web/secure>. Scroll down to the section "Personal login
|
||||
token", and click the "Generate" button. Note that if you are using a whitelabel
|
||||
service you probably can't use the direct link, you need to find the same page in
|
||||
their dedicated web interface, and also it may be in a different location than
|
||||
described above.
|
||||
|
||||
To access your account from multiple instances of rclone, you need to configure
|
||||
each of them with a separate personal login token. E.g. you create a Jottacloud
|
||||
remote with rclone in one location, and copy the configuration file to a second
|
||||
location where you also want to run rclone and access the same remote. Then you
|
||||
need to replace the token for one of them, using the [config reconnect](https://rclone.org/commands/rclone_config_reconnect/)
|
||||
command, which requires you to generate a new personal login token and supply
|
||||
as input. If you do not do this, the token may easily end up being invalidated,
|
||||
resulting in both instances failing with an error message something along the
|
||||
When the current refresh token has been invalidated, next time rclone tries to
|
||||
perform a token refresh, it will fail with an error message something along the
|
||||
lines of:
|
||||
|
||||
```text
|
||||
oauth2: cannot fetch token: 400 Bad Request
|
||||
Response: {"error":"invalid_grant","error_description":"Stale token"}
|
||||
CRITICAL: Failed to create file system for "remote:": (...): couldn't fetch token: invalid_grant: maybe token expired? - try refreshing with "rclone config reconnect remote:"
|
||||
```
|
||||
|
||||
When this happens, you need to replace the token as described above to be able
|
||||
to use your remote again.
|
||||
If you run rclone with verbosity level 2 (`-vv`), you will see a debug message
|
||||
with an additional error description from the OAuth response:
|
||||
|
||||
All personal login tokens you have taken into use will be listed in the web
|
||||
interface under "My logged in devices", and from the right side of that list
|
||||
you can click the "X" button to revoke individual tokens.
|
||||
```text
|
||||
DEBUG : remote: got fatal oauth error: oauth2: "invalid_grant" "Session doesn't have required client"
|
||||
```
|
||||
|
||||
### Legacy authentication
|
||||
(The error description used to be "Stale token" instead of "Session doesn't
|
||||
have required client", so you may see references to that in older descriptions
|
||||
of this situation.)
|
||||
|
||||
If you are using one of the whitelabel versions (e.g. from Elkjøp) you may not
|
||||
have the option to generate a CLI token. In this case you'll have to use the
|
||||
legacy authentication. To do this select yes when the setup asks for legacy
|
||||
authentication and enter your username and password. The rest of the setup is
|
||||
identical to the default setup.
|
||||
When this happens, you need to re-authenticate to be able to use your remote
|
||||
again, e.g. using the [config reconnect](/commands/rclone_config_reconnect/)
|
||||
command as suggested in the error message. This will create an entirely new
|
||||
refresh token (family).
|
||||
|
||||
### Telia Cloud authentication
|
||||
A typical example of how you may end up in this situation, is if you create
|
||||
a Jottacloud remote with rclone in one location, and then copy the
|
||||
configuration file to a second location where you start using rclone to access
|
||||
the same remote. Eventually there will now be a token refresh attempt with an
|
||||
invalidated token, i.e. refresh token reuse, resulting in both instances
|
||||
starting to fail with the "invalid_grant" error. It is possible to copy remote
|
||||
configurations, but you must then replace the token for one of them using the
|
||||
[config reconnect](https://rclone.org/commands/rclone_config_reconnect/)
|
||||
command.
|
||||
|
||||
Similar to other whitelabel versions Telia Cloud doesn't offer the option of
|
||||
creating a CLI token, and additionally uses a separate authentication flow
|
||||
where the username is generated internally. To setup rclone to use Telia Cloud,
|
||||
choose Telia Cloud authentication in the setup. The rest of the setup is
|
||||
identical to the default setup.
|
||||
You can get some overview of your active tokens in your service's web user
|
||||
interface, if you navigate to "Settings" and then "Security" (in which case
|
||||
you end up at <https://www.jottacloud.com/web/secure> or similar). Down on
|
||||
that page you have a section "My logged in devices". This contains a list
|
||||
of entries which seemingly represents currently valid refresh tokens, or
|
||||
refresh token families. From the right side of that list you can click a
|
||||
button ("X") to revoke (invalidate) it, which means you will still have access
|
||||
using an existing access token until that expires, but you will not be able to
|
||||
perform a token refresh. Note that this entire "My logged in devices" feature
|
||||
seems to behave a bit differently with different authentication variants and
|
||||
with use of the different (white-label) services.
|
||||
|
||||
### Tele2 Cloud authentication
|
||||
### Standard
|
||||
|
||||
As Tele2-Com Hem merger was completed this authentication can be used for former
|
||||
Com Hem Cloud and Tele2 Cloud customers as no support for creating a CLI token
|
||||
exists, and additionally uses a separate authentication flow where the username
|
||||
is generated internally. To setup rclone to use Tele2 Cloud, choose Tele2 Cloud
|
||||
authentication in the setup. The rest of the setup is identical to the default setup.
|
||||
This is an OAuth variant designed for command-line applications. It is
|
||||
primarily supported by the official service (jottacloud.com), but may also be
|
||||
supported by some of the white-label services. The information necessary to be
|
||||
able to perform authentication, like domain name and endpoint to connect to,
|
||||
is found automatically (it is encoded into the supplied login token, described
|
||||
next), so you do not need to specify which service to configure.
|
||||
|
||||
### Onlime Cloud Storage authentication
|
||||
When configuring a remote, you are asked to enter a single-use personal login
|
||||
token, which you must manually generate from the account security settings in
|
||||
the service's web interface. You do not need a web browser on the same machine
|
||||
like with traditional OAuth, but need to use a web browser somewhere, and be
|
||||
able to copy the generated string into your rclone configuration session.
|
||||
Log in to your service's web user interface, navigate to "Settings" and then
|
||||
"Security", or, for the official service, use the direct link presented to you
|
||||
by rclone when configuring the remote: <https://www.jottacloud.com/web/secure>.
|
||||
Scroll down to the section "Personal login token", and click the "Generate"
|
||||
button. Copy the presented string and paste it where rclone asks for it. Rclone
|
||||
will then use this to perform an initial token request, and receive a regular
|
||||
OAuth token which it stores in your remote configuration. There will then also
|
||||
be a new entry in the "My logged in devices" list in the web interface, with
|
||||
device name and application name "Jottacloud CLI".
|
||||
|
||||
Onlime has sold access to Jottacloud proper, while providing localized support
|
||||
to Danish Customers, but have recently set up their own hosting, transferring
|
||||
their customers from Jottacloud servers to their own ones.
|
||||
Each time a new token is created this way, i.e. a new personal login token is
|
||||
generated and traded in for an OAuth token, you get an entirely new refresh
|
||||
token family, with a new entry in the "My logged in devices". You can create as
|
||||
many remotes as you want, and use multiple instances of rclone on same or
|
||||
different machine, as long as you configure them separately like this, and not
|
||||
get yourself into the refresh token reuse issue described above.
|
||||
|
||||
This, of course, necessitates using their servers for authentication, but
|
||||
otherwise functionality and architecture seems equivalent to Jottacloud.
|
||||
### Traditional
|
||||
|
||||
To setup rclone to use Onlime Cloud Storage, choose Onlime Cloud authentication
|
||||
in the setup. The rest of the setup is identical to the default setup.
|
||||
Jottacloud also supports a more traditional OAuth variant. Most of the
|
||||
white-label services support this, and for many of them this is the only
|
||||
alternative because they do not support personal login tokens. This method
|
||||
relies on pre-defined service-specific domain names and endpoints, and rclone
|
||||
needs you to specify which service to configure. This also means that any
|
||||
changes to existing or additions of new white-label services need an update
|
||||
in the rclone backend implementation.
|
||||
|
||||
When configuring a remote, you must interactively login to an OAuth
|
||||
authorization web site, and a one-time authorization code is sent back to
|
||||
rclone behind the scene, which it uses to request an OAuth token. This means
|
||||
that you need to be on a machine with an internet-connected web browser. If you
|
||||
need it on a machine where this is not the case, then you will have to create
|
||||
the configuration on a different machine and copy it from there. The Jottacloud
|
||||
backend does not support the `rclone authorize` command. See the
|
||||
[remote setup docs](/remote_setup/) for details.
|
||||
|
||||
Jottacloud exerts some form of strict session management when authenticating
|
||||
using this method. This leads to some unexpected cases of the "invalid_grant"
|
||||
error described above, and effectively limits you to only use of a single
|
||||
active authentication on the same machine. I.e. you can only create a single
|
||||
rclone remote, and you can't even log in with the service's official desktop
|
||||
client while having a rclone remote configured, or else you will eventually get
|
||||
all sessions invalidated and be forced to re-authenticate.
|
||||
|
||||
When you have successfully authenticated, there will be an entry in the
|
||||
"My logged in devices" list in the web interface representing your session. It
|
||||
will typically be listed with application name "Jottacloud for Desktop" or
|
||||
similar (it depends on the white-label service configuration).
|
||||
|
||||
### Legacy
|
||||
|
||||
Originally Jottacloud used an OAuth variant which required your account's
|
||||
username and password to be specified. When Jottacloud migrated to the newer
|
||||
methods, some white-label versions (those from Elkjøp) still used this legacy
|
||||
method for a long time. Currently there are no known uses of this, it is still
|
||||
supported by rclone, but the support will be removed in a future version.
|
||||
|
||||
## Configuration
|
||||
|
||||
@@ -125,7 +190,10 @@ n) New remote
|
||||
s) Set configuration password
|
||||
q) Quit config
|
||||
n/s/q> n
|
||||
|
||||
Enter name for new remote.
|
||||
name> remote
|
||||
|
||||
Option Storage.
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value.
|
||||
@@ -134,60 +202,63 @@ XX / Jottacloud
|
||||
\ (jottacloud)
|
||||
[snip]
|
||||
Storage> jottacloud
|
||||
|
||||
Option client_id.
|
||||
OAuth Client Id.
|
||||
Leave blank normally.
|
||||
Enter a value. Press Enter to leave empty.
|
||||
client_id>
|
||||
|
||||
Option client_secret.
|
||||
OAuth Client Secret.
|
||||
Leave blank normally.
|
||||
Enter a value. Press Enter to leave empty.
|
||||
client_secret>
|
||||
|
||||
Edit advanced config?
|
||||
y) Yes
|
||||
n) No (default)
|
||||
y/n> n
|
||||
|
||||
Option config_type.
|
||||
Select authentication type.
|
||||
Choose a number from below, or type in an existing string value.
|
||||
Type of authentication.
|
||||
Choose a number from below, or type in an existing value of type string.
|
||||
Press Enter for the default (standard).
|
||||
/ Standard authentication.
|
||||
1 | Use this if you're a normal Jottacloud user.
|
||||
| This is primarily supported by the official service, but may also be
|
||||
| supported by some white-label services. It is designed for command-line
|
||||
1 | applications, and you will be asked to enter a single-use personal login
|
||||
| token which you must manually generate from the account security settings
|
||||
| in the web interface of your service.
|
||||
\ (standard)
|
||||
/ Traditional authentication.
|
||||
| This is supported by the official service and all white-label services
|
||||
| that rclone knows about. You will be asked which service to connect to.
|
||||
2 | It has a limitation of only a single active authentication at a time. You
|
||||
| need to be on, or have access to, a machine with an internet-connected
|
||||
| web browser.
|
||||
\ (traditional)
|
||||
/ Legacy authentication.
|
||||
2 | This is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.
|
||||
3 | This is no longer supported by any known services and not recommended
|
||||
  | to be used. You will be asked for your account's username and password.
|
||||
\ (legacy)
|
||||
/ Telia Cloud authentication.
|
||||
3 | Use this if you are using Telia Cloud.
|
||||
\ (telia)
|
||||
/ Tele2 Cloud authentication.
|
||||
4 | Use this if you are using Tele2 Cloud.
|
||||
\ (tele2)
|
||||
/ Onlime Cloud authentication.
|
||||
5 | Use this if you are using Onlime Cloud.
|
||||
\ (onlime)
|
||||
config_type> 1
|
||||
|
||||
Option config_login_token.
|
||||
Personal login token.
|
||||
Generate here: https://www.jottacloud.com/web/secure
|
||||
Login Token> <your token here>
|
||||
Generate it from the account security settings in the web interface of your
|
||||
service, for the official service on https://www.jottacloud.com/web/secure.
|
||||
Enter a value.
|
||||
config_login_token> <your token here>
|
||||
|
||||
Use a non-standard device/mountpoint?
|
||||
Choosing no, the default, will let you access the storage used for the archive
|
||||
section of the official Jottacloud client. If you instead want to access the
|
||||
sync or the backup section, for example, you must choose yes.
|
||||
y) Yes
|
||||
n) No (default)
|
||||
y/n> y
|
||||
Option config_device.
|
||||
The device to use. In standard setup the built-in Jotta device is used,
|
||||
which contains predefined mountpoints for archive, sync etc. All other devices
|
||||
are treated as backup devices by the official Jottacloud client. You may create
|
||||
a new by entering a unique name.
|
||||
Choose a number from below, or type in your own string value.
|
||||
Press Enter for the default (DESKTOP-3H31129).
|
||||
1 > DESKTOP-3H31129
|
||||
2 > Jotta
|
||||
config_device> 2
|
||||
Option config_mountpoint.
|
||||
The mountpoint to use for the built-in device Jotta.
|
||||
The standard setup is to use the Archive mountpoint. Most other mountpoints
|
||||
have very limited support in rclone and should generally be avoided.
|
||||
Choose a number from below, or type in an existing string value.
|
||||
Press Enter for the default (Archive).
|
||||
1 > Archive
|
||||
2 > Shared
|
||||
3 > Sync
|
||||
config_mountpoint> 1
|
||||
y/n> n
|
||||
|
||||
Configuration complete.
|
||||
Options:
|
||||
- type: jottacloud
|
||||
|
||||
@@ -100,7 +100,7 @@ y/e/d> y
|
||||
```
|
||||
|
||||
See the [remote setup docs](/remote_setup/) for how to set it up on a
|
||||
machine with no Internet browser available.
|
||||
machine without an internet-connected web browser available.
|
||||
|
||||
Note that rclone runs a webserver on your local machine to collect the
|
||||
token as returned from Microsoft. This only runs from the moment it
|
||||
|
||||
@@ -71,7 +71,7 @@ y/e/d> y
|
||||
```
|
||||
|
||||
See the [remote setup docs](/remote_setup/) for how to set it up on a
|
||||
machine with no Internet browser available.
|
||||
machine without an internet-connected web browser available.
|
||||
|
||||
Note if you are using remote config with rclone authorize while your pcloud
|
||||
server is the EU region, you will need to set the hostname in 'Edit advanced
|
||||
|
||||
@@ -65,7 +65,7 @@ y/e/d>
|
||||
```
|
||||
|
||||
See the [remote setup docs](/remote_setup/) for how to set it up on a
|
||||
machine with no Internet browser available.
|
||||
machine without an internet-connected web browser available.
|
||||
|
||||
Note that rclone runs a webserver on your local machine to collect the
|
||||
token as returned from premiumize.me. This only runs from the moment it opens
|
||||
|
||||
@@ -131,6 +131,109 @@ section and followed by the privacy policy of Rclone.
|
||||
local configuration file.
|
||||
- Rclone does not share any user data with third parties.
|
||||
|
||||
## User Data Collection and Storage
|
||||
|
||||
This section outlines how rclone accesses, uses, stores, and shares
|
||||
user data obtained from service provider APIs. Our use of information
|
||||
received from provider APIs will adhere to the provider API Services
|
||||
User Data Policy, including the Limited Use requirements.
|
||||
|
||||
Rclone is a client-side command-line program that users run on their
|
||||
own computers to manage their files on cloud storage services. The
|
||||
rclone project does not operate any servers that store or process your
|
||||
personal data. All data access and processing occurs directly on the
|
||||
user's machine and between the user's machine and the provider API
|
||||
servers.
|
||||
|
||||
### Data Accessed
|
||||
|
||||
When you authorize rclone to access your files on your provider, it
|
||||
may access the following types of data, depending on the permissions
|
||||
you grant:
|
||||
|
||||
- Files: Rclone accesses the metadata (filenames, sizes, modification
|
||||
times, etc.) and content of your files and folders on your provider.
|
||||
This is necessary for rclone to perform file management tasks like
|
||||
copying, syncing, moving, and listing files.
|
||||
|
||||
- Authentication Tokens: Rclone requests OAuth 2.0 access tokens from
|
||||
the provider. These tokens are used to authenticate your requests to
|
||||
the provider's APIs and prove that you have granted rclone
|
||||
permission to access your data.
|
||||
|
||||
- Basic Profile Information: As part of the authentication process,
|
||||
rclone may receive your email address to identify the connected
|
||||
account within the rclone configuration.
|
||||
|
||||
### Data Usage
|
||||
|
||||
Rclone uses the user data it accesses solely to provide its core
|
||||
functionality, which is initiated and controlled entirely by you, the
|
||||
user. Specifically:
|
||||
|
||||
- The data is used to perform file transfer and management operations
|
||||
(such as `copy`, `sync`, `move`, `list`, `delete`) between your
|
||||
local machine and your provider account as per your direct commands.
|
||||
|
||||
- Authentication tokens are used exclusively to make authorized API
|
||||
calls to the provider's services on your behalf.
|
||||
|
||||
- Your email address is used locally to help you identify which
|
||||
provider account is configured.
|
||||
|
||||
Rclone does not use your data for any other purpose, such as
|
||||
advertising, marketing, or analysis by the rclone project developers.
|
||||
|
||||
### Data Sharing
|
||||
|
||||
Rclone does not share your user data with any third parties.
|
||||
|
||||
All data transfers initiated by the user occur directly between the
|
||||
machine where rclone is running and the provider's servers. The rclone
|
||||
project and its developers **never** have access to your
|
||||
authentication tokens or your file data.
|
||||
|
||||
### Data Storage & Protection
|
||||
|
||||
- Configuration Data: Rclone stores its configuration, including the
|
||||
OAuth 2.0 tokens required to access your provider account, in a
|
||||
configuration file (`rclone.conf`) located on your local machine.
|
||||
|
||||
- Security: You are responsible for securing this configuration
|
||||
file on your own computer. Rclone provides a built-in option to
|
||||
encrypt the configuration file with a password for an added layer of
|
||||
security. We strongly recommend using this feature.
|
||||
|
||||
- File Data: Your file data is only held in your computer's memory
|
||||
(RAM) temporarily during transfer operations. Rclone does not
|
||||
permanently store your file content on your local disk unless you
|
||||
explicitly command it to do so (e.g., by running a `copy` command
|
||||
from the provider to a local directory).
|
||||
|
||||
### Data Retention & Deletion
|
||||
|
||||
Rclone gives you full control over your data.
|
||||
|
||||
- Data Retention: Rclone retains the configuration data, including
|
||||
authentication tokens, on your local machine for as long as you keep
|
||||
the configuration file. This allows you to use rclone without having
|
||||
to re-authenticate for every session.
|
||||
|
||||
- Data Deletion: You can delete your data and revoke rclone's
|
||||
access at any time through one of the following methods:
|
||||
|
||||
1. Local Deletion: You can delete the specific provider
|
||||
configuration from your `rclone.conf` file or delete the entire
|
||||
file itself. This will permanently remove the authentication
|
||||
tokens from your machine.
|
||||
|
||||
2. Revoking Access via the provider: You can revoke rclone's
|
||||
access to your provider directly from the provider's
|
||||
security settings page. This will invalidate the authentication
|
||||
tokens, and rclone will no longer be able to access your data.
|
||||
For example, if you are using Google you can manage your permissions
|
||||
[on the Google permissions page](https://myaccount.google.com/permissions).
|
||||
|
||||
## Resources & Further Information
|
||||
|
||||
- [Data Protection Act 1998](http://www.legislation.gov.uk/ukpga/1998/29/contents)
|
||||
|
||||
@@ -80,7 +80,7 @@ e/n/d/r/c/s/q> q
|
||||
```
|
||||
|
||||
See the [remote setup docs](/remote_setup/) for how to set it up on a
|
||||
machine with no Internet browser available.
|
||||
machine without an internet-connected web browser available.
|
||||
|
||||
Note that rclone runs a webserver on your local machine to collect the
|
||||
token as returned from put.io if using web browser to automatically
|
||||
|
||||
@@ -6,22 +6,23 @@ description: "Configuring rclone up on a remote / headless machine"
|
||||
# Configuring rclone on a remote / headless machine
|
||||
|
||||
Some of the configurations (those involving oauth2) require an
|
||||
Internet connected web browser.
|
||||
internet-connected web browser.
|
||||
|
||||
If you are trying to set rclone up on a remote or headless box with no
|
||||
browser available on it (e.g. a NAS or a server in a datacenter) then
|
||||
you will need to use an alternative means of configuration. There are
|
||||
two ways of doing it, described below.
|
||||
If you are trying to set rclone up on a remote or headless machine with no
|
||||
browser available on it (e.g. a NAS or a server in a datacenter), then
|
||||
you will need to use an alternative means of configuration. There are
|
||||
three ways of doing it, described below.
|
||||
|
||||
## Configuring using rclone authorize
|
||||
|
||||
On the headless box run `rclone` config but answer `N` to the `Use auto config?`
|
||||
question.
|
||||
On the headless machine run [rclone config](/commands/rclone_config), but
|
||||
answer `N` to the question `Use web browser to automatically authenticate rclone with remote?`.
|
||||
|
||||
```text
|
||||
Use auto config?
|
||||
* Say Y if not sure
|
||||
* Say N if you are working on a remote or headless machine
|
||||
Use web browser to automatically authenticate rclone with remote?
|
||||
* Say Y if the machine running rclone has a web browser you can use
|
||||
* Say N if running rclone on a (remote) machine without web browser access
|
||||
If not sure try Y. If Y failed, try N.
|
||||
|
||||
y) Yes (default)
|
||||
n) No
|
||||
@@ -33,33 +34,35 @@ a web browser available.
|
||||
For more help and alternate methods see: https://rclone.org/remote_setup/
|
||||
Execute the following on the machine with the web browser (same rclone
|
||||
version recommended):
|
||||
rclone authorize "onedrive"
|
||||
rclone authorize "onedrive"
|
||||
Then paste the result.
|
||||
Enter a value.
|
||||
config_token>
|
||||
```
|
||||
|
||||
Then on your main desktop machine
|
||||
Then on your main desktop machine, run [rclone authorize](/commands/rclone_authorize/).
|
||||
|
||||
```text
|
||||
rclone authorize "onedrive"
|
||||
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
|
||||
Log in and authorize rclone for access
|
||||
Waiting for code...
|
||||
NOTICE: Make sure your Redirect URL is set to "http://localhost:53682/" in your custom config.
|
||||
NOTICE: If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth?state=xxxxxxxxxxxxxxxxxxxxxx
|
||||
NOTICE: Log in and authorize rclone for access
|
||||
NOTICE: Waiting for code...
|
||||
|
||||
Got code
|
||||
Paste the following into your remote machine --->
|
||||
SECRET_TOKEN
|
||||
<---End paste
|
||||
```
|
||||
|
||||
Then back to the headless box, paste in the code
|
||||
Then back to the headless machine, paste in the code.
|
||||
|
||||
```text
|
||||
config_token> SECRET_TOKEN
|
||||
--------------------
|
||||
[acd12]
|
||||
client_id =
|
||||
client_secret =
|
||||
client_id =
|
||||
client_secret =
|
||||
token = SECRET_TOKEN
|
||||
--------------------
|
||||
y) Yes this is OK
|
||||
@@ -70,18 +73,19 @@ y/e/d>
|
||||
|
||||
## Configuring by copying the config file
|
||||
|
||||
Rclone stores all of its config in a single configuration file. This
|
||||
can easily be copied to configure a remote rclone.
|
||||
Rclone stores all of its configuration in a single file. This can easily be
|
||||
copied to configure a remote rclone (although some backends do not support
|
||||
reusing the same configuration, consult your backend documentation to be
|
||||
sure).
|
||||
|
||||
So first configure rclone on your desktop machine with
|
||||
Start by running [rclone config](/commands/rclone_config) to create the
|
||||
configuration file on your desktop machine.
|
||||
|
||||
```sh
|
||||
rclone config
|
||||
```
|
||||
|
||||
to set up the config file.
|
||||
|
||||
Find the config file by running `rclone config file`, for example
|
||||
Then locate the file by running [rclone config file](/commands/rclone_config_file).
|
||||
|
||||
```sh
|
||||
$ rclone config file
|
||||
@@ -89,31 +93,37 @@ Configuration file is stored at:
|
||||
/home/user/.rclone.conf
|
||||
```
|
||||
|
||||
Now transfer it to the remote box (scp, cut paste, ftp, sftp, etc.) and
|
||||
place it in the correct place (use `rclone config file` on the remote
|
||||
box to find out where).
|
||||
Finally, transfer the file to the remote machine (scp, cut paste, ftp, sftp, etc.)
|
||||
and place it in the correct location (use [rclone config file](/commands/rclone_config_file)
|
||||
on the remote machine to find out where).
|
||||
|
||||
## Configuring using SSH Tunnel
|
||||
|
||||
Linux and MacOS users can utilize SSH Tunnel to redirect the headless box
|
||||
port 53682 to local machine by using the following command:
|
||||
If you have an SSH client installed on your local machine, you can set up an
|
||||
SSH tunnel to redirect the port 53682 into the headless machine by using the
|
||||
following command:
|
||||
|
||||
```sh
|
||||
ssh -L localhost:53682:localhost:53682 username@remote_server
|
||||
```
|
||||
|
||||
Then on the headless box run `rclone config` and answer `Y` to the
|
||||
`Use auto config?` question.
|
||||
Then on the headless machine run [rclone config](/commands/rclone_config) and
|
||||
answer `Y` to the question `Use web browser to automatically authenticate rclone with remote?`.
|
||||
|
||||
```text
|
||||
Use auto config?
|
||||
* Say Y if not sure
|
||||
* Say N if you are working on a remote or headless machine
|
||||
Use web browser to automatically authenticate rclone with remote?
|
||||
* Say Y if the machine running rclone has a web browser you can use
|
||||
* Say N if running rclone on a (remote) machine without web browser access
|
||||
If not sure try Y. If Y failed, try N.
|
||||
|
||||
y) Yes (default)
|
||||
n) No
|
||||
y/n> y
|
||||
NOTICE: Make sure your Redirect URL is set to "http://localhost:53682/" in your custom config.
|
||||
NOTICE: If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth?state=xxxxxxxxxxxxxxxxxxxxxx
|
||||
NOTICE: Log in and authorize rclone for access
|
||||
NOTICE: Waiting for code...
|
||||
```
|
||||
|
||||
Then copy and paste the auth url `http://127.0.0.1:53682/auth?state=xxxxxxxxxxxx`
|
||||
to the browser on your local machine, complete the auth and it is done.
|
||||
Finally, copy and paste the presented URL `http://127.0.0.1:53682/auth?state=xxxxxxxxxxxxxxxxxxxxxx`
|
||||
to the browser on your local machine, complete the auth and you are done.
|
||||
|
||||
@@ -21,7 +21,9 @@ The S3 backend can be used with a number of different providers:
|
||||
{{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
|
||||
{{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
|
||||
{{< provider name="Exaba" home="https://exaba.com/" config="/s3/#exaba" >}}
|
||||
{{< provider name="FileLu S5 (S3-Compatible Object Storage)" home="https://s5lu.com/" config="/s3/#filelu-s5" >}}
|
||||
{{< provider name="GCS" home="https://cloud.google.com/storage/docs" config="/s3/#google-cloud-storage" >}}
|
||||
{{< provider name="Hetzner" home="https://www.hetzner.com/storage/object-storage/" config="/s3/#hetzner" >}}
|
||||
{{< provider name="Huawei OBS" home="https://www.huaweicloud.com/intl/en-us/product/obs.html" config="/s3/#huawei-obs" >}}
|
||||
{{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
|
||||
{{< provider name="IDrive e2" home="https://www.idrive.com/e2/?refer=rclone" config="/s3/#idrive-e2" >}}
|
||||
@@ -38,6 +40,7 @@ The S3 backend can be used with a number of different providers:
|
||||
{{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}}
|
||||
{{< provider name="Pure Storage FlashBlade" home="https://www.purestorage.com/products/unstructured-data-storage.html" config="/s3/#pure-storage-flashblade" >}}
|
||||
{{< provider name="Qiniu Cloud Object Storage (Kodo)" home="https://www.qiniu.com/en/products/kodo" config="/s3/#qiniu" >}}
|
||||
{{< provider name="Rabata Cloud Storage" home="https://rabata.io" config="/s3/#Rabata" >}}
|
||||
{{< provider name="RackCorp Object Storage" home="https://www.rackcorp.com/" config="/s3/#RackCorp" >}}
|
||||
{{< provider name="Rclone Serve S3" home="/commands/rclone_serve_s3/" config="/s3/#rclone" >}}
|
||||
{{< provider name="Scaleway" home="https://www.scaleway.com/en/object-storage/" config="/s3/#scaleway" >}}
|
||||
@@ -3400,6 +3403,150 @@ endpoint = https://storage.googleapis.com
|
||||
|
||||
This is Google bug [#312292516](https://issuetracker.google.com/u/0/issues/312292516).
|
||||
|
||||
### Hetzner Object Storage {#hetzner}
|
||||
|
||||
Here is an example of making a [Hetzner Object Storage](https://www.hetzner.com/storage/object-storage/)
|
||||
configuration. First run:
|
||||
|
||||
rclone config
|
||||
|
||||
This will guide you through an interactive setup process.
|
||||
|
||||
```
|
||||
No remotes found, make a new one?
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
q) Quit config
|
||||
n/s/q> n
|
||||
|
||||
Enter name for new remote.
|
||||
name> my-hetzner
|
||||
Option Storage.
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value.
|
||||
[snip]
|
||||
XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, Hetzner, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Outscale, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, Selectel, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others
|
||||
\ (s3)
|
||||
[snip]
|
||||
Storage> s3
|
||||
Option provider.
|
||||
Choose your S3 provider.
|
||||
Choose a number from below, or type in your own value.
|
||||
Press Enter to leave empty.
|
||||
[snip]
|
||||
XX / Hetzner Object Storage
|
||||
\ (Hetzner)
|
||||
[snip]
|
||||
provider> Hetzner
|
||||
Option env_auth.
|
||||
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
Only applies if access_key_id and secret_access_key is blank.
|
||||
Choose a number from below, or type in your own boolean value (true or false).
|
||||
Press Enter for the default (false).
|
||||
1 / Enter AWS credentials in the next step.
|
||||
\ (false)
|
||||
2 / Get AWS credentials from the environment (env vars or IAM).
|
||||
\ (true)
|
||||
env_auth>
|
||||
Option access_key_id.
|
||||
AWS Access Key ID.
|
||||
Leave blank for anonymous access or runtime credentials.
|
||||
Enter a value. Press Enter to leave empty.
|
||||
access_key_id> ACCESS_KEY
|
||||
Option secret_access_key.
|
||||
AWS Secret Access Key (password).
|
||||
Leave blank for anonymous access or runtime credentials.
|
||||
Enter a value. Press Enter to leave empty.
|
||||
secret_access_key> SECRET_KEY
|
||||
Option region.
|
||||
Region to connect to.
|
||||
Choose a number from below, or type in your own value.
|
||||
Press Enter to leave empty.
|
||||
1 / Helsinki
|
||||
\ (hel1)
|
||||
2 / Falkenstein
|
||||
\ (fsn1)
|
||||
3 / Nuremberg
|
||||
\ (nbg1)
|
||||
region>
|
||||
Option endpoint.
|
||||
Endpoint for Hetzner Object Storage
|
||||
Choose a number from below, or type in your own value.
|
||||
Press Enter to leave empty.
|
||||
1 / Helsinki
|
||||
\ (hel1.your-objectstorage.com)
|
||||
2 / Falkenstein
|
||||
\ (fsn1.your-objectstorage.com)
|
||||
3 / Nuremberg
|
||||
\ (nbg1.your-objectstorage.com)
|
||||
endpoint>
|
||||
Option location_constraint.
|
||||
Location constraint - must be set to match the Region.
|
||||
Leave blank if not sure. Used when creating buckets only.
|
||||
Enter a value. Press Enter to leave empty.
|
||||
location_constraint>
|
||||
Option acl.
|
||||
Canned ACL used when creating buckets and storing or copying objects.
|
||||
This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
|
||||
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
|
||||
Note that this ACL is applied when server-side copying objects as S3
|
||||
doesn't copy the ACL from the source but rather writes a fresh one.
|
||||
If the acl is an empty string then no X-Amz-Acl: header is added and
|
||||
the default (private) will be used.
|
||||
Choose a number from below, or type in your own value.
|
||||
Press Enter to leave empty.
|
||||
/ Owner gets FULL_CONTROL.
|
||||
1 | No one else has access rights (default).
|
||||
\ (private)
|
||||
/ Owner gets FULL_CONTROL.
|
||||
2 | The AllUsers group gets READ access.
|
||||
\ (public-read)
|
||||
acl>
|
||||
Edit advanced config?
|
||||
y) Yes
|
||||
n) No (default)
|
||||
y/n>
|
||||
Configuration complete.
|
||||
Options:
|
||||
- type: s3
|
||||
- provider: Hetzner
|
||||
- access_key_id: ACCESS_KEY
|
||||
- secret_access_key: SECRET_KEY
|
||||
Keep this "my-hetzner" remote?
|
||||
y) Yes this is OK (default)
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d>
|
||||
Current remotes:
|
||||
|
||||
Name Type
|
||||
==== ====
|
||||
my-hetzner s3
|
||||
|
||||
e) Edit existing remote
|
||||
n) New remote
|
||||
d) Delete remote
|
||||
r) Rename remote
|
||||
c) Copy remote
|
||||
s) Set configuration password
|
||||
q) Quit config
|
||||
e/n/d/r/c/s/q>
|
||||
```
|
||||
|
||||
This will leave the config file looking like this.
|
||||
|
||||
```
|
||||
[my-hetzner]
|
||||
type = s3
|
||||
provider = Hetzner
|
||||
access_key_id = ACCESS_KEY
|
||||
secret_access_key = SECRET_KEY
|
||||
region = hel1
|
||||
endpoint = hel1.your-objectstorage.com
|
||||
acl = private
|
||||
```
|
||||
|
||||
|
||||
### Huawei OBS {#huawei-obs}
|
||||
|
||||
Object Storage Service (OBS) provides stable, secure, efficient, and easy-to-use cloud storage that lets you store virtually any volume of unstructured data in any format and access it from anywhere.
|
||||
@@ -5635,6 +5782,244 @@ Name Type
|
||||
qiniu s3
|
||||
```
|
||||
|
||||
### FileLu S5 {#filelu-s5}
|
||||
|
||||
[FileLu S5 Object Storage](https://s5lu.com) is an S3-compatible object storage system.
|
||||
It provides multiple region options (Global, US-East, EU-Central, AP-Southeast, and ME-Central) while using a single endpoint (`s5lu.com`).
|
||||
FileLu S5 is designed for scalability, security, and simplicity, with predictable pricing and no hidden charges for data transfers or API requests.
|
||||
|
||||
Here is an example of making a configuration. First run:
|
||||
|
||||
```sh
|
||||
rclone config
|
||||
```
|
||||
|
||||
This will guide you through an interactive setup process.
|
||||
|
||||
```text
|
||||
No remotes found, make a new one\?
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
q) Quit config
|
||||
n/s/q> n
|
||||
|
||||
Enter name for new remote.
|
||||
name> s5lu
|
||||
|
||||
Option Storage.
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value.
|
||||
[snip]
|
||||
XX / Amazon S3 Compliant Storage Providers including AWS,... FileLu, ...
|
||||
\ (s3)
|
||||
[snip]
|
||||
Storage> s3
|
||||
|
||||
Option provider.
|
||||
Choose your S3 provider.
|
||||
Choose a number from below, or type in your own value.
|
||||
Press Enter to leave empty.
|
||||
[snip]
|
||||
XX / FileLu S5 Object Storage
|
||||
\ (FileLu)
|
||||
[snip]
|
||||
provider> FileLu
|
||||
|
||||
Option env_auth.
|
||||
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
Only applies if access_key_id and secret_access_key is blank.
|
||||
Choose a number from below, or type in your own boolean value (true or false).
|
||||
Press Enter for the default (false).
|
||||
1 / Enter AWS credentials in the next step.
|
||||
\ (false)
|
||||
2 / Get AWS credentials from the environment (env vars or IAM).
|
||||
\ (true)
|
||||
env_auth>
|
||||
|
||||
Option access_key_id.
|
||||
AWS Access Key ID.
|
||||
Leave blank for anonymous access or runtime credentials.
|
||||
Enter a value. Press Enter to leave empty.
|
||||
access_key_id> XXX
|
||||
|
||||
Option secret_access_key.
|
||||
AWS Secret Access Key (password).
|
||||
Leave blank for anonymous access or runtime credentials.
|
||||
Enter a value. Press Enter to leave empty.
|
||||
secret_access_key> XXX
|
||||
|
||||
Option endpoint.
|
||||
Endpoint for S3 API.
|
||||
Required when using an S3 clone.
|
||||
Choose a number from below, or type in your own value.
|
||||
Press Enter to leave empty.
|
||||
1 / Global
|
||||
\ (global)
|
||||
2 / North America (US-East)
|
||||
\ (us-east)
|
||||
3 / Europe (EU-Central)
|
||||
\ (eu-central)
|
||||
4 / Asia Pacific (AP-Southeast)
|
||||
\ (ap-southeast)
|
||||
5 / Middle East (ME-Central)
|
||||
\ (me-central)
|
||||
region> 1
|
||||
|
||||
Edit advanced config?
|
||||
y) Yes
|
||||
n) No (default)
|
||||
y/n> n
|
||||
|
||||
Configuration complete.
|
||||
Options:
|
||||
- type: s3
|
||||
- provider: FileLu
|
||||
- access_key_id: XXX
|
||||
- secret_access_key: XXX
|
||||
- endpoint: s5lu.com
|
||||
Keep this "s5lu" remote?
|
||||
y) Yes this is OK (default)
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d> y
|
||||
```
|
||||
|
||||
This will leave the config file looking like this.
|
||||
|
||||
```
|
||||
[s5lu]
|
||||
type = s3
|
||||
provider = FileLu
|
||||
access_key_id = XXX
|
||||
secret_access_key = XXX
|
||||
endpoint = s5lu.com
|
||||
```
|
||||
|
||||
### Rabata {#Rabata}
|
||||
|
||||
[Rabata](https://rabata.io) is an S3-compatible secure cloud storage service that offers flat, transparent pricing (no API request fees)
|
||||
while supporting standard S3 APIs. It is suitable for backup, application storage,media workflows, and archive use cases.
|
||||
|
||||
Server side copy is not implemented with Rabata, also meaning modification time of objects cannot be updated.
|
||||
|
||||
Rclone config:
|
||||
|
||||
```
|
||||
rclone config
|
||||
No remotes found, make a new one?
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
q) Quit config
|
||||
n/s/q> n
|
||||
|
||||
Enter name for new remote.
|
||||
name> Rabata
|
||||
|
||||
Option Storage.
|
||||
Type of storage to configure.
|
||||
Choose a number from below, or type in your own value.
|
||||
[snip]
|
||||
XX / Amazon S3 Compliant Storage Providers including AWS, ...
|
||||
\ (s3)
|
||||
[snip]
|
||||
Storage> s3
|
||||
|
||||
Option provider.
|
||||
Choose your S3 provider.
|
||||
Choose a number from below, or type in your own value.
|
||||
Press Enter to leave empty.
|
||||
[snip]
|
||||
XX / Rabata Cloud Storage
|
||||
\ (Rabata)
|
||||
[snip]
|
||||
provider> Rabata
|
||||
|
||||
Option env_auth.
|
||||
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
|
||||
Only applies if access_key_id and secret_access_key is blank.
|
||||
Choose a number from below, or type in your own boolean value (true or false).
|
||||
Press Enter for the default (false).
|
||||
1 / Enter AWS credentials in the next step.
|
||||
\ (false)
|
||||
2 / Get AWS credentials from the environment (env vars or IAM).
|
||||
\ (true)
|
||||
env_auth>
|
||||
|
||||
Option access_key_id.
|
||||
AWS Access Key ID.
|
||||
Leave blank for anonymous access or runtime credentials.
|
||||
Enter a value. Press Enter to leave empty.
|
||||
access_key_id> ACCESS_KEY_ID
|
||||
|
||||
Option secret_access_key.
|
||||
AWS Secret Access Key (password).
|
||||
Leave blank for anonymous access or runtime credentials.
|
||||
Enter a value. Press Enter to leave empty.
|
||||
secret_access_key> SECRET_ACCESS_KEY
|
||||
|
||||
Option region.
|
||||
Region where your bucket will be created and your data stored.
|
||||
Choose a number from below, or type in your own value.
|
||||
Press Enter to leave empty.
|
||||
1 / US East (N. Virginia)
|
||||
\ (us-east-1)
|
||||
2 / EU (Ireland)
|
||||
\ (eu-west-1)
|
||||
3 / EU (London)
|
||||
\ (eu-west-2)
|
||||
region> 3
|
||||
|
||||
Option endpoint.
|
||||
Endpoint for Rabata Object Storage.
|
||||
Choose a number from below, or type in your own value.
|
||||
Press Enter to leave empty.
|
||||
1 / US East (N. Virginia)
|
||||
\ (s3.us-east-1.rabata.io)
|
||||
2 / EU West (Ireland)
|
||||
\ (s3.eu-west-1.rabata.io)
|
||||
3 / EU West (London)
|
||||
\ (s3.eu-west-2.rabata.io)
|
||||
endpoint> 3
|
||||
|
||||
Option location_constraint.
|
||||
location where your bucket will be created and your data stored.
|
||||
Choose a number from below, or type in your own value.
|
||||
Press Enter to leave empty.
|
||||
1 / US East (N. Virginia)
|
||||
\ (us-east-1)
|
||||
2 / EU (Ireland)
|
||||
\ (eu-west-1)
|
||||
3 / EU (London)
|
||||
\ (eu-west-2)
|
||||
location_constraint> 3
|
||||
|
||||
Edit advanced config?
|
||||
y) Yes
|
||||
n) No (default)
|
||||
y/n> n
|
||||
|
||||
Configuration complete.
|
||||
Options:
|
||||
- type: s3
|
||||
- provider: Rabata
|
||||
- access_key_id: ACCESS_KEY_ID
|
||||
- secret_access_key: SECRET_ACCESS_KEY
|
||||
- region: eu-west-2
|
||||
- endpoint: s3.eu-west-2.rabata.io
|
||||
- location_constraint: eu-west-2
|
||||
Keep this "rabata" remote?
|
||||
y) Yes this is OK (default)
|
||||
e) Edit this remote
|
||||
d) Delete this remote
|
||||
y/e/d> y
|
||||
|
||||
Current remotes:
|
||||
|
||||
Name Type
|
||||
==== ====
|
||||
rabata s3
|
||||
```
|
||||
|
||||
### RackCorp {#RackCorp}
|
||||
|
||||
[RackCorp Object Storage](https://www.rackcorp.com/storage/s3storage) is an S3 compatible object storage platform from your friendly cloud provider RackCorp.
|
||||
|
||||
@@ -84,7 +84,7 @@ y/e/d> y
|
||||
```
|
||||
|
||||
See the [remote setup docs](/remote_setup/) for how to set it up on a
|
||||
machine with no Internet browser available.
|
||||
machine without an internet-connected web browser available.
|
||||
|
||||
Note that rclone runs a webserver on your local machine to collect the
|
||||
token as returned from Citrix ShareFile. This only runs from the moment it opens
|
||||
|
||||
@@ -62,11 +62,12 @@ Thank you very much to our sponsors:
|
||||
{{< sponsor src="/img/logos/backblaze.svg" width="300" height="200" title="Visit our sponsor Backblaze" link="https://www.backblaze.com/cloud-storage-rclonead?utm_source=rclone&utm_medium=paid&utm_campaign=rclone-website-20250715">}}
|
||||
{{< sponsor src="/img/logos/idrive_e2.svg" width="300" height="200" title="Visit our sponsor IDrive e2" link="https://www.idrive.com/e2/?refer=rclone">}}
|
||||
{{< sponsor src="/img/logos/filescom-enterprise-grade-workflows.png" width="300" height="200" title="Start Your Free Trial Today" link="https://files.com/?utm_source=rclone&utm_medium=referral&utm_campaign=banner&utm_term=rclone">}}
|
||||
{{< sponsor src="/img/logos/mega-s4.svg" width="300" height="200" title="MEGA S4: New S3 compatible object storage. High scale. Low cost. Free egress." link="https://mega.io/objectstorage?utm_source=rclone&utm_medium=referral&utm_campaign=rclone-mega-s4&mct=rclonepromo">}}
|
||||
{{< sponsor src="/img/logos/sia.svg" width="200" height="200" title="Visit our sponsor sia" link="https://sia.tech">}}
|
||||
{{< sponsor src="/img/logos/route4me.svg" width="400" height="200" title="Visit our sponsor Route4Me" link="https://route4me.com/">}}
|
||||
{{< sponsor src="/img/logos/rcloneview.svg" width="300" height="200" title="Visit our sponsor RcloneView" link="https://rcloneview.com/">}}
|
||||
{{< sponsor src="/img/logos/rcloneui.svg" width="300" height="200" title="Visit our sponsor RcloneUI" link="https://github.com/rclone-ui/rclone-ui">}}
|
||||
{{< sponsor src="/img/logos/filelu-rclone.svg" width="330" height="200" title="Visit our sponsor FileLu" link="https://filelu.com/">}}
|
||||
{{< sponsor src="/img/logos/filelu-rclone.svg" width="250" height="200" title="Visit our sponsor FileLu" link="https://filelu.com/">}}
|
||||
{{< sponsor src="/img/logos/torbox.png" width="200" height="200" title="Visit our sponsor TORBOX" link="https://www.torbox.app/">}}
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
|
||||
@@ -61,7 +61,7 @@ y/e/d> y
|
||||
```
|
||||
|
||||
See the [remote setup docs](/remote_setup/) for how to set it up on a
|
||||
machine with no Internet browser available.
|
||||
machine without an internet-connected web browser available.
|
||||
|
||||
Note that rclone runs a webserver on your local machine to collect the
|
||||
token as returned from Yandex Disk. This only runs from the moment it
|
||||
|
||||
@@ -80,7 +80,7 @@ y/e/d>
|
||||
```
|
||||
|
||||
See the [remote setup docs](/remote_setup/) for how to set it up on a
|
||||
machine with no Internet browser available.
|
||||
machine without an internet-connected web browser available.
|
||||
|
||||
Rclone runs a webserver on your local computer to collect the
|
||||
authorization token from Zoho Workdrive. This is only from the moment
|
||||
|
||||
@@ -19,6 +19,8 @@
|
||||
<a class="dropdown-item" href="/filtering/"><i class="fa fa-book fa-fw"></i> Filtering</a>
|
||||
<a class="dropdown-item" href="/gui/"><i class="fa fa-book fa-fw"></i> GUI</a>
|
||||
<a class="dropdown-item" href="/rc/"><i class="fa fa-book fa-fw"></i> Remote Control</a>
|
||||
<a class="dropdown-item" href="/remote_setup/"><i class="fa fa-book fa-fw"></i> Remote Setup</a>
|
||||
<a class="dropdown-item" href="/cluster/"><i class="fa fa-book fa-fw"></i> Cluster</a>
|
||||
<a class="dropdown-item" href="/changelog/"><i class="fa fa-book fa-fw"></i> Changelog</a>
|
||||
<a class="dropdown-item" href="/bugs/"><i class="fa fa-book fa-fw"></i> Bugs</a>
|
||||
<a class="dropdown-item" href="/faq/"><i class="fa fa-book fa-fw"></i> FAQ</a>
|
||||
|
||||
@@ -82,7 +82,7 @@ type accountValues struct {
|
||||
max int64 // if >=0 the max number of bytes to transfer
|
||||
start time.Time // Start time of first read
|
||||
lpTime time.Time // Time of last average measurement
|
||||
lpBytes int // Number of bytes read since last measurement
|
||||
lpBytes int64 // Number of bytes read since last measurement
|
||||
avg float64 // Moving average of last few measurements in Byte/s
|
||||
}
|
||||
|
||||
@@ -344,15 +344,20 @@ func (acc *Account) limitPerFileBandwidth(n int) {
|
||||
}
|
||||
}
|
||||
|
||||
// Account the read and limit bandwidth
|
||||
func (acc *Account) accountRead(n int) {
|
||||
// Account the read
|
||||
func (acc *Account) accountReadN(n int64) {
|
||||
// Update Stats
|
||||
acc.values.mu.Lock()
|
||||
acc.values.lpBytes += n
|
||||
acc.values.bytes += int64(n)
|
||||
acc.values.bytes += n
|
||||
acc.values.mu.Unlock()
|
||||
|
||||
acc.stats.Bytes(int64(n))
|
||||
acc.stats.Bytes(n)
|
||||
}
|
||||
|
||||
// Account the read and limit bandwidth
|
||||
func (acc *Account) accountRead(n int) {
|
||||
acc.accountReadN(int64(n))
|
||||
|
||||
TokenBucket.LimitBandwidth(TokenBucketSlotAccounting, n)
|
||||
acc.limitPerFileBandwidth(n)
|
||||
@@ -427,6 +432,15 @@ func (acc *Account) AccountRead(n int) (err error) {
|
||||
return err
|
||||
}
|
||||
|
||||
// AccountReadN account having read n bytes
|
||||
//
|
||||
// Does not obey any transfer limits, bandwidth limits, etc.
|
||||
func (acc *Account) AccountReadN(n int64) {
|
||||
acc.mu.Lock()
|
||||
defer acc.mu.Unlock()
|
||||
acc.accountReadN(n)
|
||||
}
|
||||
|
||||
// Close the object
|
||||
func (acc *Account) Close() error {
|
||||
acc.mu.Lock()
|
||||
|
||||
@@ -100,7 +100,7 @@ func TestAccountRead(t *testing.T) {
|
||||
|
||||
assert.True(t, acc.values.start.IsZero())
|
||||
acc.values.mu.Lock()
|
||||
assert.Equal(t, 0, acc.values.lpBytes)
|
||||
assert.Equal(t, int64(0), acc.values.lpBytes)
|
||||
assert.Equal(t, int64(0), acc.values.bytes)
|
||||
acc.values.mu.Unlock()
|
||||
assert.Equal(t, int64(0), stats.bytes)
|
||||
@@ -113,7 +113,7 @@ func TestAccountRead(t *testing.T) {
|
||||
|
||||
assert.False(t, acc.values.start.IsZero())
|
||||
acc.values.mu.Lock()
|
||||
assert.Equal(t, 2, acc.values.lpBytes)
|
||||
assert.Equal(t, int64(2), acc.values.lpBytes)
|
||||
assert.Equal(t, int64(2), acc.values.bytes)
|
||||
acc.values.mu.Unlock()
|
||||
assert.Equal(t, int64(2), stats.bytes)
|
||||
@@ -145,7 +145,7 @@ func testAccountWriteTo(t *testing.T, withBuffer bool) {
|
||||
|
||||
assert.True(t, acc.values.start.IsZero())
|
||||
acc.values.mu.Lock()
|
||||
assert.Equal(t, 0, acc.values.lpBytes)
|
||||
assert.Equal(t, int64(0), acc.values.lpBytes)
|
||||
assert.Equal(t, int64(0), acc.values.bytes)
|
||||
acc.values.mu.Unlock()
|
||||
assert.Equal(t, int64(0), stats.bytes)
|
||||
@@ -159,7 +159,7 @@ func testAccountWriteTo(t *testing.T, withBuffer bool) {
|
||||
|
||||
assert.False(t, acc.values.start.IsZero())
|
||||
acc.values.mu.Lock()
|
||||
assert.Equal(t, len(buf), acc.values.lpBytes)
|
||||
assert.Equal(t, int64(len(buf)), acc.values.lpBytes)
|
||||
assert.Equal(t, int64(len(buf)), acc.values.bytes)
|
||||
acc.values.mu.Unlock()
|
||||
assert.Equal(t, int64(len(buf)), stats.bytes)
|
||||
|
||||
@@ -22,48 +22,52 @@ const (
|
||||
averageStopAfter = time.Minute
|
||||
)
|
||||
|
||||
// MaxCompletedTransfers specifies maximum number of completed transfers in startedTransfers list
|
||||
// MaxCompletedTransfers specifies the default maximum number of
|
||||
// completed transfers in startedTransfers list. This can be adjusted
|
||||
// for a given StatsInfo by calling the SetMaxCompletedTransfers
|
||||
// method.
|
||||
var MaxCompletedTransfers = 100
|
||||
|
||||
// StatsInfo accounts all transfers
|
||||
// N.B.: if this struct is modified, please remember to also update sum() function in stats_groups
|
||||
// to correctly count the updated fields
|
||||
type StatsInfo struct {
|
||||
mu sync.RWMutex
|
||||
ctx context.Context
|
||||
ci *fs.ConfigInfo
|
||||
bytes int64
|
||||
errors int64
|
||||
lastError error
|
||||
fatalError bool
|
||||
retryError bool
|
||||
retryAfter time.Time
|
||||
checks int64
|
||||
checking *transferMap
|
||||
checkQueue int
|
||||
checkQueueSize int64
|
||||
transfers int64
|
||||
transferring *transferMap
|
||||
transferQueue int
|
||||
transferQueueSize int64
|
||||
listed int64
|
||||
renames int64
|
||||
renameQueue int
|
||||
renameQueueSize int64
|
||||
deletes int64
|
||||
deletesSize int64
|
||||
deletedDirs int64
|
||||
inProgress *inProgress
|
||||
startedTransfers []*Transfer // currently active transfers
|
||||
oldTimeRanges timeRanges // a merged list of time ranges for the transfers
|
||||
oldDuration time.Duration // duration of transfers we have culled
|
||||
group string
|
||||
startTime time.Time // the moment these stats were initialized or reset
|
||||
average averageValues
|
||||
serverSideCopies int64
|
||||
serverSideCopyBytes int64
|
||||
serverSideMoves int64
|
||||
serverSideMoveBytes int64
|
||||
mu sync.RWMutex
|
||||
ctx context.Context
|
||||
ci *fs.ConfigInfo
|
||||
bytes int64
|
||||
errors int64
|
||||
lastError error
|
||||
fatalError bool
|
||||
retryError bool
|
||||
retryAfter time.Time
|
||||
checks int64
|
||||
checking *transferMap
|
||||
checkQueue int
|
||||
checkQueueSize int64
|
||||
transfers int64
|
||||
transferring *transferMap
|
||||
transferQueue int
|
||||
transferQueueSize int64
|
||||
listed int64
|
||||
renames int64
|
||||
renameQueue int
|
||||
renameQueueSize int64
|
||||
deletes int64
|
||||
deletesSize int64
|
||||
deletedDirs int64
|
||||
inProgress *inProgress
|
||||
startedTransfers []*Transfer // currently active transfers
|
||||
oldTimeRanges timeRanges // a merged list of time ranges for the transfers
|
||||
oldDuration time.Duration // duration of transfers we have culled
|
||||
group string
|
||||
startTime time.Time // the moment these stats were initialized or reset
|
||||
average averageValues
|
||||
serverSideCopies int64
|
||||
serverSideCopyBytes int64
|
||||
serverSideMoves int64
|
||||
serverSideMoveBytes int64
|
||||
maxCompletedTransfers int
|
||||
}
|
||||
|
||||
type averageValues struct {
|
||||
@@ -81,17 +85,26 @@ type averageValues struct {
|
||||
func NewStats(ctx context.Context) *StatsInfo {
|
||||
ci := fs.GetConfig(ctx)
|
||||
s := &StatsInfo{
|
||||
ctx: ctx,
|
||||
ci: ci,
|
||||
checking: newTransferMap(ci.Checkers, "checking"),
|
||||
transferring: newTransferMap(ci.Transfers, "transferring"),
|
||||
inProgress: newInProgress(ctx),
|
||||
startTime: time.Now(),
|
||||
average: averageValues{},
|
||||
ctx: ctx,
|
||||
ci: ci,
|
||||
checking: newTransferMap(ci.Checkers, "checking"),
|
||||
transferring: newTransferMap(ci.Transfers, "transferring"),
|
||||
inProgress: newInProgress(ctx),
|
||||
startTime: time.Now(),
|
||||
average: averageValues{},
|
||||
maxCompletedTransfers: MaxCompletedTransfers,
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// SetMaxCompletedTransfers sets the maximum number of completed transfers to keep.
|
||||
func (s *StatsInfo) SetMaxCompletedTransfers(n int) *StatsInfo {
|
||||
s.mu.Lock()
|
||||
s.maxCompletedTransfers = n
|
||||
s.mu.Unlock()
|
||||
return s
|
||||
}
|
||||
|
||||
// RemoteStats returns stats for rc
|
||||
//
|
||||
// If short is true then the transfers and checkers won't be added.
|
||||
@@ -912,22 +925,31 @@ func (s *StatsInfo) RemoveTransfer(transfer *Transfer) {
|
||||
}
|
||||
|
||||
// PruneTransfers makes sure there aren't too many old transfers by removing
|
||||
// single finished transfer.
|
||||
func (s *StatsInfo) PruneTransfers() {
|
||||
if MaxCompletedTransfers < 0 {
|
||||
return
|
||||
}
|
||||
// a single finished transfer. Returns true if it removed a transfer.
|
||||
func (s *StatsInfo) PruneTransfers() bool {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.maxCompletedTransfers < 0 {
|
||||
return false
|
||||
}
|
||||
removed := false
|
||||
// remove a transfer from the start if we are over quota
|
||||
if len(s.startedTransfers) > MaxCompletedTransfers+s.ci.Transfers {
|
||||
if len(s.startedTransfers) > s.maxCompletedTransfers+s.ci.Transfers {
|
||||
for i, tr := range s.startedTransfers {
|
||||
if tr.IsDone() {
|
||||
s._removeTransfer(tr, i)
|
||||
removed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
s.mu.Unlock()
|
||||
return removed
|
||||
}
|
||||
|
||||
// RemoveDoneTransfers removes all Done transfers.
|
||||
func (s *StatsInfo) RemoveDoneTransfers() {
|
||||
for s.PruneTransfers() {
|
||||
}
|
||||
}
|
||||
|
||||
// AddServerSideMove counts a server side move
|
||||
|
||||
@@ -465,3 +465,27 @@ func TestPruneTransfers(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveDoneTransfers(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
s := NewStats(ctx)
|
||||
const transfers = 10
|
||||
for i := int64(1); i <= int64(transfers); i++ {
|
||||
s.AddTransfer(&Transfer{
|
||||
startedAt: time.Unix(i, 0),
|
||||
completedAt: time.Unix(i+1, 0),
|
||||
})
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
assert.Equal(t, time.Duration(transfers)*time.Second, s._totalDuration())
|
||||
assert.Equal(t, transfers, len(s.startedTransfers))
|
||||
s.mu.Unlock()
|
||||
|
||||
s.RemoveDoneTransfers()
|
||||
|
||||
s.mu.Lock()
|
||||
assert.Equal(t, time.Duration(transfers)*time.Second, s._totalDuration())
|
||||
assert.Equal(t, transfers, len(s.startedTransfers))
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
@@ -151,7 +151,7 @@ func (x *BwTimetable) Set(s string) error {
|
||||
}
|
||||
|
||||
// Split the timetable string by both spaces and semicolons
|
||||
for _, tok := range strings.FieldsFunc(s, func(r rune) bool {
|
||||
for tok := range strings.FieldsFuncSeq(s, func(r rune) bool {
|
||||
return r == ' ' || r == ';'
|
||||
}) {
|
||||
tv := strings.Split(tok, ",")
|
||||
|
||||
598
fs/cluster/cluster.go
Normal file
598
fs/cluster/cluster.go
Normal file
@@ -0,0 +1,598 @@
|
||||
// Package cluster implements a machanism to distribute work over a
|
||||
// cluster of rclone instances.
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fs/rc"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
"github.com/rclone/rclone/lib/errcount"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// ErrClusterNotConfigured is returned from creation functions.
|
||||
var ErrClusterNotConfigured = errors.New("cluster is not configured")
|
||||
|
||||
// If we don't hear from workers in this time we assume they have timed out
|
||||
// and re-assign their jobs.
|
||||
const workerTimeout = 2 * time.Second
|
||||
|
||||
// Cluster describes the workings of the current cluster.
//
// A Cluster is the master side of the cluster: it batches up rc calls,
// writes them as pending job files via jobs, and collects the results
// written back by workers.
type Cluster struct {
	jobs        *Jobs                // job storage shared with the workers
	id          string               // cluster ID from the config
	batchFiles  int                  // send the batch when it has this many files
	batchSize   fs.SizeSuffix        // send the batch when it reaches this total size
	cleanup     fs.ClusterCleanup    // how we cleanup cluster files
	_config     rc.Params            // for rc - non-default global config sent with each batch
	_filter     rc.Params            // for rc - non-default filter config sent with each batch
	cancel      func()               // stop bg job
	wg          sync.WaitGroup       // bg job finished
	quit        chan struct{}        // signal graceful stop
	sync        chan chan<- struct{} // sync the current jobs
	quitWorkers bool                 // if set, send workers a stop signal on Shutdown

	// NOTE(review): workers/deadWorkers appear to be written only from the
	// background run goroutine (checkWorkers/loadWorkerStatus) - confirm
	// before accessing them from elsewhere.
	workers     map[string]*WorkerStatus // worker ID => status
	deadWorkers map[string]struct{}      // worker IDs assumed dead

	mu           sync.Mutex       // protects the fields below
	currentBatch Batch            // batch currently being filled
	inflight     map[string]Batch // job name => batch sent but not yet completed
	shutdown     bool             // set once Shutdown has been called
}
|
||||
|
||||
// Batch is a collection of rc tasks to do.
//
// The exported fields are serialized to JSON as the job file the
// workers read; the unexported fields track master-side accounting.
type Batch struct {
	size int64 // running total of the sizes of the Inputs

	Path   string      `json:"_path"`             // rc path to call (set to "job/batch" on send)
	Inputs []rc.Params `json:"inputs"`            // one rc call per entry
	Config rc.Params   `json:"_config,omitempty"` // global config overrides for the worker
	Filter rc.Params   `json:"_filter,omitempty"` // filter config overrides for the worker

	trs   []*accounting.Transfer // transfer for each Input
	sizes []int64                // sizes for each Input
}
|
||||
|
||||
// BatchResult has the results of the batch as received.
type BatchResult struct {
	Results []rc.Params `json:"results"` // one result per batch Input, in order

	// Error returns - set if the batch as a whole failed
	Error  string `json:"error"`
	Status int    `json:"status"`
	Input  string `json:"input"`
	Path   string `json:"path"`
}
|
||||
|
||||
// NewCluster creates a new cluster from the config in ctx.
//
// It returns a nil Cluster (and nil error) if no cluster is configured.
//
// On success it starts a background goroutine (run) which polls for
// completed jobs and worker status; stop it with Shutdown or Abort.
func NewCluster(ctx context.Context) (*Cluster, error) {
	ci := fs.GetConfig(ctx)
	if ci.Cluster == "" {
		return nil, nil
	}
	jobs, err := NewJobs(ctx)
	if err != nil {
		return nil, err
	}
	c := &Cluster{
		jobs:        jobs,
		id:          ci.ClusterID,
		quitWorkers: ci.ClusterQuitWorkers,
		batchFiles:  ci.ClusterBatchFiles,
		batchSize:   ci.ClusterBatchSize,
		cleanup:     ci.ClusterCleanup,
		quit:        make(chan struct{}),
		sync:        make(chan chan<- struct{}),
		inflight:    make(map[string]Batch),
		workers:     make(map[string]*WorkerStatus),
		deadWorkers: make(map[string]struct{}),
	}

	// Configure _config - the non-default global config to send to workers
	configParams, err := fs.ConfigOptionsInfo.NonDefaultRC(ci)
	if err != nil {
		return nil, fmt.Errorf("failed to read global config: %w", err)
	}
	// Remove any global cluster config so workers don't recurse into
	// cluster mode themselves
	for k := range configParams {
		if strings.HasPrefix(k, "Cluster") {
			delete(configParams, k)
		}
	}
	if len(configParams) != 0 {
		fs.Debugf(nil, "Overridden global config: %#v", configParams)
	}
	c._config = rc.Params(configParams)

	// Configure _filter - only sent if a filter is active
	fi := filter.GetConfig(ctx)
	if !fi.InActive() {
		filterParams, err := filter.OptionsInfo.NonDefaultRC(fi)
		if err != nil {
			return nil, fmt.Errorf("failed to read filter config: %w", err)
		}
		fs.Debugf(nil, "Overridden filter config: %#v", filterParams)
		c._filter = rc.Params(filterParams)
	}

	// Make sure the queue directories exist before anything is written
	err = c.jobs.createDirectoryStructure(ctx)
	if err != nil {
		return nil, err
	}

	// Start the background worker
	bgCtx, cancel := context.WithCancel(context.Background())
	c.cancel = cancel
	c.wg.Add(1)
	go c.run(bgCtx)

	fs.Logf(c.jobs.f, "Started cluster master")

	return c, nil
}
|
||||
|
||||
var (
	globalClusterMu sync.Mutex // protects globalCluster
	globalCluster   *Cluster   // lazily created singleton, nil until first GetCluster
)

// GetCluster starts or gets a cluster.
//
// If no cluster is configured or the cluster can't be started then it
// returns nil.
//
// The first successfully created cluster is cached globally and
// registered for shutdown at exit; subsequent calls return the same
// instance regardless of ctx.
func GetCluster(ctx context.Context) *Cluster {
	globalClusterMu.Lock()
	defer globalClusterMu.Unlock()

	if globalCluster != nil {
		return globalCluster
	}

	cluster, err := NewCluster(ctx)
	if err != nil {
		fs.Errorf(nil, "Failed to start cluster: %v", err)
		return nil
	}
	if cluster != nil {
		// Make sure the cluster is shut down cleanly on exit
		atexit.Register(func() {
			err := cluster.Shutdown(context.Background())
			if err != nil {
				fs.Errorf(nil, "Failed to shutdown cluster: %v", err)
			}
		})
	}

	globalCluster = cluster
	return globalCluster
}
|
||||
|
||||
// Send the current batch for processing.
//
// It writes the batch as a pending job file and records it in
// c.inflight under the generated job name. Does nothing if the batch
// is empty.
//
// call with c.mu held
func (c *Cluster) sendBatch(ctx context.Context) (err error) {
	// Do nothing if the batch is empty
	if len(c.currentBatch.Inputs) == 0 {
		return nil
	}

	// Get and reset current batch
	b := c.currentBatch
	c.currentBatch = Batch{}

	// Fill in the rc call details the worker will execute
	b.Path = "job/batch"
	b.Config = c._config
	b.Filter = c._filter

	// write the pending job
	name, err := c.jobs.writeJob(ctx, clusterPending, &b)
	if err != nil {
		return err
	}

	fs.Infof(name, "written cluster batch file")
	c.inflight[name] = b
	return nil
}
|
||||
|
||||
// Add the command to the current batch.
//
// in is the rc call to run, size is the number of bytes this item
// accounts for and tr is the accounting transfer which will be
// completed when the result comes back in processCompletedJob.
//
// The batch is sent automatically once it reaches the configured size
// or file count limits.
func (c *Cluster) addToBatch(ctx context.Context, obj fs.Object, in rc.Params, size int64, tr *accounting.Transfer) (err error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.shutdown {
		return errors.New("internal error: can't add file to Shutdown cluster")
	}

	c.currentBatch.Inputs = append(c.currentBatch.Inputs, in)
	c.currentBatch.size += size
	c.currentBatch.trs = append(c.currentBatch.trs, tr)
	c.currentBatch.sizes = append(c.currentBatch.sizes, size)

	// Flush the batch if it has reached either limit
	if c.currentBatch.size >= int64(c.batchSize) || len(c.currentBatch.Inputs) >= c.batchFiles {
		err = c.sendBatch(ctx)
		if err != nil {
			return err
		}
	}
	return nil
}
|
||||
|
||||
// Move does operations.Move via the cluster.
//
// Move src object to dst or fdst if nil. If dst is nil then it uses
// remote as the name of the new object.
//
// The move is queued as an operations/movefile rc call; the transfer
// is accounted when the worker result is processed.
func (c *Cluster) Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (err error) {
	tr := accounting.Stats(ctx).NewTransfer(src, fdst)
	// Respect --dry-run / --interactive
	if operations.SkipDestructive(ctx, src, "cluster move") {
		in := tr.Account(ctx, nil)
		in.DryRun(src.Size())
		tr.Done(ctx, nil)
		return nil
	}
	fsrc, ok := src.Fs().(fs.Fs)
	if !ok {
		err = errors.New("internal error: cluster move: can't cast src.Fs() to fs.Fs")
		tr.Done(ctx, err)
		return err
	}
	in := rc.Params{
		"_path":     "operations/movefile",
		"dstFs":     fs.ConfigStringFull(fdst),
		"dstRemote": remote,
		"srcFs":     fs.ConfigStringFull(fsrc),
		"srcRemote": src.Remote(),
	}
	// Prefer the existing destination name if we have one
	if dst != nil {
		in["dstRemote"] = dst.Remote()
	}
	return c.addToBatch(ctx, src, in, src.Size(), tr)
}
|
||||
|
||||
// Copy does operations.Copy via the cluster.
|
||||
//
|
||||
// Copy src object to dst or fdst if nil. If dst is nil then it uses
|
||||
// remote as the name of the new object.
|
||||
func (c *Cluster) Copy(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (err error) {
|
||||
tr := accounting.Stats(ctx).NewTransfer(src, fdst)
|
||||
if operations.SkipDestructive(ctx, src, "cluster copy") {
|
||||
in := tr.Account(ctx, nil)
|
||||
in.DryRun(src.Size())
|
||||
tr.Done(ctx, nil)
|
||||
return nil
|
||||
}
|
||||
fsrc, ok := src.Fs().(fs.Fs)
|
||||
if !ok {
|
||||
err = errors.New("internal error: cluster copy: can't cast src.Fs() to fs.Fs")
|
||||
tr.Done(ctx, err)
|
||||
return err
|
||||
}
|
||||
in := rc.Params{
|
||||
"_path": "operations/copyfile",
|
||||
"dstFs": fs.ConfigStringFull(fdst),
|
||||
"dstRemote": remote,
|
||||
"srcFs": fs.ConfigStringFull(fsrc),
|
||||
"srcRemote": src.Remote(),
|
||||
}
|
||||
if dst != nil {
|
||||
in["dstRemote"] = dst.Remote()
|
||||
}
|
||||
return c.addToBatch(ctx, src, in, src.Size(), tr)
|
||||
}
|
||||
|
||||
// DeleteFile does operations.DeleteFile via the cluster
|
||||
//
|
||||
// If useBackupDir is set and --backup-dir is in effect then it moves
|
||||
// the file to there instead of deleting
|
||||
func (c *Cluster) DeleteFile(ctx context.Context, dst fs.Object) (err error) {
|
||||
tr := accounting.Stats(ctx).NewCheckingTransfer(dst, "deleting")
|
||||
err = accounting.Stats(ctx).DeleteFile(ctx, dst.Size())
|
||||
if err != nil {
|
||||
tr.Done(ctx, err)
|
||||
return err
|
||||
}
|
||||
if operations.SkipDestructive(ctx, dst, "cluster delete") {
|
||||
tr.Done(ctx, nil)
|
||||
return
|
||||
}
|
||||
fdst, ok := dst.Fs().(fs.Fs)
|
||||
if !ok {
|
||||
err = errors.New("internal error: cluster delete: can't cast dst.Fs() to fs.Fs")
|
||||
tr.Done(ctx, nil)
|
||||
return err
|
||||
}
|
||||
in := rc.Params{
|
||||
"_path": "operations/deletefile",
|
||||
"fs": fs.ConfigStringFull(fdst),
|
||||
"remote": dst.Remote(),
|
||||
}
|
||||
return c.addToBatch(ctx, dst, in, 0, tr)
|
||||
}
|
||||
|
||||
// processCompletedJob loads the job and checks it off.
//
// It reads the BatchResult written by the worker, matches it against
// the inflight batch of the same name, then completes the accounting
// transfer for each input according to its individual result.
//
// Returns an error if the result can't be read, the job isn't in
// c.inflight, or the batch as a whole failed.
func (c *Cluster) processCompletedJob(ctx context.Context, obj fs.Object) error {
	// Job files are named {name}.json - recover the job name
	name := path.Base(obj.Remote())
	name, _ = strings.CutSuffix(name, ".json")
	fs.Debugf(nil, "cluster: processing completed job %q", name)

	var output BatchResult
	err := c.jobs.readJob(ctx, obj, &output)
	if err != nil {
		return fmt.Errorf("check jobs read: %w", err)
	}

	c.mu.Lock()
	input, ok := c.inflight[name]
	// FIXME delete or save job
	if !ok {
		// Dump the known inflight keys to help debug the mismatch
		for k := range c.inflight {
			fs.Debugf(nil, "key %q", k)
		}
		c.mu.Unlock()
		return fmt.Errorf("check jobs: job %q not found", name)
	}
	c.mu.Unlock()

	// Delete the inflight entry when batch is processed
	defer func() {
		c.mu.Lock()
		delete(c.inflight, name)
		c.mu.Unlock()
	}()

	// Check job level (whole batch) errors first
	if output.Error != "" {
		return fmt.Errorf("cluster: failed to run batch job: %s (%d)", output.Error, output.Status)
	}
	if len(input.Inputs) != len(output.Results) {
		return fmt.Errorf("cluster: input had %d jobs but output had %d", len(input.Inputs), len(output.Results))
	}

	// Run through the batch and mark operations as successful or not
	for i := range input.Inputs {
		in := input.Inputs[i]
		tr := input.trs[i]
		size := input.sizes[i]
		out := output.Results[i]
		errorString, hasError := out["error"]
		var err error
		if hasError && errorString != "" {
			err = fmt.Errorf("cluster: worker error: %s (%v)", errorString, out["status"])
		}
		// Successful moves count as renames in the stats
		if err == nil && in["_path"] == "operations/movefile" {
			accounting.Stats(ctx).Renames(1)
		}
		// Account the bytes as transferred and finish the transfer
		acc := tr.Account(ctx, nil)
		acc.AccountReadN(size)
		tr.Done(ctx, err)
		// Work out which remote to log against
		remote, ok := in["dstRemote"]
		if !ok {
			remote = in["remote"]
		}
		if err == nil {
			fs.Infof(remote, "cluster %s successful", in["_path"])
		} else {
			fs.Errorf(remote, "cluster %s failed: %v", in["_path"], err)
		}
	}

	return nil
}
|
||||
|
||||
// loadWorkerStatus updates the worker status
|
||||
func (c *Cluster) loadWorkerStatus(ctx context.Context) error {
|
||||
objs, err := c.jobs.listDir(ctx, clusterStatus)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cluster: get job status list failed: %w", err)
|
||||
}
|
||||
ec := errcount.New()
|
||||
g, gCtx := errgroup.WithContext(ctx)
|
||||
var mu sync.Mutex
|
||||
for _, obj := range objs {
|
||||
g.Go(func() error {
|
||||
buf, err := c.jobs.readFile(gCtx, obj)
|
||||
if err != nil {
|
||||
ec.Add(fmt.Errorf("read object: %w", err))
|
||||
return nil
|
||||
}
|
||||
workerStatus := new(WorkerStatus)
|
||||
err = json.Unmarshal(buf, workerStatus)
|
||||
if err != nil {
|
||||
ec.Add(fmt.Errorf("status json: %w", err))
|
||||
return nil
|
||||
}
|
||||
mu.Lock()
|
||||
c.workers[workerStatus.ID] = workerStatus
|
||||
mu.Unlock()
|
||||
return nil
|
||||
})
|
||||
}
|
||||
return ec.Err("cluster: load status")
|
||||
}
|
||||
|
||||
// checkWorkers loads the worker status and handles worker timeouts.
//
// A worker which hasn't updated its status file for workerTimeout is
// assumed dead: any jobs it had claimed are renamed back to the
// pending queue so another worker can pick them up, and the worker is
// recorded in c.deadWorkers so this is only done once. A dead worker
// whose status updates again is resurrected.
func (c *Cluster) checkWorkers(ctx context.Context) {
	err := c.loadWorkerStatus(ctx)
	if err != nil {
		fs.Errorf(nil, "failed to read some worker status: %v", err)
	}
	for workerID, status := range c.workers {
		timeSinceUpdated := time.Since(status.Updated)
		if timeSinceUpdated > workerTimeout {
			// Only process a newly dead worker once
			if _, isDead := c.deadWorkers[workerID]; isDead {
				continue
			}
			fs.Errorf(nil, "cluster: haven't heard from worker %q for %v - assuming dead", workerID, timeSinceUpdated)
			// Find any jobs claimed by worker and restart
			objs, err := c.jobs.listDir(ctx, clusterProcessing)
			if err != nil {
				fs.Errorf(nil, "cluster: failed to find pending jobs: %v", err)
				continue
			}
			for _, obj := range objs {
				fs.Errorf(obj, "cluster: checking job")
				// Jobs are named {jobID}-{workerID}.json
				name := strings.TrimSuffix(path.Base(obj.Remote()), ".json")
				dash := strings.LastIndex(name, "-")
				if dash < 0 {
					fs.Errorf(nil, "cluster: failed to find dash in job %q", name)
					continue
				}
				jobID, jobWorkerID := name[:dash], name[dash+1:]
				fs.Errorf(obj, "cluster: checking jobID %q, workerID %q", jobID, jobWorkerID)
				if workerID != jobWorkerID {
					fs.Debugf(nil, "cluster: job %q doesn't match %q", jobWorkerID, workerID)
					continue
				}
				// Found a job running on worker - rename it back to Pending
				newRemote := path.Join(clusterPending, jobID+".json")
				_, err = c.jobs.rename(ctx, obj, newRemote)
				if err != nil {
					fs.Errorf(nil, "cluster: failed to restart job %q: %v", jobID, err)
					continue
				}
				fs.Errorf(nil, "cluster: restarted job %q", jobID)
			}
			c.deadWorkers[workerID] = struct{}{}
		} else {
			// Worker is alive - resurrect it if previously marked dead
			if _, isDead := c.deadWorkers[workerID]; isDead {
				fs.Errorf(nil, "cluster: dead worker %q came back to life!", workerID)
				delete(c.deadWorkers, workerID)
			}
		}
	}
}
|
||||
|
||||
// checkJobs sees if there are any completed jobs
|
||||
func (c *Cluster) checkJobs(ctx context.Context) {
|
||||
objs, err := c.jobs.listDir(ctx, clusterDone)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "cluster: get completed job list failed: %v", err)
|
||||
return
|
||||
}
|
||||
for _, obj := range objs {
|
||||
err := c.processCompletedJob(ctx, obj)
|
||||
status := "output-ok"
|
||||
ok := true
|
||||
if err != nil {
|
||||
status = "output-failed"
|
||||
ok = false
|
||||
fs.Errorf(nil, "cluster: process completed job failed: %v", err)
|
||||
}
|
||||
c.jobs.finish(ctx, obj, status, ok)
|
||||
}
|
||||
}
|
||||
|
||||
// Run the background process.
//
// This is the cluster master's event loop: it periodically checks for
// completed jobs and worker status, and answers Sync requests by
// replying on the supplied channel once there are no inflight batches.
//
// Note that checkJobs runs after every select wakeup (not just on its
// own ticker) so sync requests are answered promptly.
func (c *Cluster) run(ctx context.Context) {
	defer c.wg.Done()
	checkJobs := time.NewTicker(clusterCheckJobsInterval)
	defer checkJobs.Stop()
	checkWorkers := time.NewTicker(clusterCheckWorkersInterval)
	defer checkWorkers.Stop()
	// Channels waiting to be told the cluster is empty
	var syncedChans []chan<- struct{}
	for {
		select {
		case <-ctx.Done():
			return
		case <-c.quit:
			fs.Debugf(nil, "cluster: quit request received")
			return
		case synced := <-c.sync:
			syncedChans = append(syncedChans, synced)
			fs.Debugf(nil, "cluster: sync request received")
		case <-checkWorkers.C:
			c.checkWorkers(ctx)
		case <-checkJobs.C:
			// checkJobs is called below on every wakeup
		}
		c.checkJobs(ctx)
		// Notify waiters once all inflight batches have completed
		if len(syncedChans) > 0 {
			c.mu.Lock()
			n := len(c.inflight)
			c.mu.Unlock()
			if n == 0 {
				fs.Debugf(nil, "cluster: synced")
				for _, synced := range syncedChans {
					synced <- struct{}{}
				}
				syncedChans = nil
			}
		}
	}
}
|
||||
|
||||
// Sync the cluster.
//
// Call this when all job items have been added to the cluster.
//
// This will wait for any outstanding jobs to finish regardless of who
// put them in.
//
// NOTE(review): this sends on c.sync which is serviced only by the
// background run loop - if Sync is called after Shutdown/Abort has
// stopped that loop it will block forever. Confirm callers only use
// Sync on a live cluster.
func (c *Cluster) Sync(ctx context.Context) error {
	// Flush any outstanding batched items
	c.mu.Lock()
	err := c.sendBatch(ctx)
	c.mu.Unlock()

	// Wait for the cluster to be empty
	done := make(chan struct{})
	c.sync <- done
	<-done

	return err
}
|
||||
|
||||
// Shutdown the cluster.
|
||||
//
|
||||
// Call this when all job items have been added to the cluster.
|
||||
//
|
||||
// This will wait for any outstanding jobs to finish.
|
||||
func (c *Cluster) Shutdown(ctx context.Context) (err error) {
|
||||
c.mu.Lock()
|
||||
inBatch := len(c.currentBatch.Inputs)
|
||||
inFlight := len(c.inflight)
|
||||
shutdown := c.shutdown
|
||||
c.shutdown = true
|
||||
c.mu.Unlock()
|
||||
|
||||
if inBatch > 0 {
|
||||
err = errors.Join(nil, fmt.Errorf("%d items batched on cluster shutdown", inBatch))
|
||||
}
|
||||
if inFlight > 0 {
|
||||
err = errors.Join(nil, fmt.Errorf("%d items in flight on cluster shutdown", inFlight))
|
||||
}
|
||||
if shutdown {
|
||||
fs.Debugf(nil, "cluster: already shutdown")
|
||||
return nil
|
||||
}
|
||||
c.quit <- struct{}{}
|
||||
fs.Debugf(nil, "Waiting for cluster to finish")
|
||||
c.wg.Wait()
|
||||
|
||||
// Send a quit job
|
||||
if c.quitWorkers {
|
||||
fs.Logf(nil, "Sending quit to workers")
|
||||
quitErr := c.jobs.writeQuitJob(ctx, clusterPending)
|
||||
if quitErr != nil {
|
||||
err = errors.Join(err, fmt.Errorf("shutdown quit: %w", quitErr))
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Abort the cluster and any outstanding jobs.
//
// It cancels the background context and waits for the background
// goroutine to exit. Unlike Shutdown it does not flush batches or
// signal the workers.
func (c *Cluster) Abort() {
	c.cancel()
	c.wg.Wait()
}
|
||||
311
fs/cluster/jobs.go
Normal file
311
fs/cluster/jobs.go
Normal file
@@ -0,0 +1,311 @@
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"cmp"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/object"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
)
|
||||
|
||||
// Batches flow from queue/pending to queue/processing/
|
||||
const (
|
||||
clusterQueue = "queue"
|
||||
clusterPending = clusterQueue + "/pending"
|
||||
clusterProcessing = clusterQueue + "/processing"
|
||||
clusterDone = clusterQueue + "/done"
|
||||
clusterFinished = clusterQueue + "/finished"
|
||||
clusterStatus = clusterQueue + "/status"
|
||||
|
||||
minSleep = 10 * time.Millisecond
|
||||
maxSleep = 2 * time.Second
|
||||
decayConstant = 2 // bigger for slower decay, exponential
|
||||
|
||||
// Read the queue this often
|
||||
clusterCheckJobsInterval = time.Second
|
||||
|
||||
// Write the worker status this often
|
||||
clusterWriteStatusInterval = time.Second
|
||||
|
||||
// Read the worker status this often
|
||||
clusterCheckWorkersInterval = time.Second
|
||||
|
||||
// Name of job which signals to the workers to quit
|
||||
quitJob = "QUIT"
|
||||
)
|
||||
|
||||
// Jobs is a container for sending and receiving jobs to the cluster.
//
// It wraps the shared remote used as the job queue and knows how to
// atomically write, read, rename and finish job files on it.
type Jobs struct {
	remote  string            // remote for job storage
	f       fs.Fs             // cluster remote storage
	partial bool              // do we need to write and rename
	hasMove bool              // set if f has server side move otherwise has server side copy
	cleanup fs.ClusterCleanup // how we cleanup the cluster files
	pacer   *fs.Pacer         // To pace the API calls
}
|
||||
|
||||
// NewJobs creates a Jobs source from the config in ctx.
|
||||
//
|
||||
// It may return nil for no cluster is configured.
|
||||
func NewJobs(ctx context.Context) (*Jobs, error) {
|
||||
ci := fs.GetConfig(ctx)
|
||||
if ci.Cluster == "" {
|
||||
return nil, nil
|
||||
}
|
||||
f, err := cache.Get(ctx, ci.Cluster)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cluster remote creation: %w", err)
|
||||
}
|
||||
features := f.Features()
|
||||
if features.Move == nil && features.Copy == nil {
|
||||
return nil, fmt.Errorf("cluster remote must have server side move and %q doesn't", ci.Cluster)
|
||||
}
|
||||
jobs := &Jobs{
|
||||
remote: ci.Cluster,
|
||||
f: f,
|
||||
partial: features.PartialUploads,
|
||||
hasMove: features.Move != nil,
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
cleanup: ci.ClusterCleanup,
|
||||
}
|
||||
return jobs, nil
|
||||
}
|
||||
|
||||
// Create the cluster directory structure
|
||||
func (jobs *Jobs) createDirectoryStructure(ctx context.Context) (err error) {
|
||||
for _, dir := range []string{clusterPending, clusterProcessing, clusterDone, clusterFinished, clusterStatus} {
|
||||
err = jobs.f.Mkdir(ctx, dir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cluster mkdir %q: %w", dir, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// rename a file.
//
// Uses server side Move if available, otherwise Copy followed by
// Remove of the source.
//
// if this returns fs.ErrorObjectNotFound (wrapped) then the file has
// already been renamed, e.g. by another worker claiming the job.
func (jobs *Jobs) rename(ctx context.Context, src fs.Object, dstRemote string) (dst fs.Object, err error) {
	features := jobs.f.Features()
	if jobs.hasMove {
		dst, err = features.Move(ctx, src, dstRemote)
		if err != nil {
			return nil, fmt.Errorf("cluster: failed to rename job file: %w", err)
		}
	} else {
		// No Move - emulate it with Copy then Remove
		dst, err = features.Copy(ctx, src, dstRemote)
		if err != nil {
			return nil, fmt.Errorf("cluster: failed to rename (copy phase) job file: %w", err)
		}
		err = src.Remove(ctx)
		if err != nil {
			return nil, fmt.Errorf("cluster: failed to rename (delete phase) job file: %w", err)
		}
	}
	return dst, nil
}
|
||||
|
||||
// Finish with a jobs file.
//
// Depending on the cleanup policy the file is either removed (ok
// results with "completed" cleanup, or everything with "full"
// cleanup) or archived under the finished directory keyed by status.
// Failures are logged, not returned.
func (jobs *Jobs) finish(ctx context.Context, obj fs.Object, status string, ok bool) {
	var err error
	if (ok && jobs.cleanup == fs.ClusterCleanupCompleted) || jobs.cleanup == fs.ClusterCleanupFull {
		err = obj.Remove(ctx)
	} else {
		// Archive under queue/finished/{status}/
		name := path.Join(clusterFinished, status, path.Base(obj.Remote()))
		_, err = jobs.rename(ctx, obj, name)
	}
	if err != nil {
		fs.Errorf(nil, "cluster: removing completed job failed: %v", err)
	}
}
|
||||
|
||||
// write buf into remote
|
||||
func (jobs *Jobs) writeFile(ctx context.Context, remote string, modTime time.Time, buf []byte) error {
|
||||
partialRemote := remote
|
||||
if jobs.partial {
|
||||
partialRemote = remote + ".partial"
|
||||
}
|
||||
// Calculate hashes
|
||||
w, err := hash.NewMultiHasherTypes(jobs.f.Hashes())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = w.Write(buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
obji := object.NewStaticObjectInfo(partialRemote, modTime, int64(len(buf)), true, w.Sums(), jobs.f)
|
||||
var obj fs.Object
|
||||
err = jobs.pacer.Call(func() (bool, error) {
|
||||
in := bytes.NewBuffer(buf)
|
||||
obj, err = jobs.f.Put(ctx, in, obji)
|
||||
if err != nil {
|
||||
return true, fmt.Errorf("cluster: failed to write %q: %q", remote, err)
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if jobs.partial {
|
||||
obj, err = jobs.rename(ctx, obj, remote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove the file if it exists
|
||||
func (jobs *Jobs) removeFile(ctx context.Context, remote string) error {
|
||||
obj, err := jobs.f.NewObject(ctx, remote)
|
||||
if errors.Is(err, fs.ErrorObjectNotFound) || errors.Is(err, fs.ErrorDirNotFound) {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
return obj.Remove(ctx)
|
||||
}
|
||||
|
||||
// write a job to a file returning the name.
//
// The name is a UTC RFC3339Nano timestamp plus a random suffix so job
// files sort in creation order and never collide. job is marshalled
// as indented JSON into where/{name}.json.
func (jobs *Jobs) writeJob(ctx context.Context, where string, job any) (name string, err error) {
	now := time.Now().UTC()
	name = fmt.Sprintf("%s-%s", now.Format(time.RFC3339Nano), random.String(20))
	remote := path.Join(where, name+".json")
	buf, err := json.MarshalIndent(job, "", "\t")
	if err != nil {
		return "", fmt.Errorf("cluster: job json: %w", err)
	}
	err = jobs.writeFile(ctx, remote, now, buf)
	if err != nil {
		return "", fmt.Errorf("cluster: job write: %w", err)
	}
	return name, nil
}
|
||||
|
||||
// write a quit job to a file
|
||||
func (jobs *Jobs) writeQuitJob(ctx context.Context, where string) (err error) {
|
||||
now := time.Now().UTC()
|
||||
remote := path.Join(where, quitJob+".json")
|
||||
err = jobs.writeFile(ctx, remote, now, []byte("{}"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("cluster: quit job write: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// read buf from object.
//
// The whole open/read/close sequence is retried under the pacer.
func (jobs *Jobs) readFile(ctx context.Context, o fs.Object) (buf []byte, err error) {
	err = jobs.pacer.Call(func() (bool, error) {
		in, err := operations.Open(ctx, o)
		if err != nil {
			return true, fmt.Errorf("cluster: failed to open %q: %w", o, err)
		}
		buf, err = io.ReadAll(in)
		if err != nil {
			return true, fmt.Errorf("cluster: failed to read %q: %w", o, err)
		}
		err = in.Close()
		if err != nil {
			return true, fmt.Errorf("cluster: failed to close %q: %w", o, err)
		}
		return false, nil
	})
	if err != nil {
		return nil, err
	}
	return buf, nil
}
|
||||
|
||||
// read a job from a file
|
||||
//
|
||||
// job should be a pointer to something to be unmarshalled
|
||||
func (jobs *Jobs) readJob(ctx context.Context, obj fs.Object, job any) error {
|
||||
buf, err := jobs.readFile(ctx, obj)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cluster: job read: %w", err)
|
||||
}
|
||||
err = json.Unmarshal(buf, job)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cluster: job read json: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// lists the json files in a cluster directory.
//
// Only objects ending in .json are returned, sorted by remote name so
// the timestamp-prefixed job names come back in creation order.
func (jobs *Jobs) listDir(ctx context.Context, dir string) (objects []fs.Object, err error) {
	entries, err := jobs.f.List(ctx, dir)
	if err != nil {
		return nil, fmt.Errorf("cluster: failed to list %q: %w", dir, err)
	}
	entries.ForObject(func(o fs.Object) {
		if strings.HasSuffix(o.Remote(), ".json") {
			objects = append(objects, o)
		}
	})
	slices.SortStableFunc(objects, func(a, b fs.Object) int {
		return cmp.Compare(a.Remote(), b.Remote())
	})
	return objects, nil
}
|
||||
|
||||
// get a job from pending if there is one available.
//
// The job is claimed atomically by renaming it from
// queue/pending/{name}.json to queue/processing/{name}-{id}.json - if
// the rename fails with fs.ErrorObjectNotFound another worker got
// there first and the next job is tried.
//
// Returns a nil object if no jobs are available.
//
// If a QUIT job is seen and no real job is claimed, the worker
// process exits (running atexit handlers first).
//
// FIXME should mark jobs as error jobs in here if they can't be read properly?
func (jobs *Jobs) getJob(ctx context.Context, id string) (name string, obj fs.Object, err error) {
	objs, err := jobs.listDir(ctx, clusterPending)
	if err != nil {
		return "", nil, fmt.Errorf("get job list: %w", err)
	}
	quit := false
	for len(objs) > 0 {
		// Take the oldest job first (objs is sorted by name)
		obj = objs[0]
		objs = objs[1:]
		name = path.Base(obj.Remote())
		name, _ = strings.CutSuffix(name, ".json")

		// See if we have been asked to quit
		if name == quitJob {
			quit = true
			continue
		}

		// claim the job
		newName := fmt.Sprintf("%s-%s.json", name, id)
		newRemote := path.Join(clusterProcessing, newName)
		obj, err = jobs.rename(ctx, obj, newRemote)
		if errors.Is(err, fs.ErrorObjectNotFound) {
			// claim failed - try again
			continue
		}
		if err != nil {
			return "", nil, fmt.Errorf("get job claim: %w", err)
		}
		return name, obj, nil
	}
	// No jobs found
	if quit {
		fs.Logf(nil, "Exiting cluster worker on command")
		atexit.Run()
		os.Exit(0)
	}
	return "", nil, nil
}
|
||||
211
fs/cluster/worker.go
Normal file
211
fs/cluster/worker.go
Normal file
@@ -0,0 +1,211 @@
|
||||
package cluster
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"path"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/rc"
|
||||
"github.com/rclone/rclone/fs/rc/jobs"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
)
|
||||
|
||||
const maxWorkersDone = 16 // maximum jobs in the done list
|
||||
|
||||
// Worker describes a single instance of a cluster worker.
//
// A worker polls the pending queue, runs claimed batch jobs and
// periodically publishes its status file for the master to read.
type Worker struct {
	jobs   *Jobs          // shared job storage
	cancel func()         // stop bg job
	wg     sync.WaitGroup // bg job finished
	id     string         // id of this worker
	status string         // place it stores it status

	jobsMu  sync.Mutex          // protects running and done
	running map[string]struct{} // IDs of the jobs being processed
	done    []string            // IDs of finished jobs (most recent maxWorkersDone)
}
|
||||
|
||||
// WorkerStatus shows the status of this worker including jobs
// running.
//
// This is what gets serialized into the worker's status file and read
// back by the cluster master to detect dead workers.
type WorkerStatus struct {
	ID      string               `json:"id"`      // worker ID
	Running map[string]rc.Params `json:"running"` // Job ID => accounting.RemoteStats
	Done    map[string]bool      `json:"done"`    // Job ID => finished status
	Updated time.Time            `json:"updated"` // when this status was written
}
|
||||
|
||||
// NewWorker creates a new cluster worker from the config in ctx.
//
// It returns a nil Worker (and nil error) if no cluster is configured.
//
// On success it starts two background goroutines: one polling for
// jobs (runJobs) and one publishing the worker status (runStatus).
func NewWorker(ctx context.Context) (*Worker, error) {
	ci := fs.GetConfig(ctx)
	if ci.Cluster == "" {
		return nil, nil
	}
	jobs, err := NewJobs(ctx)
	if err != nil {
		return nil, err
	}
	w := &Worker{
		jobs:    jobs,
		id:      ci.ClusterID,
		running: make(map[string]struct{}),
	}
	// Make up a random ID if none was configured
	if w.id == "" {
		w.id = random.String(10)
	}
	// Where this worker writes its status file
	w.status = path.Join(clusterStatus, w.id+".json")

	// Start the background workers
	bgCtx, cancel := context.WithCancel(context.Background())
	w.cancel = cancel
	w.wg.Add(1)
	go w.runJobs(bgCtx)
	w.wg.Add(1)
	go w.runStatus(bgCtx)

	fs.Logf(w.jobs.f, "Started cluster worker")

	return w, nil
}
|
||||
|
||||
// checkJobs checks to see if a pending job exists for this worker and
// runs it if available.
//
// Errors are logged rather than returned as this runs in the
// background polling loop.
func (w *Worker) checkJobs(ctx context.Context) {
	name, obj, err := w.jobs.getJob(ctx, w.id)
	if err != nil {
		fs.Errorf(nil, "check jobs get: %v", err)
		return
	}
	if obj == nil {
		return // no jobs available
	}

	// make a stats group for this job so its stats can be reported
	// separately in the worker status
	ctx = accounting.WithStatsGroup(ctx, name)

	// Add job ID to the running set
	w.jobsMu.Lock()
	w.running[name] = struct{}{}
	w.jobsMu.Unlock()
	fs.Infof(nil, "write jobID %q", name)

	// Remove job ID on exit, moving it to the done list which is
	// trimmed to the most recent maxWorkersDone entries
	defer func() {
		w.jobsMu.Lock()
		delete(w.running, name)
		w.done = append(w.done, name)
		if len(w.done) > maxWorkersDone {
			w.done = w.done[len(w.done)-maxWorkersDone : len(w.done)]
		}
		w.jobsMu.Unlock()
	}()

	fs.Debugf(nil, "cluster: processing pending job %q", name)
	inBuf, err := w.jobs.readFile(ctx, obj)
	if err != nil {
		fs.Errorf(nil, "check jobs read: %v", err)
		w.jobs.finish(ctx, obj, "input-error", false)
		return
	}
	// Run the rc job described by the JSON blob and write its output
	// JSON into the done folder
	outBuf := jobs.NewJobFromBytes(ctx, inBuf)
	remote := path.Join(clusterDone, name+".json")
	err = w.jobs.writeFile(ctx, remote, time.Now(), outBuf)
	if err != nil {
		fs.Errorf(nil, "check jobs failed to write output: %v", err)
		return
	}
	w.jobs.finish(ctx, obj, "input-ok", true)
	fs.Debugf(nil, "cluster: processed pending job %q", name)
}
// Run the background process to pick up jobs
|
||||
func (w *Worker) runJobs(ctx context.Context) {
|
||||
defer w.wg.Done()
|
||||
checkJobs := time.NewTicker(clusterCheckJobsInterval)
|
||||
defer checkJobs.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-checkJobs.C:
|
||||
w.checkJobs(ctx)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Write the worker status
|
||||
func (w *Worker) writeStatus(ctx context.Context) {
|
||||
// Create the worker status from the jobIDs and the short stats
|
||||
status := WorkerStatus{
|
||||
ID: w.id,
|
||||
Running: make(map[string]rc.Params),
|
||||
Updated: time.Now(),
|
||||
Done: make(map[string]bool),
|
||||
}
|
||||
w.jobsMu.Lock()
|
||||
for _, jobID := range w.done {
|
||||
status.Done[jobID] = true
|
||||
}
|
||||
for jobID := range w.running {
|
||||
fs.Infof(nil, "read jobID %q", jobID)
|
||||
si := accounting.StatsGroup(ctx, jobID)
|
||||
out, err := si.RemoteStats(true)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "cluster: write status: stats: %v", err)
|
||||
status.Running[jobID] = rc.Params{}
|
||||
} else {
|
||||
status.Running[jobID] = out
|
||||
}
|
||||
status.Done[jobID] = false
|
||||
}
|
||||
w.jobsMu.Unlock()
|
||||
|
||||
// Write the stats to a file
|
||||
buf, err := json.MarshalIndent(status, "", "\t")
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "cluster: write status: json: %w", err)
|
||||
return
|
||||
}
|
||||
err = w.jobs.writeFile(ctx, w.status, status.Updated, buf)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "cluster: write status: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Remove the worker status
|
||||
func (w *Worker) clearStatus(ctx context.Context) {
|
||||
err := w.jobs.removeFile(ctx, w.status)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "cluster: clear status: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Run the background process to write status
|
||||
func (w *Worker) runStatus(ctx context.Context) {
|
||||
defer w.wg.Done()
|
||||
w.writeStatus(ctx)
|
||||
defer w.clearStatus(ctx)
|
||||
writeStatus := time.NewTicker(clusterWriteStatusInterval)
|
||||
defer writeStatus.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-writeStatus.C:
|
||||
t0 := time.Now()
|
||||
w.writeStatus(ctx)
|
||||
fs.Debugf(nil, "write status took %v at %v", time.Since(t0), t0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Shutdown the worker regardless of whether it has work to process or not.
//
// It cancels the background goroutines and waits for them to exit
// (which also removes the worker's status file via runStatus's defer).
//
// The ctx parameter is currently unused and the error return is
// always nil; both are kept for interface symmetry.
func (w *Worker) Shutdown(ctx context.Context) error {
	w.cancel()
	w.wg.Wait()
	return nil
}
64
fs/config.go
64
fs/config.go
@@ -50,6 +50,34 @@ var (
|
||||
ConfigEdit = "config_fs_edit"
|
||||
)
|
||||
|
||||
// ClusterCleanup describes the cluster cleanup choices.
type ClusterCleanup = Enum[clusterCleanupChoices]

// Cluster cleanup choices.
//
// ClusterCleanupNone don't remove any cluster files
// ClusterCleanupCompleted remove successfully completed jobs
// ClusterCleanupFull remove everything on exit
const (
	ClusterCleanupNone ClusterCleanup = iota
	ClusterCleanupCompleted
	ClusterCleanupFull
)

// clusterCleanupChoices provides the Choices/Type methods which make
// ClusterCleanup usable as an Enum.
type clusterCleanupChoices struct{}

// Choices returns the valid string values. The slice is indexed by the
// ClusterCleanup constants so each enum value maps to its name.
func (clusterCleanupChoices) Choices() []string {
	return []string{
		ClusterCleanupNone:      "none",
		ClusterCleanupCompleted: "completed",
		ClusterCleanupFull:      "full",
	}
}

// Type returns the name of the enum type for use in help and flag
// parsing.
func (clusterCleanupChoices) Type() string {
	return "ClusterCleanup"
}
// ConfigOptionsInfo describes the Options in use
|
||||
var ConfigOptionsInfo = Options{{
|
||||
Name: "modify_window",
|
||||
@@ -566,6 +594,36 @@ var ConfigOptionsInfo = Options{{
|
||||
Default: "",
|
||||
Help: "HTTP proxy URL.",
|
||||
Groups: "Networking",
|
||||
}, {
|
||||
Name: "cluster",
|
||||
Default: "",
|
||||
Help: "Enable cluster mode with remote to use as shared storage.",
|
||||
Groups: "Networking",
|
||||
}, {
|
||||
Name: "cluster_id",
|
||||
Default: "",
|
||||
Help: "Set to an ID for the cluster. An ID of 0 or empty becomes the controller.",
|
||||
Groups: "Networking",
|
||||
}, {
|
||||
Name: "cluster_quit_workers",
|
||||
Default: false,
|
||||
Help: "Set to cause the controller to quit the workers when it finished.",
|
||||
Groups: "Networking",
|
||||
}, {
|
||||
Name: "cluster_batch_files",
|
||||
Default: 1000,
|
||||
Help: "Max number of files for a cluster batch.",
|
||||
Groups: "Networking",
|
||||
}, {
|
||||
Name: "cluster_batch_size",
|
||||
Default: Tebi,
|
||||
Help: "Max size of files for a cluster batch.",
|
||||
Groups: "Networking",
|
||||
}, {
|
||||
Name: "cluster_cleanup",
|
||||
Default: ClusterCleanupFull,
|
||||
Help: "Control which cluster files get cleaned up.",
|
||||
Groups: "Networking",
|
||||
}}
|
||||
|
||||
// ConfigInfo is filesystem config options
|
||||
@@ -680,6 +738,12 @@ type ConfigInfo struct {
|
||||
MaxConnections int `config:"max_connections"`
|
||||
NameTransform []string `config:"name_transform"`
|
||||
HTTPProxy string `config:"http_proxy"`
|
||||
Cluster string `config:"cluster"`
|
||||
ClusterID string `config:"cluster_id"`
|
||||
ClusterQuitWorkers bool `config:"cluster_quit_workers"`
|
||||
ClusterBatchFiles int `config:"cluster_batch_files"`
|
||||
ClusterBatchSize SizeSuffix `config:"cluster_batch_size"`
|
||||
ClusterCleanup ClusterCleanup `config:"cluster_cleanup"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
||||
@@ -12,9 +12,9 @@ import (
|
||||
//
|
||||
// It expects 1, 2 or 3 arguments
|
||||
//
|
||||
// rclone authorize "fs name"
|
||||
// rclone authorize "fs name" "base64 encoded JSON blob"
|
||||
// rclone authorize "fs name" "client id" "client secret"
|
||||
// rclone authorize "backend name"
|
||||
// rclone authorize "backend name" "base64 encoded JSON blob"
|
||||
// rclone authorize "backend name" "client id" "client secret"
|
||||
func Authorize(ctx context.Context, args []string, noAutoBrowser bool, templateFile string) error {
|
||||
ctx = suppressConfirm(ctx)
|
||||
ctx = fs.ConfigOAuthOnly(ctx)
|
||||
|
||||
@@ -136,9 +136,11 @@ func (c Simple) Set(key, value string) {
|
||||
c[key] = value
|
||||
}
|
||||
|
||||
// String the map value the same way the config parser does, but with
|
||||
// string the map value the same way the config parser does, but with
|
||||
// sorted keys for reproducibility.
|
||||
func (c Simple) String() string {
|
||||
//
|
||||
// If human is set then use fewer quotes.
|
||||
func (c Simple) string(human bool) string {
|
||||
var ks = make([]string, 0, len(c))
|
||||
for k := range c {
|
||||
ks = append(ks, k)
|
||||
@@ -150,20 +152,41 @@ func (c Simple) String() string {
|
||||
out.WriteRune(',')
|
||||
}
|
||||
out.WriteString(k)
|
||||
out.WriteRune('=')
|
||||
out.WriteRune('\'')
|
||||
for _, ch := range c[k] {
|
||||
out.WriteRune(ch)
|
||||
// Escape ' as ''
|
||||
if ch == '\'' {
|
||||
out.WriteRune(ch)
|
||||
}
|
||||
v := c[k]
|
||||
if human && v == "true" {
|
||||
continue
|
||||
}
|
||||
out.WriteRune('=')
|
||||
if !human || strings.ContainsAny(v, `'":=,`) {
|
||||
out.WriteRune('\'')
|
||||
for _, ch := range v {
|
||||
out.WriteRune(ch)
|
||||
// Escape ' as ''
|
||||
if ch == '\'' {
|
||||
out.WriteRune(ch)
|
||||
}
|
||||
}
|
||||
out.WriteRune('\'')
|
||||
} else {
|
||||
out.WriteString(v)
|
||||
}
|
||||
out.WriteRune('\'')
|
||||
}
|
||||
return out.String()
|
||||
}
|
||||
|
||||
// Human converts the map value the same way the config parser does,
// but with sorted keys for reproducibility. This does it in human
// readable form with fewer quotes.
func (c Simple) Human() string {
	return c.string(true)
}

// String converts the map value the same way the config parser does,
// but with sorted keys for reproducibility.
func (c Simple) String() string {
	return c.string(false)
}
// Encode from c into a string suitable for putting on the command line
|
||||
func (c Simple) Encode() (string, error) {
|
||||
if len(c) == 0 {
|
||||
|
||||
121
fs/config/configmap/configmap_external_test.go
Normal file
121
fs/config/configmap/configmap_external_test.go
Normal file
@@ -0,0 +1,121 @@
|
||||
package configmap_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/fspath"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestSimpleString checks that Simple.String renders the map with
// sorted keys and full config-parser quoting, and that the rendered
// string round trips through the connection string parser.
func TestSimpleString(t *testing.T) {
	for _, tt := range []struct {
		name string
		want string
		in   configmap.Simple
	}{
		{name: "Nil", want: "", in: configmap.Simple(nil)},
		{name: "Empty", want: "", in: configmap.Simple{}},
		{name: "Basic", want: "config1='one'", in: configmap.Simple{
			"config1": "one",
		}},
		// "true" values are still quoted in the non-human form
		{name: "Truthy", want: "config1='true',config2='true'", in: configmap.Simple{
			"config1": "true",
			"config2": "true",
		}},
		{name: "Quotable", want: `config1='"one"',config2=':two:',config3='''three''',config4='=four=',config5=',five,'`, in: configmap.Simple{
			"config1": `"one"`,
			"config2": `:two:`,
			"config3": `'three'`,
			"config4": `=four=`,
			"config5": `,five,`,
		}},
		{name: "Order", want: "config1='one',config2='two',config3='three',config4='four',config5='five'", in: configmap.Simple{
			"config5": "five",
			"config4": "four",
			"config3": "three",
			"config2": "two",
			"config1": "one",
		}},
		{name: "Escaping", want: "apple='',config1='o''n''e'", in: configmap.Simple{
			"config1": "o'n'e",
			"apple":   "",
		}},
	} {
		t.Run(tt.name, func(t *testing.T) {
			// Check forwards
			params := tt.in.String()
			assert.Equal(t, tt.want, params)

			// Check config round trips through config parser
			remote := ":local," + params + ":"
			if params == "" {
				remote = ":local:"
			}
			what := fmt.Sprintf("remote = %q", remote)
			parsed, err := fspath.Parse(remote)
			require.NoError(t, err, what)
			// presumably parsed.Config is nil when there are no
			// parameters - hence the guard before comparing
			if len(parsed.Config) != 0 || len(tt.in) != 0 {
				assert.Equal(t, tt.in, parsed.Config, what)
			}
		})
	}

}
// TestSimpleHuman checks that Simple.Human renders the map with sorted
// keys in human readable form (bare values where safe, bare keys for
// "true" values) and that the rendered string round trips through the
// connection string parser.
func TestSimpleHuman(t *testing.T) {
	for _, tt := range []struct {
		name string
		want string
		in   configmap.Simple
	}{
		{name: "Nil", want: "", in: configmap.Simple(nil)},
		{name: "Empty", want: "", in: configmap.Simple{}},
		{name: "Basic", want: "config1=one", in: configmap.Simple{
			"config1": "one",
		}},
		// "true" values collapse to the bare key in human form
		{name: "Truthy", want: "config1,config2", in: configmap.Simple{
			"config1": "true",
			"config2": "true",
		}},
		// values containing '":=, still need quoting in human form
		{name: "Quotable", want: `config1='"one"',config2=':two:',config3='''three''',config4='=four=',config5=',five,'`, in: configmap.Simple{
			"config1": `"one"`,
			"config2": `:two:`,
			"config3": `'three'`,
			"config4": `=four=`,
			"config5": `,five,`,
		}},
		{name: "Order", want: "config1=one,config2=two,config3=three,config4=four,config5=five", in: configmap.Simple{
			"config5": "five",
			"config4": "four",
			"config3": "three",
			"config2": "two",
			"config1": "one",
		}},
		{name: "Escaping", want: "apple=,config1='o''n''e'", in: configmap.Simple{
			"config1": "o'n'e",
			"apple":   "",
		}},
	} {
		t.Run(tt.name, func(t *testing.T) {
			// Check forwards
			params := tt.in.Human()
			assert.Equal(t, tt.want, params)

			// Check config round trips through config parser
			remote := ":local," + params + ":"
			if params == "" {
				remote = ":local:"
			}
			what := fmt.Sprintf("remote = %q", remote)
			parsed, err := fspath.Parse(remote)
			require.NoError(t, err, what)
			// presumably parsed.Config is nil when there are no
			// parameters - hence the guard before comparing
			if len(parsed.Config) != 0 || len(tt.in) != 0 {
				assert.Equal(t, tt.in, parsed.Config, what)
			}
		})
	}

}
@@ -246,30 +246,6 @@ func TestConfigMapClearSetters(t *testing.T) {
|
||||
assert.Equal(t, []Setter(nil), m.setters)
|
||||
}
|
||||
|
||||
func TestSimpleString(t *testing.T) {
|
||||
// Basic
|
||||
assert.Equal(t, "", Simple(nil).String())
|
||||
assert.Equal(t, "", Simple{}.String())
|
||||
assert.Equal(t, "config1='one'", Simple{
|
||||
"config1": "one",
|
||||
}.String())
|
||||
|
||||
// Check ordering
|
||||
assert.Equal(t, "config1='one',config2='two',config3='three',config4='four',config5='five'", Simple{
|
||||
"config5": "five",
|
||||
"config4": "four",
|
||||
"config3": "three",
|
||||
"config2": "two",
|
||||
"config1": "one",
|
||||
}.String())
|
||||
|
||||
// Check escaping
|
||||
assert.Equal(t, "apple='',config1='o''n''e'", Simple{
|
||||
"config1": "o'n'e",
|
||||
"apple": "",
|
||||
}.String())
|
||||
}
|
||||
|
||||
func TestSimpleEncode(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in Simple
|
||||
|
||||
324
fs/fs_test.go
324
fs/fs_test.go
@@ -2,21 +2,9 @@ package fs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
@@ -90,315 +78,3 @@ func TestFeaturesDisableList(t *testing.T) {
|
||||
assert.False(t, ft.CaseInsensitive)
|
||||
assert.False(t, ft.DuplicateFiles)
|
||||
}
|
||||
|
||||
// Check it satisfies the interface
|
||||
var _ pflag.Value = (*Option)(nil)
|
||||
|
||||
func TestOption(t *testing.T) {
|
||||
d := &Option{
|
||||
Name: "potato",
|
||||
Value: SizeSuffix(17 << 20),
|
||||
}
|
||||
assert.Equal(t, "17Mi", d.String())
|
||||
assert.Equal(t, "SizeSuffix", d.Type())
|
||||
err := d.Set("18M")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, SizeSuffix(18<<20), d.Value)
|
||||
err = d.Set("sdfsdf")
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
var errFoo = errors.New("foo")
|
||||
|
||||
type dummyPaced struct {
|
||||
retry bool
|
||||
called int
|
||||
wait *sync.Cond
|
||||
}
|
||||
|
||||
func (dp *dummyPaced) fn() (bool, error) {
|
||||
if dp.wait != nil {
|
||||
dp.wait.L.Lock()
|
||||
dp.wait.Wait()
|
||||
dp.wait.L.Unlock()
|
||||
}
|
||||
dp.called++
|
||||
return dp.retry, errFoo
|
||||
}
|
||||
|
||||
func TestPacerCall(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
config := GetConfig(ctx)
|
||||
expectedCalled := config.LowLevelRetries
|
||||
if expectedCalled == 0 {
|
||||
ctx, config = AddConfig(ctx)
|
||||
expectedCalled = 20
|
||||
config.LowLevelRetries = expectedCalled
|
||||
}
|
||||
p := NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))
|
||||
|
||||
dp := &dummyPaced{retry: true}
|
||||
err := p.Call(dp.fn)
|
||||
require.Equal(t, expectedCalled, dp.called)
|
||||
require.Implements(t, (*fserrors.Retrier)(nil), err)
|
||||
}
|
||||
|
||||
func TestPacerCallNoRetry(t *testing.T) {
|
||||
p := NewPacer(context.Background(), pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))
|
||||
|
||||
dp := &dummyPaced{retry: true}
|
||||
err := p.CallNoRetry(dp.fn)
|
||||
require.Equal(t, 1, dp.called)
|
||||
require.Implements(t, (*fserrors.Retrier)(nil), err)
|
||||
}
|
||||
|
||||
// Test options
|
||||
var (
|
||||
nouncOption = Option{
|
||||
Name: "nounc",
|
||||
}
|
||||
copyLinksOption = Option{
|
||||
Name: "copy_links",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "L",
|
||||
Advanced: true,
|
||||
}
|
||||
caseInsensitiveOption = Option{
|
||||
Name: "case_insensitive",
|
||||
Default: false,
|
||||
Value: true,
|
||||
Advanced: true,
|
||||
}
|
||||
testOptions = Options{nouncOption, copyLinksOption, caseInsensitiveOption}
|
||||
)
|
||||
|
||||
func TestOptionsSetValues(t *testing.T) {
|
||||
assert.Nil(t, testOptions[0].Default)
|
||||
assert.Equal(t, false, testOptions[1].Default)
|
||||
assert.Equal(t, false, testOptions[2].Default)
|
||||
testOptions.setValues()
|
||||
assert.Equal(t, "", testOptions[0].Default)
|
||||
assert.Equal(t, false, testOptions[1].Default)
|
||||
assert.Equal(t, false, testOptions[2].Default)
|
||||
}
|
||||
|
||||
func TestOptionsGet(t *testing.T) {
|
||||
opt := testOptions.Get("copy_links")
|
||||
assert.Equal(t, ©LinksOption, opt)
|
||||
opt = testOptions.Get("not_found")
|
||||
assert.Nil(t, opt)
|
||||
}
|
||||
|
||||
func TestOptionsOveridden(t *testing.T) {
|
||||
m := configmap.New()
|
||||
m1 := configmap.Simple{
|
||||
"nounc": "m1",
|
||||
"copy_links": "m1",
|
||||
}
|
||||
m.AddGetter(m1, configmap.PriorityNormal)
|
||||
m2 := configmap.Simple{
|
||||
"nounc": "m2",
|
||||
"case_insensitive": "m2",
|
||||
}
|
||||
m.AddGetter(m2, configmap.PriorityConfig)
|
||||
m3 := configmap.Simple{
|
||||
"nounc": "m3",
|
||||
}
|
||||
m.AddGetter(m3, configmap.PriorityDefault)
|
||||
got := testOptions.Overridden(m)
|
||||
assert.Equal(t, configmap.Simple{
|
||||
"copy_links": "m1",
|
||||
"nounc": "m1",
|
||||
}, got)
|
||||
}
|
||||
|
||||
func TestOptionsNonDefault(t *testing.T) {
|
||||
m := configmap.Simple{}
|
||||
got := testOptions.NonDefault(m)
|
||||
assert.Equal(t, configmap.Simple{}, got)
|
||||
|
||||
m["case_insensitive"] = "false"
|
||||
got = testOptions.NonDefault(m)
|
||||
assert.Equal(t, configmap.Simple{}, got)
|
||||
|
||||
m["case_insensitive"] = "true"
|
||||
got = testOptions.NonDefault(m)
|
||||
assert.Equal(t, configmap.Simple{"case_insensitive": "true"}, got)
|
||||
}
|
||||
|
||||
func TestOptionMarshalJSON(t *testing.T) {
|
||||
out, err := json.MarshalIndent(&caseInsensitiveOption, "", "")
|
||||
assert.NoError(t, err)
|
||||
require.Equal(t, `{
|
||||
"Name": "case_insensitive",
|
||||
"FieldName": "",
|
||||
"Help": "",
|
||||
"Default": false,
|
||||
"Value": true,
|
||||
"Hide": 0,
|
||||
"Required": false,
|
||||
"IsPassword": false,
|
||||
"NoPrefix": false,
|
||||
"Advanced": true,
|
||||
"Exclusive": false,
|
||||
"Sensitive": false,
|
||||
"DefaultStr": "false",
|
||||
"ValueStr": "true",
|
||||
"Type": "bool"
|
||||
}`, string(out))
|
||||
}
|
||||
|
||||
func TestOptionGetValue(t *testing.T) {
|
||||
assert.Equal(t, "", nouncOption.GetValue())
|
||||
assert.Equal(t, false, copyLinksOption.GetValue())
|
||||
assert.Equal(t, true, caseInsensitiveOption.GetValue())
|
||||
}
|
||||
|
||||
func TestOptionString(t *testing.T) {
|
||||
assert.Equal(t, "", nouncOption.String())
|
||||
assert.Equal(t, "false", copyLinksOption.String())
|
||||
assert.Equal(t, "true", caseInsensitiveOption.String())
|
||||
}
|
||||
|
||||
func TestOptionStringStringArray(t *testing.T) {
|
||||
opt := Option{
|
||||
Name: "string_array",
|
||||
Default: []string(nil),
|
||||
}
|
||||
assert.Equal(t, "", opt.String())
|
||||
opt.Default = []string{}
|
||||
assert.Equal(t, "", opt.String())
|
||||
opt.Default = []string{"a", "b"}
|
||||
assert.Equal(t, "a,b", opt.String())
|
||||
opt.Default = []string{"hello, world!", "goodbye, world!"}
|
||||
assert.Equal(t, `"hello, world!","goodbye, world!"`, opt.String())
|
||||
}
|
||||
|
||||
func TestOptionStringSizeSuffix(t *testing.T) {
|
||||
opt := Option{
|
||||
Name: "size_suffix",
|
||||
Default: SizeSuffix(0),
|
||||
}
|
||||
assert.Equal(t, "0", opt.String())
|
||||
opt.Default = SizeSuffix(-1)
|
||||
assert.Equal(t, "off", opt.String())
|
||||
opt.Default = SizeSuffix(100)
|
||||
assert.Equal(t, "100B", opt.String())
|
||||
opt.Default = SizeSuffix(1024)
|
||||
assert.Equal(t, "1Ki", opt.String())
|
||||
}
|
||||
|
||||
func TestOptionSet(t *testing.T) {
|
||||
o := caseInsensitiveOption
|
||||
assert.Equal(t, true, o.Value)
|
||||
err := o.Set("FALSE")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, false, o.Value)
|
||||
|
||||
o = copyLinksOption
|
||||
assert.Equal(t, nil, o.Value)
|
||||
err = o.Set("True")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, true, o.Value)
|
||||
|
||||
err = o.Set("INVALID")
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, true, o.Value)
|
||||
}
|
||||
|
||||
func TestOptionType(t *testing.T) {
|
||||
assert.Equal(t, "string", nouncOption.Type())
|
||||
assert.Equal(t, "bool", copyLinksOption.Type())
|
||||
assert.Equal(t, "bool", caseInsensitiveOption.Type())
|
||||
}
|
||||
|
||||
func TestOptionFlagName(t *testing.T) {
|
||||
assert.Equal(t, "local-nounc", nouncOption.FlagName("local"))
|
||||
assert.Equal(t, "copy-links", copyLinksOption.FlagName("local"))
|
||||
assert.Equal(t, "local-case-insensitive", caseInsensitiveOption.FlagName("local"))
|
||||
}
|
||||
|
||||
func TestOptionEnvVarName(t *testing.T) {
|
||||
assert.Equal(t, "RCLONE_LOCAL_NOUNC", nouncOption.EnvVarName("local"))
|
||||
assert.Equal(t, "RCLONE_LOCAL_COPY_LINKS", copyLinksOption.EnvVarName("local"))
|
||||
assert.Equal(t, "RCLONE_LOCAL_CASE_INSENSITIVE", caseInsensitiveOption.EnvVarName("local"))
|
||||
}
|
||||
|
||||
func TestOptionGetters(t *testing.T) {
|
||||
// Set up env vars
|
||||
envVars := [][2]string{
|
||||
{"RCLONE_CONFIG_LOCAL_POTATO_PIE", "yes"},
|
||||
{"RCLONE_COPY_LINKS", "TRUE"},
|
||||
{"RCLONE_LOCAL_NOUNC", "NOUNC"},
|
||||
}
|
||||
for _, ev := range envVars {
|
||||
assert.NoError(t, os.Setenv(ev[0], ev[1]))
|
||||
}
|
||||
defer func() {
|
||||
for _, ev := range envVars {
|
||||
assert.NoError(t, os.Unsetenv(ev[0]))
|
||||
}
|
||||
}()
|
||||
|
||||
oldConfigFileGet := ConfigFileGet
|
||||
ConfigFileGet = func(section, key string) (string, bool) {
|
||||
if section == "sausage" && key == "key1" {
|
||||
return "value1", true
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
defer func() {
|
||||
ConfigFileGet = oldConfigFileGet
|
||||
}()
|
||||
|
||||
// set up getters
|
||||
|
||||
// A configmap.Getter to read from the environment RCLONE_CONFIG_backend_option_name
|
||||
configEnvVarsGetter := configEnvVars("local")
|
||||
|
||||
// A configmap.Getter to read from the environment RCLONE_option_name
|
||||
optionEnvVarsGetter := optionEnvVars{"local", testOptions}
|
||||
|
||||
// A configmap.Getter to read either the default value or the set
|
||||
// value from the RegInfo.Options
|
||||
regInfoValuesGetterFalse := ®InfoValues{
|
||||
options: testOptions,
|
||||
useDefault: false,
|
||||
}
|
||||
regInfoValuesGetterTrue := ®InfoValues{
|
||||
options: testOptions,
|
||||
useDefault: true,
|
||||
}
|
||||
|
||||
// A configmap.Setter to read from the config file
|
||||
configFileGetter := getConfigFile("sausage")
|
||||
|
||||
for i, test := range []struct {
|
||||
get configmap.Getter
|
||||
key string
|
||||
wantValue string
|
||||
wantOk bool
|
||||
}{
|
||||
{configEnvVarsGetter, "not_found", "", false},
|
||||
{configEnvVarsGetter, "potato_pie", "yes", true},
|
||||
{optionEnvVarsGetter, "not_found", "", false},
|
||||
{optionEnvVarsGetter, "copy_links", "TRUE", true},
|
||||
{optionEnvVarsGetter, "nounc", "NOUNC", true},
|
||||
{optionEnvVarsGetter, "case_insensitive", "", false},
|
||||
{regInfoValuesGetterFalse, "not_found", "", false},
|
||||
{regInfoValuesGetterFalse, "case_insensitive", "true", true},
|
||||
{regInfoValuesGetterFalse, "copy_links", "", false},
|
||||
{regInfoValuesGetterTrue, "not_found", "", false},
|
||||
{regInfoValuesGetterTrue, "case_insensitive", "true", true},
|
||||
{regInfoValuesGetterTrue, "copy_links", "false", true},
|
||||
{configFileGetter, "not_found", "", false},
|
||||
{configFileGetter, "key1", "value1", true},
|
||||
} {
|
||||
what := fmt.Sprintf("%d: %+v: %q", i, test.get, test.key)
|
||||
gotValue, gotOk := test.get.Get(test.key)
|
||||
assert.Equal(t, test.wantValue, gotValue, what)
|
||||
assert.Equal(t, test.wantOk, gotOk, what)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
57
fs/pacer_test.go
Normal file
57
fs/pacer_test.go
Normal file
@@ -0,0 +1,57 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// errFoo is the sentinel error always returned by dummyPaced.fn.
var errFoo = errors.New("foo")

// dummyPaced is a test helper whose fn method counts calls and always
// fails with errFoo.
type dummyPaced struct {
	retry  bool       // value fn returns as the retry flag
	called int        // number of times fn has been called
	wait   *sync.Cond // if set, fn blocks on this before proceeding
}

// fn is a pacer function which optionally blocks on dp.wait, counts
// the call and returns (dp.retry, errFoo).
func (dp *dummyPaced) fn() (bool, error) {
	if dp.wait != nil {
		dp.wait.L.Lock()
		dp.wait.Wait()
		dp.wait.L.Unlock()
	}
	dp.called++
	return dp.retry, errFoo
}
func TestPacerCall(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
config := GetConfig(ctx)
|
||||
expectedCalled := config.LowLevelRetries
|
||||
if expectedCalled == 0 {
|
||||
ctx, config = AddConfig(ctx)
|
||||
expectedCalled = 20
|
||||
config.LowLevelRetries = expectedCalled
|
||||
}
|
||||
p := NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))
|
||||
|
||||
dp := &dummyPaced{retry: true}
|
||||
err := p.Call(dp.fn)
|
||||
require.Equal(t, expectedCalled, dp.called)
|
||||
require.Implements(t, (*fserrors.Retrier)(nil), err)
|
||||
}
|
||||
|
||||
func TestPacerCallNoRetry(t *testing.T) {
|
||||
p := NewPacer(context.Background(), pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))
|
||||
|
||||
dp := &dummyPaced{retry: true}
|
||||
err := p.CallNoRetry(dp.fn)
|
||||
require.Equal(t, 1, dp.called)
|
||||
require.Implements(t, (*fserrors.Retrier)(nil), err)
|
||||
}
|
||||
@@ -2,11 +2,15 @@
|
||||
package jobs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"runtime/debug"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
@@ -17,6 +21,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/rc"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// Fill in these to avoid circular dependencies
|
||||
@@ -475,3 +480,249 @@ func rcGroupStop(ctx context.Context, in rc.Params) (out rc.Params, err error) {
|
||||
out = make(rc.Params)
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// NewJobFromParams creates an rc job from rc.Params.
//
// The JSON blob should contain a _path entry.
//
// It returns a rc.Params as output which may be an error.
func NewJobFromParams(ctx context.Context, in rc.Params) (out rc.Params) {
	path := "unknown"

	// Return an rc error blob. This assigns to the named return so
	// the caller always gets a well formed reply.
	rcError := func(err error, status int) rc.Params {
		fs.Errorf(nil, "rc: %q: error: %v", path, err)
		out, _ = rc.Error(path, in, err, status)
		return out
	}

	// Find the call
	path, err := in.GetString("_path")
	if err != nil {
		return rcError(err, http.StatusNotFound)
	}
	delete(in, "_path")
	call := rc.Calls.Get(path)
	if call == nil {
		return rcError(fmt.Errorf("couldn't find path %q", path), http.StatusNotFound)
	}
	// Calls needing direct access to the HTTP request or response
	// can't be run as detached jobs
	if call.NeedsRequest {
		return rcError(fmt.Errorf("can't run path %q as it needs the request", path), http.StatusBadRequest)
	}
	if call.NeedsResponse {
		return rcError(fmt.Errorf("can't run path %q as it needs the response", path), http.StatusBadRequest)
	}

	// Pass on the group if one is set in the context and it isn't set in the input.
	if _, found := in["_group"]; !found {
		group, ok := accounting.StatsGroupFromContext(ctx)
		if ok {
			in["_group"] = group
		}
	}

	fs.Debugf(nil, "rc: %q: with parameters %+v", path, in)
	_, out, err = NewJob(ctx, call.Fn, in)
	if err != nil {
		return rcError(err, http.StatusInternalServerError)
	}
	if out == nil {
		out = make(rc.Params)
	}

	fs.Debugf(nil, "rc: %q: reply %+v: %v", path, out, err)
	return out
}
// NewJobFromBytes creates an rc job from a JSON blob as bytes.
|
||||
//
|
||||
// The JSON blob should contain a _path entry.
|
||||
//
|
||||
// It returns a JSON blob as output which may be an error.
|
||||
func NewJobFromBytes(ctx context.Context, inBuf []byte) (outBuf []byte) {
|
||||
var in rc.Params
|
||||
var out rc.Params
|
||||
|
||||
// Parse a JSON blob from the input
|
||||
err := json.Unmarshal(inBuf, &in)
|
||||
if err != nil {
|
||||
out, _ = rc.Error("unknown", in, err, http.StatusBadRequest)
|
||||
} else {
|
||||
out = NewJobFromParams(ctx, in)
|
||||
}
|
||||
|
||||
var w bytes.Buffer
|
||||
err = rc.WriteJSON(&w, out)
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "rc: NewJobFromBytes: failed to write JSON output: %v", err)
|
||||
return []byte(`{"error":"failed to write JSON output"}`)
|
||||
}
|
||||
return w.Bytes()
|
||||
}
|
||||
|
||||
func init() {
|
||||
rc.Add(rc.Call{
|
||||
Path: "job/batch",
|
||||
AuthRequired: true, // require auth always since sub commands may require it
|
||||
Fn: rcBatch,
|
||||
Title: "Run a batch of rclone rc commands concurrently.",
|
||||
Help: strings.ReplaceAll(`
|
||||
This takes the following parameters:
|
||||
|
||||
- concurrency - int - do this many commands concurrently. Defaults to |--transfers| if not set.
|
||||
- inputs - an list of inputs to the commands with an extra |_path| parameter
|
||||
|
||||
|||json
|
||||
{
|
||||
"_path": "rc/path",
|
||||
"param1": "parameter for the path as documented",
|
||||
"param2": "parameter for the path as documented, etc",
|
||||
}
|
||||
|||json
|
||||
|
||||
The inputs may use |_async|, |_group|, |_config| and |_filter| as normal when using the rc.
|
||||
|
||||
Returns:
|
||||
|
||||
- results - a list of results from the commands with one entry for each in inputs.
|
||||
|
||||
For example:
|
||||
|
||||
|||sh
|
||||
rclone rc job/batch --json '{
|
||||
"inputs": [
|
||||
{
|
||||
"_path": "rc/noop",
|
||||
"parameter": "OK"
|
||||
},
|
||||
{
|
||||
"_path": "rc/error",
|
||||
"parameter": "BAD"
|
||||
}
|
||||
]
|
||||
}
|
||||
'
|
||||
|||
|
||||
|
||||
Gives the result:
|
||||
|
||||
|||json
|
||||
{
|
||||
"results": [
|
||||
{
|
||||
"parameter": "OK"
|
||||
},
|
||||
{
|
||||
"error": "arbitrary error on input map[parameter:BAD]",
|
||||
"input": {
|
||||
"parameter": "BAD"
|
||||
},
|
||||
"path": "rc/error",
|
||||
"status": 500
|
||||
}
|
||||
]
|
||||
}
|
||||
|||
|
||||
`, "|", "`"),
|
||||
})
|
||||
}
|
||||
|
||||
/*
|
||||
// Run a single batch job
|
||||
func runBatchJob(ctx context.Context, inputAny any) (out rc.Params, err error) {
|
||||
var in rc.Params
|
||||
path := "unknown"
|
||||
defer func() {
|
||||
if err != nil {
|
||||
out, _ = rc.Error(path, in, err, http.StatusInternalServerError)
|
||||
}
|
||||
}()
|
||||
|
||||
// get the inputs to the job
|
||||
input, ok := inputAny.(map[string]any)
|
||||
if !ok {
|
||||
return nil, rc.NewErrParamInvalid(fmt.Errorf("\"inputs\" items must be objects not %T", inputAny))
|
||||
}
|
||||
in = rc.Params(input)
|
||||
path, err = in.GetString("_path")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
delete(in, "_path")
|
||||
call := rc.Calls.Get(path)
|
||||
|
||||
// Check call
|
||||
if call == nil {
|
||||
return nil, rc.NewErrParamInvalid(fmt.Errorf("path %q does not exist", path))
|
||||
}
|
||||
path = call.Path
|
||||
if call.NeedsRequest {
|
||||
return nil, rc.NewErrParamInvalid(fmt.Errorf("can't run path %q as it needs the request", path))
|
||||
}
|
||||
if call.NeedsResponse {
|
||||
return nil, rc.NewErrParamInvalid(fmt.Errorf("can't run path %q as it needs the response", path))
|
||||
}
|
||||
|
||||
// Run the job
|
||||
_, out, err = NewJob(ctx, call.Fn, in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Reshape (serialize then deserialize) the data so it is in the form expected
|
||||
err = rc.Reshape(&out, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
*/
|
||||
|
||||
// Batch the registered commands
|
||||
func rcBatch(ctx context.Context, in rc.Params) (out rc.Params, err error) {
|
||||
out = make(rc.Params)
|
||||
|
||||
// Read inputs
|
||||
inputsAny, err := in.Get("inputs")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
inputs, ok := inputsAny.([]any)
|
||||
if !ok {
|
||||
return nil, rc.NewErrParamInvalid(fmt.Errorf("expecting list key %q (was %T)", "inputs", inputsAny))
|
||||
}
|
||||
|
||||
// Read concurrency
|
||||
concurrency, err := in.GetInt64("concurrency")
|
||||
if rc.IsErrParamNotFound(err) {
|
||||
ci := fs.GetConfig(ctx)
|
||||
concurrency = int64(ci.Transfers)
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Prepare outputs
|
||||
results := make([]rc.Params, len(inputs))
|
||||
out["results"] = results
|
||||
|
||||
g, gCtx := errgroup.WithContext(ctx)
|
||||
g.SetLimit(int(concurrency))
|
||||
for i, inputAny := range inputs {
|
||||
input, ok := inputAny.(map[string]any)
|
||||
if !ok {
|
||||
results[i], _ = rc.Error("unknown", nil, fmt.Errorf("\"inputs\" items must be objects not %T", inputAny), http.StatusBadRequest)
|
||||
continue
|
||||
}
|
||||
in := rc.Params(input)
|
||||
if concurrency <= 1 {
|
||||
results[i] = NewJobFromParams(ctx, in)
|
||||
} else {
|
||||
g.Go(func() error {
|
||||
results[i] = NewJobFromParams(gCtx, in)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
}
|
||||
_ = g.Wait()
|
||||
return out, nil
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package jobs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"runtime"
|
||||
"testing"
|
||||
@@ -602,3 +603,294 @@ func TestOnFinishDataRace(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Register some test rc calls
|
||||
func init() {
|
||||
rc.Add(rc.Call{
|
||||
Path: "test/needs_request",
|
||||
NeedsRequest: true,
|
||||
})
|
||||
rc.Add(rc.Call{
|
||||
Path: "test/needs_response",
|
||||
NeedsResponse: true,
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func TestNewJobFromParams(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
for _, test := range []struct {
|
||||
in rc.Params
|
||||
want rc.Params
|
||||
}{{
|
||||
in: rc.Params{
|
||||
"_path": "rc/noop",
|
||||
"a": "potato",
|
||||
},
|
||||
want: rc.Params{
|
||||
"a": "potato",
|
||||
},
|
||||
}, {
|
||||
in: rc.Params{
|
||||
"_path": "rc/noop",
|
||||
"b": "sausage",
|
||||
},
|
||||
want: rc.Params{
|
||||
"b": "sausage",
|
||||
},
|
||||
}, {
|
||||
in: rc.Params{
|
||||
"_path": "rc/error",
|
||||
"e": "sausage",
|
||||
},
|
||||
want: rc.Params{
|
||||
"error": "arbitrary error on input map[e:sausage]",
|
||||
"input": rc.Params{
|
||||
"e": "sausage",
|
||||
},
|
||||
"path": "rc/error",
|
||||
"status": 500,
|
||||
},
|
||||
}, {
|
||||
in: rc.Params{
|
||||
"_path": "bad/path",
|
||||
"param": "sausage",
|
||||
},
|
||||
want: rc.Params{
|
||||
"error": "couldn't find path \"bad/path\"",
|
||||
"input": rc.Params{
|
||||
"param": "sausage",
|
||||
},
|
||||
"path": "bad/path",
|
||||
"status": 404,
|
||||
},
|
||||
}, {
|
||||
in: rc.Params{
|
||||
"_path": "test/needs_request",
|
||||
},
|
||||
want: rc.Params{
|
||||
"error": "can't run path \"test/needs_request\" as it needs the request",
|
||||
"input": rc.Params{},
|
||||
"path": "test/needs_request",
|
||||
"status": 400,
|
||||
},
|
||||
}, {
|
||||
in: rc.Params{
|
||||
"_path": "test/needs_response",
|
||||
},
|
||||
want: rc.Params{
|
||||
"error": "can't run path \"test/needs_response\" as it needs the response",
|
||||
"input": rc.Params{},
|
||||
"path": "test/needs_response",
|
||||
"status": 400,
|
||||
},
|
||||
}, {
|
||||
in: rc.Params{
|
||||
"nopath": "BOOM",
|
||||
},
|
||||
want: rc.Params{
|
||||
"error": "Didn't find key \"_path\" in input",
|
||||
"input": rc.Params{
|
||||
"nopath": "BOOM",
|
||||
},
|
||||
"path": "",
|
||||
"status": 400,
|
||||
},
|
||||
}} {
|
||||
got := NewJobFromParams(ctx, test.in)
|
||||
assert.Equal(t, test.want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewJobFromBytes(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
want string
|
||||
}{{
|
||||
in: `{
|
||||
"_path": "rc/noop",
|
||||
"a": "potato"
|
||||
}`,
|
||||
want: `{
|
||||
"a": "potato"
|
||||
}
|
||||
`,
|
||||
}, {
|
||||
in: `{
|
||||
"_path": "rc/error",
|
||||
"e": "sausage"
|
||||
}`,
|
||||
want: `{
|
||||
"error": "arbitrary error on input map[e:sausage]",
|
||||
"input": {
|
||||
"e": "sausage"
|
||||
},
|
||||
"path": "rc/error",
|
||||
"status": 500
|
||||
}
|
||||
`,
|
||||
}, {
|
||||
in: `parse error`,
|
||||
want: `{
|
||||
"error": "invalid character 'p' looking for beginning of value",
|
||||
"input": null,
|
||||
"path": "unknown",
|
||||
"status": 400
|
||||
}
|
||||
`,
|
||||
}, {
|
||||
in: `"just a string"`,
|
||||
want: `{
|
||||
"error": "json: cannot unmarshal string into Go value of type rc.Params",
|
||||
"input": null,
|
||||
"path": "unknown",
|
||||
"status": 400
|
||||
}
|
||||
`,
|
||||
}} {
|
||||
got := NewJobFromBytes(ctx, []byte(test.in))
|
||||
assert.Equal(t, test.want, string(got))
|
||||
}
|
||||
}
|
||||
|
||||
func TestJobsBatch(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
|
||||
call := rc.Calls.Get("job/batch")
|
||||
assert.NotNil(t, call)
|
||||
|
||||
inJSON := `{
|
||||
"inputs": [
|
||||
{
|
||||
"_path": "rc/noop",
|
||||
"a": "potato"
|
||||
},
|
||||
"bad string",
|
||||
{
|
||||
"_path": "rc/noop",
|
||||
"b": "sausage"
|
||||
},
|
||||
{
|
||||
"_path": "rc/error",
|
||||
"e": "sausage"
|
||||
},
|
||||
{
|
||||
"_path": "bad/path",
|
||||
"param": "sausage"
|
||||
},
|
||||
{
|
||||
"_path": "test/needs_request"
|
||||
},
|
||||
{
|
||||
"_path": "test/needs_response"
|
||||
},
|
||||
{
|
||||
"nopath": "BOOM"
|
||||
}
|
||||
]
|
||||
}
|
||||
`
|
||||
var in rc.Params
|
||||
require.NoError(t, json.Unmarshal([]byte(inJSON), &in))
|
||||
|
||||
wantJSON := `{
|
||||
"results": [
|
||||
{
|
||||
"a": "potato"
|
||||
},
|
||||
{
|
||||
"error": "\"inputs\" items must be objects not string",
|
||||
"input": null,
|
||||
"path": "unknown",
|
||||
"status": 400
|
||||
},
|
||||
{
|
||||
"b": "sausage"
|
||||
},
|
||||
{
|
||||
"error": "arbitrary error on input map[e:sausage]",
|
||||
"input": {
|
||||
"e": "sausage"
|
||||
},
|
||||
"path": "rc/error",
|
||||
"status": 500
|
||||
},
|
||||
{
|
||||
"error": "couldn't find path \"bad/path\"",
|
||||
"input": {
|
||||
"param": "sausage"
|
||||
},
|
||||
"path": "bad/path",
|
||||
"status": 404
|
||||
},
|
||||
{
|
||||
"error": "can't run path \"test/needs_request\" as it needs the request",
|
||||
"input": {},
|
||||
"path": "test/needs_request",
|
||||
"status": 400
|
||||
},
|
||||
{
|
||||
"error": "can't run path \"test/needs_response\" as it needs the response",
|
||||
"input": {},
|
||||
"path": "test/needs_response",
|
||||
"status": 400
|
||||
},
|
||||
{
|
||||
"error": "Didn't find key \"_path\" in input",
|
||||
"input": {
|
||||
"nopath": "BOOM"
|
||||
},
|
||||
"path": "",
|
||||
"status": 400
|
||||
}
|
||||
]
|
||||
}
|
||||
`
|
||||
|
||||
var want rc.Params
|
||||
require.NoError(t, json.Unmarshal([]byte(wantJSON), &want))
|
||||
|
||||
out, err := call.Fn(ctx, in)
|
||||
require.NoError(t, err)
|
||||
|
||||
var got rc.Params
|
||||
require.NoError(t, rc.Reshape(&got, out))
|
||||
|
||||
assert.Equal(t, want, got)
|
||||
}
|
||||
|
||||
func TestJobsBatchConcurrent(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
for concurrency := range 10 {
|
||||
in := rc.Params{}
|
||||
var inputs []any
|
||||
var results []rc.Params
|
||||
for i := range 100 {
|
||||
in := map[string]any{
|
||||
"_path": "rc/noop",
|
||||
"i": i,
|
||||
}
|
||||
inputs = append(inputs, in)
|
||||
results = append(results, rc.Params{
|
||||
"i": i,
|
||||
})
|
||||
}
|
||||
in["inputs"] = inputs
|
||||
want := rc.Params{
|
||||
"results": results,
|
||||
}
|
||||
|
||||
if concurrency > 0 {
|
||||
in["concurrency"] = concurrency
|
||||
}
|
||||
call := rc.Calls.Get("job/batch")
|
||||
assert.NotNil(t, call)
|
||||
|
||||
got, err := call.Fn(ctx, in)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, want, got)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -154,6 +154,37 @@ func (os Options) NonDefault(m configmap.Getter) configmap.Simple {
|
||||
return nonDefault
|
||||
}
|
||||
|
||||
// NonDefaultRC discovers which config values aren't at their default
|
||||
//
|
||||
// It expects a pointer to the current config struct in opts.
|
||||
//
|
||||
// It returns the overridden config in rc config format.
|
||||
func (os Options) NonDefaultRC(opts any) (map[string]any, error) {
|
||||
items, err := configstruct.Items(opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
itemsByName := map[string]*configstruct.Item{}
|
||||
for i := range items {
|
||||
item := &items[i]
|
||||
itemsByName[item.Name] = item
|
||||
}
|
||||
var nonDefault = map[string]any{}
|
||||
for i := range os {
|
||||
opt := &os[i]
|
||||
item, found := itemsByName[opt.Name]
|
||||
if !found {
|
||||
return nil, fmt.Errorf("key %q in OptionsInfo not found in Options struct", opt.Name)
|
||||
}
|
||||
value := fmt.Sprint(item.Value)
|
||||
defaultValue := fmt.Sprint(opt.Default)
|
||||
if value != defaultValue {
|
||||
nonDefault[item.Field] = item.Value
|
||||
}
|
||||
}
|
||||
return nonDefault, nil
|
||||
}
|
||||
|
||||
// HasAdvanced discovers if any options have an Advanced setting
|
||||
func (os Options) HasAdvanced() bool {
|
||||
for i := range os {
|
||||
|
||||
308
fs/registry_test.go
Normal file
308
fs/registry_test.go
Normal file
@@ -0,0 +1,308 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Check it satisfies the interface
|
||||
var _ pflag.Value = (*Option)(nil)
|
||||
|
||||
func TestOption(t *testing.T) {
|
||||
d := &Option{
|
||||
Name: "potato",
|
||||
Value: SizeSuffix(17 << 20),
|
||||
}
|
||||
assert.Equal(t, "17Mi", d.String())
|
||||
assert.Equal(t, "SizeSuffix", d.Type())
|
||||
err := d.Set("18M")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, SizeSuffix(18<<20), d.Value)
|
||||
err = d.Set("sdfsdf")
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
// Test options
|
||||
var (
|
||||
nouncOption = Option{
|
||||
Name: "nounc",
|
||||
}
|
||||
copyLinksOption = Option{
|
||||
Name: "copy_links",
|
||||
Default: false,
|
||||
NoPrefix: true,
|
||||
ShortOpt: "L",
|
||||
Advanced: true,
|
||||
}
|
||||
caseInsensitiveOption = Option{
|
||||
Name: "case_insensitive",
|
||||
Default: false,
|
||||
Value: true,
|
||||
Advanced: true,
|
||||
}
|
||||
testOptions = Options{nouncOption, copyLinksOption, caseInsensitiveOption}
|
||||
)
|
||||
|
||||
func TestOptionsSetValues(t *testing.T) {
|
||||
assert.Nil(t, testOptions[0].Default)
|
||||
assert.Equal(t, false, testOptions[1].Default)
|
||||
assert.Equal(t, false, testOptions[2].Default)
|
||||
testOptions.setValues()
|
||||
assert.Equal(t, "", testOptions[0].Default)
|
||||
assert.Equal(t, false, testOptions[1].Default)
|
||||
assert.Equal(t, false, testOptions[2].Default)
|
||||
}
|
||||
|
||||
func TestOptionsGet(t *testing.T) {
|
||||
opt := testOptions.Get("copy_links")
|
||||
assert.Equal(t, ©LinksOption, opt)
|
||||
opt = testOptions.Get("not_found")
|
||||
assert.Nil(t, opt)
|
||||
}
|
||||
|
||||
func TestOptionsOveridden(t *testing.T) {
|
||||
m := configmap.New()
|
||||
m1 := configmap.Simple{
|
||||
"nounc": "m1",
|
||||
"copy_links": "m1",
|
||||
}
|
||||
m.AddGetter(m1, configmap.PriorityNormal)
|
||||
m2 := configmap.Simple{
|
||||
"nounc": "m2",
|
||||
"case_insensitive": "m2",
|
||||
}
|
||||
m.AddGetter(m2, configmap.PriorityConfig)
|
||||
m3 := configmap.Simple{
|
||||
"nounc": "m3",
|
||||
}
|
||||
m.AddGetter(m3, configmap.PriorityDefault)
|
||||
got := testOptions.Overridden(m)
|
||||
assert.Equal(t, configmap.Simple{
|
||||
"copy_links": "m1",
|
||||
"nounc": "m1",
|
||||
}, got)
|
||||
}
|
||||
|
||||
func TestOptionsNonDefault(t *testing.T) {
|
||||
m := configmap.Simple{}
|
||||
got := testOptions.NonDefault(m)
|
||||
assert.Equal(t, configmap.Simple{}, got)
|
||||
|
||||
m["case_insensitive"] = "false"
|
||||
got = testOptions.NonDefault(m)
|
||||
assert.Equal(t, configmap.Simple{}, got)
|
||||
|
||||
m["case_insensitive"] = "true"
|
||||
got = testOptions.NonDefault(m)
|
||||
assert.Equal(t, configmap.Simple{"case_insensitive": "true"}, got)
|
||||
}
|
||||
|
||||
func TestOptionMarshalJSON(t *testing.T) {
|
||||
out, err := json.MarshalIndent(&caseInsensitiveOption, "", "")
|
||||
assert.NoError(t, err)
|
||||
require.Equal(t, `{
|
||||
"Name": "case_insensitive",
|
||||
"FieldName": "",
|
||||
"Help": "",
|
||||
"Default": false,
|
||||
"Value": true,
|
||||
"Hide": 0,
|
||||
"Required": false,
|
||||
"IsPassword": false,
|
||||
"NoPrefix": false,
|
||||
"Advanced": true,
|
||||
"Exclusive": false,
|
||||
"Sensitive": false,
|
||||
"DefaultStr": "false",
|
||||
"ValueStr": "true",
|
||||
"Type": "bool"
|
||||
}`, string(out))
|
||||
}
|
||||
|
||||
func TestOptionGetValue(t *testing.T) {
|
||||
assert.Equal(t, "", nouncOption.GetValue())
|
||||
assert.Equal(t, false, copyLinksOption.GetValue())
|
||||
assert.Equal(t, true, caseInsensitiveOption.GetValue())
|
||||
}
|
||||
|
||||
func TestOptionString(t *testing.T) {
|
||||
assert.Equal(t, "", nouncOption.String())
|
||||
assert.Equal(t, "false", copyLinksOption.String())
|
||||
assert.Equal(t, "true", caseInsensitiveOption.String())
|
||||
}
|
||||
|
||||
func TestOptionStringStringArray(t *testing.T) {
|
||||
opt := Option{
|
||||
Name: "string_array",
|
||||
Default: []string(nil),
|
||||
}
|
||||
assert.Equal(t, "", opt.String())
|
||||
opt.Default = []string{}
|
||||
assert.Equal(t, "", opt.String())
|
||||
opt.Default = []string{"a", "b"}
|
||||
assert.Equal(t, "a,b", opt.String())
|
||||
opt.Default = []string{"hello, world!", "goodbye, world!"}
|
||||
assert.Equal(t, `"hello, world!","goodbye, world!"`, opt.String())
|
||||
}
|
||||
|
||||
func TestOptionStringSizeSuffix(t *testing.T) {
|
||||
opt := Option{
|
||||
Name: "size_suffix",
|
||||
Default: SizeSuffix(0),
|
||||
}
|
||||
assert.Equal(t, "0", opt.String())
|
||||
opt.Default = SizeSuffix(-1)
|
||||
assert.Equal(t, "off", opt.String())
|
||||
opt.Default = SizeSuffix(100)
|
||||
assert.Equal(t, "100B", opt.String())
|
||||
opt.Default = SizeSuffix(1024)
|
||||
assert.Equal(t, "1Ki", opt.String())
|
||||
}
|
||||
|
||||
func TestOptionSet(t *testing.T) {
|
||||
o := caseInsensitiveOption
|
||||
assert.Equal(t, true, o.Value)
|
||||
err := o.Set("FALSE")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, false, o.Value)
|
||||
|
||||
o = copyLinksOption
|
||||
assert.Equal(t, nil, o.Value)
|
||||
err = o.Set("True")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, true, o.Value)
|
||||
|
||||
err = o.Set("INVALID")
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, true, o.Value)
|
||||
}
|
||||
|
||||
func TestOptionType(t *testing.T) {
|
||||
assert.Equal(t, "string", nouncOption.Type())
|
||||
assert.Equal(t, "bool", copyLinksOption.Type())
|
||||
assert.Equal(t, "bool", caseInsensitiveOption.Type())
|
||||
}
|
||||
|
||||
func TestOptionFlagName(t *testing.T) {
|
||||
assert.Equal(t, "local-nounc", nouncOption.FlagName("local"))
|
||||
assert.Equal(t, "copy-links", copyLinksOption.FlagName("local"))
|
||||
assert.Equal(t, "local-case-insensitive", caseInsensitiveOption.FlagName("local"))
|
||||
}
|
||||
|
||||
func TestOptionEnvVarName(t *testing.T) {
|
||||
assert.Equal(t, "RCLONE_LOCAL_NOUNC", nouncOption.EnvVarName("local"))
|
||||
assert.Equal(t, "RCLONE_LOCAL_COPY_LINKS", copyLinksOption.EnvVarName("local"))
|
||||
assert.Equal(t, "RCLONE_LOCAL_CASE_INSENSITIVE", caseInsensitiveOption.EnvVarName("local"))
|
||||
}
|
||||
|
||||
func TestOptionGetters(t *testing.T) {
|
||||
// Set up env vars
|
||||
envVars := [][2]string{
|
||||
{"RCLONE_CONFIG_LOCAL_POTATO_PIE", "yes"},
|
||||
{"RCLONE_COPY_LINKS", "TRUE"},
|
||||
{"RCLONE_LOCAL_NOUNC", "NOUNC"},
|
||||
}
|
||||
for _, ev := range envVars {
|
||||
assert.NoError(t, os.Setenv(ev[0], ev[1]))
|
||||
}
|
||||
defer func() {
|
||||
for _, ev := range envVars {
|
||||
assert.NoError(t, os.Unsetenv(ev[0]))
|
||||
}
|
||||
}()
|
||||
|
||||
oldConfigFileGet := ConfigFileGet
|
||||
ConfigFileGet = func(section, key string) (string, bool) {
|
||||
if section == "sausage" && key == "key1" {
|
||||
return "value1", true
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
defer func() {
|
||||
ConfigFileGet = oldConfigFileGet
|
||||
}()
|
||||
|
||||
// set up getters
|
||||
|
||||
// A configmap.Getter to read from the environment RCLONE_CONFIG_backend_option_name
|
||||
configEnvVarsGetter := configEnvVars("local")
|
||||
|
||||
// A configmap.Getter to read from the environment RCLONE_option_name
|
||||
optionEnvVarsGetter := optionEnvVars{"local", testOptions}
|
||||
|
||||
// A configmap.Getter to read either the default value or the set
|
||||
// value from the RegInfo.Options
|
||||
regInfoValuesGetterFalse := ®InfoValues{
|
||||
options: testOptions,
|
||||
useDefault: false,
|
||||
}
|
||||
regInfoValuesGetterTrue := ®InfoValues{
|
||||
options: testOptions,
|
||||
useDefault: true,
|
||||
}
|
||||
|
||||
// A configmap.Setter to read from the config file
|
||||
configFileGetter := getConfigFile("sausage")
|
||||
|
||||
for i, test := range []struct {
|
||||
get configmap.Getter
|
||||
key string
|
||||
wantValue string
|
||||
wantOk bool
|
||||
}{
|
||||
{configEnvVarsGetter, "not_found", "", false},
|
||||
{configEnvVarsGetter, "potato_pie", "yes", true},
|
||||
{optionEnvVarsGetter, "not_found", "", false},
|
||||
{optionEnvVarsGetter, "copy_links", "TRUE", true},
|
||||
{optionEnvVarsGetter, "nounc", "NOUNC", true},
|
||||
{optionEnvVarsGetter, "case_insensitive", "", false},
|
||||
{regInfoValuesGetterFalse, "not_found", "", false},
|
||||
{regInfoValuesGetterFalse, "case_insensitive", "true", true},
|
||||
{regInfoValuesGetterFalse, "copy_links", "", false},
|
||||
{regInfoValuesGetterTrue, "not_found", "", false},
|
||||
{regInfoValuesGetterTrue, "case_insensitive", "true", true},
|
||||
{regInfoValuesGetterTrue, "copy_links", "false", true},
|
||||
{configFileGetter, "not_found", "", false},
|
||||
{configFileGetter, "key1", "value1", true},
|
||||
} {
|
||||
what := fmt.Sprintf("%d: %+v: %q", i, test.get, test.key)
|
||||
gotValue, gotOk := test.get.Get(test.key)
|
||||
assert.Equal(t, test.wantValue, gotValue, what)
|
||||
assert.Equal(t, test.wantOk, gotOk, what)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestOptionsNonDefaultRC(t *testing.T) {
|
||||
type cfg struct {
|
||||
X string `config:"x"`
|
||||
Y int `config:"y"`
|
||||
}
|
||||
c := &cfg{X: "a", Y: 6}
|
||||
opts := Options{
|
||||
{Name: "x", Default: "a"}, // at default, should be omitted
|
||||
{Name: "y", Default: 5}, // non-default, should be included
|
||||
}
|
||||
|
||||
got, err := opts.NonDefaultRC(c)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, map[string]any{"Y": 6}, got)
|
||||
}
|
||||
|
||||
func TestOptionsNonDefaultRCMissingKey(t *testing.T) {
|
||||
type cfg struct {
|
||||
X string `config:"x"`
|
||||
}
|
||||
c := &cfg{X: "a"}
|
||||
// Options refers to a key not present in the struct -> expect error
|
||||
opts := Options{{Name: "missing", Default: ""}}
|
||||
_, err := opts.NonDefaultRC(c)
|
||||
assert.ErrorContains(t, err, "not found")
|
||||
}
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/cluster"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
@@ -97,6 +98,7 @@ type syncCopyMove struct {
|
||||
setDirModTimesMaxLevel int // max level of the directories to set
|
||||
modifiedDirs map[string]struct{} // dirs with changed contents (if s.setDirModTimeAfter)
|
||||
allowOverlap bool // whether we allow src and dst to overlap (i.e. for convmv)
|
||||
cluster *cluster.Cluster // non-nil to run sync via cluster
|
||||
}
|
||||
|
||||
// For keeping track of delayed modtime sets
|
||||
@@ -164,6 +166,7 @@ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
|
||||
setDirModTimeAfter: !ci.NoUpdateDirModTime && (!copyEmptySrcDirs || fsrc.Features().CanHaveEmptyDirectories && fdst.Features().DirModTimeUpdatesOnWrite),
|
||||
modifiedDirs: make(map[string]struct{}),
|
||||
allowOverlap: allowOverlap,
|
||||
cluster: cluster.GetCluster(ctx),
|
||||
}
|
||||
|
||||
s.logger, s.usingLogger = operations.GetLogger(ctx)
|
||||
@@ -496,13 +499,25 @@ func (s *syncCopyMove) pairCopyOrMove(ctx context.Context, in *pipe, fdst fs.Fs,
|
||||
dst := pair.Dst
|
||||
if s.DoMove {
|
||||
if src != dst {
|
||||
_, err = operations.MoveTransfer(ctx, fdst, dst, src.Remote(), src)
|
||||
if s.cluster != nil {
|
||||
err = s.cluster.Move(ctx, fdst, dst, src.Remote(), src)
|
||||
} else {
|
||||
_, err = operations.MoveTransfer(ctx, fdst, dst, src.Remote(), src)
|
||||
}
|
||||
} else {
|
||||
// src == dst signals delete the src
|
||||
err = operations.DeleteFile(ctx, src)
|
||||
if s.cluster != nil {
|
||||
err = s.cluster.DeleteFile(ctx, src)
|
||||
} else {
|
||||
err = operations.DeleteFile(ctx, src)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
_, err = operations.Copy(ctx, fdst, dst, src.Remote(), src)
|
||||
if s.cluster != nil {
|
||||
err = s.cluster.Copy(ctx, fdst, dst, src.Remote(), src)
|
||||
} else {
|
||||
_, err = operations.Copy(ctx, fdst, dst, src.Remote(), src)
|
||||
}
|
||||
}
|
||||
s.processError(err)
|
||||
if err != nil {
|
||||
@@ -539,8 +554,13 @@ func (s *syncCopyMove) startTransfers() {
|
||||
// This stops the background transfers
|
||||
func (s *syncCopyMove) stopTransfers() {
|
||||
s.toBeUploaded.Close()
|
||||
fs.Debugf(s.fdst, "Waiting for transfers to finish")
|
||||
s.transfersWg.Wait()
|
||||
fs.Debugf(s.fdst, "Waiting for transfers to finish")
|
||||
if s.cluster != nil {
|
||||
fs.Debugf(s.fdst, "Waiting for cluster transfers to finish")
|
||||
s.processError(s.cluster.Sync(s.ctx))
|
||||
fs.Debugf(s.fdst, "Cluster transfers finished")
|
||||
}
|
||||
}
|
||||
|
||||
// This starts the background renamers.
|
||||
|
||||
@@ -1,15 +1,16 @@
|
||||
package runs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
@@ -45,14 +46,6 @@ type ReportRun struct {
|
||||
Runs Runs
|
||||
}
|
||||
|
||||
// Parse version numbers
|
||||
// v1.49.0
|
||||
// v1.49.0-031-g2298834e-beta
|
||||
// v1.49.0-032-g20793a5f-sharefile-beta
|
||||
// match 1 is commit number
|
||||
// match 2 is branch name
|
||||
var parseVersion = regexp.MustCompile(`^v(?:[0-9.]+)-(?:\d+)-g([0-9a-f]+)(?:-(.*))?-beta$`)
|
||||
|
||||
// FIXME take -issue or -pr parameter...
|
||||
|
||||
// NewReport initialises and returns a Report
|
||||
@@ -82,19 +75,35 @@ func NewReport(Opt RunOpt) *Report {
|
||||
// Online version
|
||||
r.URL = Opt.URLBase + r.DateTime + "/index.html"
|
||||
|
||||
// Get branch/commit out of version
|
||||
parts := parseVersion.FindStringSubmatch(r.Version)
|
||||
if len(parts) >= 3 {
|
||||
r.Commit = parts[1]
|
||||
r.Branch = parts[2]
|
||||
}
|
||||
if r.Branch == "" {
|
||||
r.Branch = "master"
|
||||
}
|
||||
// Get branch/commit
|
||||
r.Branch, r.Commit = gitBranchAndCommit()
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// gitBranchAndCommit returns the current branch and commit hash.
|
||||
//
|
||||
// It returns "" on error.
|
||||
func gitBranchAndCommit() (branch, commit string) {
|
||||
// branch (empty if detached)
|
||||
var b bytes.Buffer
|
||||
cmdB := exec.Command("git", "symbolic-ref", "--short", "-q", "HEAD")
|
||||
cmdB.Stdout = &b
|
||||
if e := cmdB.Run(); e == nil {
|
||||
branch = strings.TrimSpace(b.String())
|
||||
}
|
||||
|
||||
// commit (full SHA)
|
||||
var c bytes.Buffer
|
||||
cmdC := exec.Command("git", "rev-parse", "HEAD")
|
||||
cmdC.Stdout = &c
|
||||
if e := cmdC.Run(); e == nil {
|
||||
commit = strings.TrimSpace(c.String())
|
||||
}
|
||||
|
||||
return branch, commit
|
||||
}
|
||||
|
||||
// End should be called when the tests are complete
|
||||
func (r *Report) End() {
|
||||
r.Duration = time.Since(r.StartTime)
|
||||
|
||||
@@ -139,6 +139,7 @@ backends:
|
||||
- backend: "compress"
|
||||
remote: "TestCompressDrive:"
|
||||
fastlist: false
|
||||
extratime: 2.0
|
||||
- backend: "compress"
|
||||
remote: "TestCompressS3:"
|
||||
fastlist: false
|
||||
@@ -610,6 +611,7 @@ backends:
|
||||
- backend: "zoho"
|
||||
remote: "TestZoho:"
|
||||
fastlist: false
|
||||
extratime: 2.0
|
||||
tests:
|
||||
- backend
|
||||
- backend: "hdfs"
|
||||
|
||||
@@ -8,10 +8,12 @@ PORT=28628
|
||||
. $(dirname "$0")/docker.bash
|
||||
|
||||
start() {
|
||||
# We need to replace the remakerings in the container to create Policy-1.
|
||||
docker run --rm -d --name ${NAME} \
|
||||
-p 127.0.0.1:${PORT}:8080 \
|
||||
bouncestorage/swift-aio
|
||||
|
||||
-v $(dirname "$0")/TestSwiftAIO.d/remakerings:/etc/swift/remakerings:ro \
|
||||
openstackswift/saio
|
||||
|
||||
echo type=swift
|
||||
echo env_auth=false
|
||||
echo user=test:tester
|
||||
|
||||
46
fstest/testserver/init.d/TestSwiftAIO.d/remakerings
Executable file
46
fstest/testserver/init.d/TestSwiftAIO.d/remakerings
Executable file
@@ -0,0 +1,46 @@
|
||||
#!/bin/sh
|
||||
|
||||
if ! grep -q "^\[storage-policy:1\]" swift.conf; then
|
||||
cat <<EOF >> swift.conf
|
||||
|
||||
[storage-policy:1]
|
||||
name = Policy-1
|
||||
EOF
|
||||
fi
|
||||
|
||||
rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
|
||||
|
||||
swift-ring-builder object.builder create 10 1 1
|
||||
swift-ring-builder object.builder add r1z1-127.0.0.1:6200/swift-d0 1
|
||||
swift-ring-builder object.builder add r1z1-127.0.0.1:6200/swift-d1 1
|
||||
swift-ring-builder object.builder add r1z1-127.0.0.1:6200/swift-d2 1
|
||||
swift-ring-builder object.builder add r1z1-127.0.0.1:6200/swift-d3 1
|
||||
swift-ring-builder object.builder add r1z1-127.0.0.1:6200/swift-d4 1
|
||||
swift-ring-builder object.builder add r1z1-127.0.0.1:6200/swift-d5 1
|
||||
swift-ring-builder object.builder rebalance
|
||||
swift-ring-builder container.builder create 10 1 1
|
||||
swift-ring-builder container.builder add r1z1-127.0.0.1:6201/swift-d0 1
|
||||
swift-ring-builder container.builder add r1z1-127.0.0.1:6201/swift-d1 1
|
||||
swift-ring-builder container.builder add r1z1-127.0.0.1:6201/swift-d2 1
|
||||
swift-ring-builder container.builder add r1z1-127.0.0.1:6201/swift-d3 1
|
||||
swift-ring-builder container.builder add r1z1-127.0.0.1:6201/swift-d4 1
|
||||
swift-ring-builder container.builder add r1z1-127.0.0.1:6201/swift-d5 1
|
||||
swift-ring-builder container.builder rebalance
|
||||
swift-ring-builder account.builder create 10 1 1
|
||||
swift-ring-builder account.builder add r1z1-127.0.0.1:6202/swift-d0 1
|
||||
swift-ring-builder account.builder add r1z1-127.0.0.1:6202/swift-d1 1
|
||||
swift-ring-builder account.builder add r1z1-127.0.0.1:6202/swift-d2 1
|
||||
swift-ring-builder account.builder add r1z1-127.0.0.1:6202/swift-d3 1
|
||||
swift-ring-builder account.builder add r1z1-127.0.0.1:6202/swift-d4 1
|
||||
swift-ring-builder account.builder add r1z1-127.0.0.1:6202/swift-d5 1
|
||||
swift-ring-builder account.builder rebalance
|
||||
|
||||
# For Policy-1:
|
||||
swift-ring-builder object-1.builder create 10 1 1
|
||||
swift-ring-builder object-1.builder add r1z1-127.0.0.1:6200/swift-d0 1
|
||||
swift-ring-builder object-1.builder add r1z1-127.0.0.1:6200/swift-d1 1
|
||||
swift-ring-builder object-1.builder add r1z1-127.0.0.1:6200/swift-d2 1
|
||||
swift-ring-builder object-1.builder add r1z1-127.0.0.1:6200/swift-d3 1
|
||||
swift-ring-builder object-1.builder add r1z1-127.0.0.1:6200/swift-d4 1
|
||||
swift-ring-builder object-1.builder add r1z1-127.0.0.1:6200/swift-d5 1
|
||||
swift-ring-builder object-1.builder rebalance
|
||||
@@ -8,9 +8,11 @@ PORT=28632
|
||||
. $(dirname "$0")/docker.bash
|
||||
|
||||
start() {
|
||||
# We need to replace the remakerings in the container to create Policy-1.
|
||||
docker run --rm -d --name ${NAME} \
|
||||
-p 127.0.0.1:${PORT}:8080 \
|
||||
bouncestorage/swift-aio
|
||||
-v $(dirname "$0")/TestSwiftAIO.d/remakerings:/etc/swift/remakerings:ro \
|
||||
openstackswift/saio
|
||||
|
||||
echo type=swift
|
||||
echo env_auth=false
|
||||
|
||||
@@ -110,7 +110,7 @@ func start(name string) error {
|
||||
return nil
|
||||
}
|
||||
// If we got a _connect value then try to connect to it
|
||||
const maxTries = 30
|
||||
const maxTries = 100
|
||||
var rdBuf = make([]byte, 1)
|
||||
for i := 1; i <= maxTries; i++ {
|
||||
if i != 0 {
|
||||
@@ -175,7 +175,16 @@ func Start(remoteName string) (fn func(), err error) {
|
||||
if running[name] <= 0 {
|
||||
// if server isn't running check to see if this server has
|
||||
// been started already but not by us and stop it if so
|
||||
if os.Getenv(envKey(name, "type")) == "" && isRunning(name) {
|
||||
const maxTries = 10
|
||||
for i := 1; i <= maxTries; i++ {
|
||||
if os.Getenv(envKey(name, "type")) == "" && !isRunning(name) {
|
||||
fs.Logf(name, "Stopped server")
|
||||
break
|
||||
}
|
||||
if i != 1 {
|
||||
time.Sleep(time.Second)
|
||||
fs.Logf(name, "Attempting to stop %s try %d/%d", name, i, maxTries)
|
||||
}
|
||||
stop(name)
|
||||
}
|
||||
if !isRunning(name) {
|
||||
@@ -211,6 +220,6 @@ func stop(name string) {
|
||||
fs.Errorf(name, "Failed to stop server: %v", err)
|
||||
}
|
||||
running[name] = 0
|
||||
fs.Logf(name, "Stopped server")
|
||||
fs.Logf(name, "Stopping server")
|
||||
}
|
||||
}
|
||||
|
||||
148
go.mod
148
go.mod
@@ -4,36 +4,36 @@ go 1.24.0
|
||||
|
||||
require (
|
||||
bazil.org/fuse v0.0.0-20230120002735-62a210ff1fd5
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.2
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.2
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.2
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358
|
||||
github.com/Files-com/files-sdk-go/v3 v3.2.218
|
||||
github.com/Files-com/files-sdk-go/v3 v3.2.242
|
||||
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd
|
||||
github.com/a8m/tree v0.0.0-20240104212747-2c8764a5f17e
|
||||
github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3
|
||||
github.com/abbot/go-http-auth v0.4.0
|
||||
github.com/anacrolix/dms v1.7.2
|
||||
github.com/anacrolix/log v0.16.0
|
||||
github.com/anacrolix/log v0.17.0
|
||||
github.com/atotto/clipboard v0.1.4
|
||||
github.com/aws/aws-sdk-go-v2 v1.38.0
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.0
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.4
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.87.0
|
||||
github.com/aws/smithy-go v1.22.5
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.10
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.14
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.8
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.88.2
|
||||
github.com/aws/smithy-go v1.23.0
|
||||
github.com/buengese/sgzip v0.1.1
|
||||
github.com/cloudinary/cloudinary-go/v2 v2.12.0
|
||||
github.com/cloudinary/cloudinary-go/v2 v2.13.0
|
||||
github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc
|
||||
github.com/colinmarc/hdfs/v2 v2.4.0
|
||||
github.com/coreos/go-semver v0.3.1
|
||||
github.com/coreos/go-systemd/v22 v22.5.0
|
||||
github.com/coreos/go-systemd/v22 v22.6.0
|
||||
github.com/dop251/scsu v0.0.0-20220106150536-84ac88021d00
|
||||
github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5
|
||||
github.com/gabriel-vasile/mimetype v1.4.9
|
||||
github.com/gdamore/tcell/v2 v2.8.1
|
||||
github.com/go-chi/chi/v5 v5.2.2
|
||||
github.com/gabriel-vasile/mimetype v1.4.10
|
||||
github.com/gdamore/tcell/v2 v2.9.0
|
||||
github.com/go-chi/chi/v5 v5.2.3
|
||||
github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348
|
||||
github.com/go-git/go-billy/v5 v5.6.2
|
||||
github.com/google/uuid v1.6.0
|
||||
@@ -47,49 +47,49 @@ require (
|
||||
github.com/klauspost/compress v1.18.0
|
||||
github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988
|
||||
github.com/koofr/go-koofrclient v0.0.0-20221207135200-cbd7fc9ad6a6
|
||||
github.com/lanrat/extsort v1.4.0
|
||||
github.com/lanrat/extsort v1.4.2
|
||||
github.com/mattn/go-colorable v0.1.14
|
||||
github.com/mattn/go-runewidth v0.0.16
|
||||
github.com/mattn/go-runewidth v0.0.17
|
||||
github.com/minio/minio-go/v7 v7.0.95
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/moby/sys/mountinfo v0.7.2
|
||||
github.com/ncw/swift/v2 v2.0.4
|
||||
github.com/oracle/oci-go-sdk/v65 v65.98.0
|
||||
github.com/oracle/oci-go-sdk/v65 v65.101.0
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/peterh/liner v1.2.2
|
||||
github.com/pkg/sftp v1.13.9
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
|
||||
github.com/prometheus/client_golang v1.23.0
|
||||
github.com/prometheus/client_golang v1.23.2
|
||||
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8
|
||||
github.com/quasilyte/go-ruleguard/dsl v0.3.22
|
||||
github.com/quasilyte/go-ruleguard/dsl v0.3.23
|
||||
github.com/rclone/gofakes3 v0.0.4
|
||||
github.com/rfjakob/eme v1.1.2
|
||||
github.com/rivo/uniseg v0.4.7
|
||||
github.com/rogpeppe/go-internal v1.14.1
|
||||
github.com/shirou/gopsutil/v4 v4.25.7
|
||||
github.com/shirou/gopsutil/v4 v4.25.8
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966
|
||||
github.com/spf13/cobra v1.9.1
|
||||
github.com/spf13/pflag v1.0.7
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5
|
||||
github.com/spf13/cobra v1.10.1
|
||||
github.com/spf13/pflag v1.0.10
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20250926104142-ccb8d3498e6c
|
||||
github.com/unknwon/goconfig v1.0.0
|
||||
github.com/willscott/go-nfs v0.0.3
|
||||
github.com/winfsp/cgofuse v1.6.0
|
||||
github.com/winfsp/cgofuse v1.6.1-0.20250813110601-7d90b0992471
|
||||
github.com/xanzy/ssh-agent v0.3.3
|
||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78
|
||||
github.com/yunify/qingstor-sdk-go/v3 v3.2.0
|
||||
github.com/zeebo/blake3 v0.2.4
|
||||
github.com/zeebo/xxh3 v1.0.2
|
||||
go.etcd.io/bbolt v1.4.2
|
||||
goftp.io/server/v2 v2.0.1
|
||||
golang.org/x/crypto v0.41.0
|
||||
golang.org/x/net v0.43.0
|
||||
golang.org/x/oauth2 v0.30.0
|
||||
golang.org/x/sync v0.16.0
|
||||
golang.org/x/sys v0.35.0
|
||||
golang.org/x/text v0.28.0
|
||||
golang.org/x/time v0.12.0
|
||||
google.golang.org/api v0.247.0
|
||||
go.etcd.io/bbolt v1.4.3
|
||||
goftp.io/server/v2 v2.0.2
|
||||
golang.org/x/crypto v0.42.0
|
||||
golang.org/x/net v0.44.0
|
||||
golang.org/x/oauth2 v0.31.0
|
||||
golang.org/x/sync v0.17.0
|
||||
golang.org/x/sys v0.36.0
|
||||
golang.org/x/text v0.29.0
|
||||
golang.org/x/time v0.13.0
|
||||
google.golang.org/api v0.250.0
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
||||
gopkg.in/validator.v2 v2.0.1
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
@@ -97,11 +97,11 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go/auth v0.16.4 // indirect
|
||||
cloud.google.com/go/auth v0.16.5 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.8.0 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.9.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 // indirect
|
||||
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf // indirect
|
||||
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e // indirect
|
||||
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
|
||||
@@ -109,24 +109,25 @@ require (
|
||||
github.com/ProtonMail/gopenpgp/v2 v2.9.0 // indirect
|
||||
github.com/PuerkitoBio/goquery v1.10.3 // indirect
|
||||
github.com/akavel/rsrc v0.10.2 // indirect
|
||||
github.com/anacrolix/generics v0.0.3 // indirect
|
||||
github.com/anacrolix/generics v0.1.0 // indirect
|
||||
github.com/andybalholm/cascadia v1.3.3 // indirect
|
||||
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.28.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.33.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.37.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.29.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
|
||||
github.com/bradenaw/juniper v0.15.3 // indirect
|
||||
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
|
||||
github.com/calebcase/tmpfile v1.0.3 // indirect
|
||||
@@ -139,7 +140,7 @@ require (
|
||||
github.com/cronokirby/saferith v0.33.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/ebitengine/purego v0.8.4 // indirect
|
||||
github.com/ebitengine/purego v0.9.0 // indirect
|
||||
github.com/emersion/go-message v0.18.2 // indirect
|
||||
github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
@@ -163,7 +164,6 @@ require (
|
||||
github.com/google/s2a-go v0.1.9 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.15.0 // indirect
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e // indirect
|
||||
github.com/gorilla/schema v1.4.1 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
@@ -183,8 +183,8 @@ require (
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/leodido/go-urn v1.4.0 // indirect
|
||||
github.com/lpar/date v1.0.0 // indirect
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 // indirect
|
||||
github.com/lucasb-eyer/go-colorful v1.3.0 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20250827001030-24949be3fa54 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/minio/crc64nvme v1.1.1 // indirect
|
||||
github.com/minio/md5-simd v1.1.2 // indirect
|
||||
@@ -199,10 +199,10 @@ require (
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/common v0.65.0 // indirect
|
||||
github.com/prometheus/common v0.66.1 // indirect
|
||||
github.com/prometheus/procfs v0.17.0 // indirect
|
||||
github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93 // indirect
|
||||
github.com/relvacode/iso8601 v1.6.0 // indirect
|
||||
github.com/relvacode/iso8601 v1.7.0 // indirect
|
||||
github.com/rs/xid v1.6.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect
|
||||
@@ -210,30 +210,31 @@ require (
|
||||
github.com/samber/lo v1.51.0 // indirect
|
||||
github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/smartystreets/goconvey v1.8.1 // indirect
|
||||
github.com/sony/gobreaker v1.0.0 // indirect
|
||||
github.com/spacemonkeygo/monkit/v3 v3.0.24 // indirect
|
||||
github.com/tinylib/msgp v1.3.0 // indirect
|
||||
github.com/tinylib/msgp v1.4.0 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.15 // indirect
|
||||
github.com/tklauser/numcpus v0.10.0 // indirect
|
||||
github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
github.com/zeebo/errs v1.4.0 // indirect
|
||||
go.mongodb.org/mongo-driver v1.17.4 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 // indirect
|
||||
go.opentelemetry.io/otel v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
golang.org/x/exp v0.0.0-20250811191247-51f88131bc50 // indirect
|
||||
golang.org/x/tools v0.36.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250811230008-5f3141c8851a // indirect
|
||||
google.golang.org/grpc v1.74.2 // indirect
|
||||
google.golang.org/protobuf v1.36.7 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect
|
||||
go.opentelemetry.io/otel v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.38.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||
golang.org/x/exp v0.0.0-20250911091902-df9299821621 // indirect
|
||||
golang.org/x/tools v0.37.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250922171735-9219d122eba9 // indirect
|
||||
google.golang.org/grpc v1.75.1 // indirect
|
||||
google.golang.org/protobuf v1.36.9 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
moul.io/http2curl/v2 v2.3.0 // indirect
|
||||
sigs.k8s.io/yaml v1.6.0 // indirect
|
||||
storj.io/common v0.0.0-20250808122759-804533d519c1 // indirect
|
||||
storj.io/common v0.0.0-20250918032746-784a656bec7e // indirect
|
||||
storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55 // indirect
|
||||
storj.io/eventkit v0.0.0-20250410172343-61f26d3de156 // indirect
|
||||
storj.io/infectious v0.0.2 // indirect
|
||||
@@ -246,6 +247,7 @@ require (
|
||||
github.com/ProtonMail/go-crypto v1.3.0
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2
|
||||
github.com/pkg/xattr v0.4.12
|
||||
golang.org/x/mobile v0.0.0-20250808145247-395d808d53cd
|
||||
golang.org/x/term v0.34.0
|
||||
github.com/pquerna/otp v1.5.0
|
||||
golang.org/x/mobile v0.0.0-20250911085028-6912353760cf
|
||||
golang.org/x/term v0.35.0
|
||||
)
|
||||
|
||||
340
go.sum
340
go.sum
@@ -15,8 +15,8 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
|
||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go/auth v0.16.4 h1:fXOAIQmkApVvcIn7Pc2+5J8QTMVbUGLscnSVNl11su8=
|
||||
cloud.google.com/go/auth v0.16.4/go.mod h1:j10ncYwjX/g3cdX7GpEzsdM+d+ZNsXAbb6qXA7p1Y5M=
|
||||
cloud.google.com/go/auth v0.16.5 h1:mFWNQ2FEVWAliEQWpAdH80omXFokmrnbDhUS9cBywsI=
|
||||
cloud.google.com/go/auth v0.16.5/go.mod h1:utzRfHMP+Vv0mpOkTRQoWD2q3BatTOoWbA7gCc2dUhQ=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
@@ -25,8 +25,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/compute/metadata v0.8.0 h1:HxMRIbao8w17ZX6wBnjhcDkW6lTFpgcaobyVfZWqRLA=
|
||||
cloud.google.com/go/compute/metadata v0.8.0/go.mod h1:sYOGTp851OV9bOFJ9CH7elVvyzopvWQFNNghtDQ/Biw=
|
||||
cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=
|
||||
cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
@@ -39,10 +39,10 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.2 h1:Hr5FTipp7SL07o2FvoVOX9HRiRH3CR3Mj8pxqCcdD5A=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.2/go.mod h1:QyVsSSN64v5TGltphKLQ2sQxe4OBQg0J1eKRcVBnfgE=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0 h1:MhRfI58HblXzCtWEZCO0feHs8LweePB3s90r7WaR1KU=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.11.0/go.mod h1:okZ+ZURbArNdlJ+ptXoyHNuOETzOl1Oww19rm8I2WLA=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 h1:5YTBM8QDVIBN3sxBil89WfdAAqDZbyJTgh688DSxX5w=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0 h1:wL5IEG5zb7BVv1Kv0Xm92orq+5hB5Nipn3B5tn4Rqfk=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.12.0/go.mod h1:J7MUC/wtRpfGVbQ5sIItY5/FuVWmvzlY21WAOfQnq/I=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=
|
||||
@@ -57,12 +57,12 @@ github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0 h1:XkkQbfMyuH2jTSjQjSoihryI8GINRcs4xp8lNawg0FI=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.5.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/Files-com/files-sdk-go/v3 v3.2.218 h1:tIvcbHXNY/bq+Sno6vajOJOxhe5XbU59Fa1ohOybK+s=
|
||||
github.com/Files-com/files-sdk-go/v3 v3.2.218/go.mod h1:E0BaGQbcMUcql+AfubCR/iasWKBxX5UZPivnQGC2z0M=
|
||||
github.com/Files-com/files-sdk-go/v3 v3.2.242 h1:mE2LHt6hpwacgntXIATo0JJ6MW2Hcthd3V4+GHrdlg4=
|
||||
github.com/Files-com/files-sdk-go/v3 v3.2.242/go.mod h1:9nNJzlafE8PnMYGb8zbEKzWsVxfgx/LV2faJgP9HIZ0=
|
||||
github.com/IBM/go-sdk-core/v5 v5.21.0 h1:DUnYhvC4SoC8T84rx5omnhY3+xcQg/Whyoa3mDPIMkk=
|
||||
github.com/IBM/go-sdk-core/v5 v5.21.0/go.mod h1:Q3BYO6iDA2zweQPDGbNTtqft5tDcEpm6RTuqMlPcvbw=
|
||||
github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
|
||||
@@ -98,10 +98,10 @@ github.com/akavel/rsrc v0.10.2 h1:Zxm8V5eI1hW4gGaYsJQUhxpjkENuG91ki8B4zCrvEsw=
|
||||
github.com/akavel/rsrc v0.10.2/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
|
||||
github.com/anacrolix/dms v1.7.2 h1:JAAJJIlXp+jT2yEah1EbR1AFpGALHL238uSKFXec2qw=
|
||||
github.com/anacrolix/dms v1.7.2/go.mod h1:excFJW5MKBhn5yt5ZMyeE9iFVqnO6tEGQl7YG/2tUoQ=
|
||||
github.com/anacrolix/generics v0.0.3 h1:wMkQgQzq0obSy1tMkxDu7Ife7PsegOBWHDRaSW31EnM=
|
||||
github.com/anacrolix/generics v0.0.3/go.mod h1:MN3ve08Z3zSV/rTuX/ouI4lNdlfTxgdafQJiLzyNRB8=
|
||||
github.com/anacrolix/log v0.16.0 h1:DSuyb5kAJwl3Y0X1TRcStVrTS9ST9b0BHW+7neE4Xho=
|
||||
github.com/anacrolix/log v0.16.0/go.mod h1:m0poRtlr41mriZlXBQ9SOVZ8yZBkLjOkDhd5Li5pITA=
|
||||
github.com/anacrolix/generics v0.1.0 h1:r6OgogjCdml3K5A8ixUG0X9DM4jrQiMfIkZiBOGvIfg=
|
||||
github.com/anacrolix/generics v0.1.0/go.mod h1:MN3ve08Z3zSV/rTuX/ouI4lNdlfTxgdafQJiLzyNRB8=
|
||||
github.com/anacrolix/log v0.17.0 h1:cZvEGRPCbIg+WK+qAxWj/ap2Gj8cx1haOCSVxNZQpK4=
|
||||
github.com/anacrolix/log v0.17.0/go.mod h1:m0poRtlr41mriZlXBQ9SOVZ8yZBkLjOkDhd5Li5pITA=
|
||||
github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM=
|
||||
github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA=
|
||||
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc h1:LoL75er+LKDHDUfU5tRvFwxH0LjPpZN8OoG8Ll+liGU=
|
||||
@@ -110,46 +110,48 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
|
||||
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
|
||||
github.com/aws/aws-sdk-go-v2 v1.38.0 h1:UCRQ5mlqcFk9HJDIqENSLR3wiG1VTWlyUfLDEvY7RxU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.38.0/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 h1:6GMWV6CNpA/6fbFHnoAjrv4+LGfyTqZz2LtCHnspgDg=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0/go.mod h1:/mXlTIVG9jbxkqDnr5UQNQxW1HRYxeGklkM9vAFeabg=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.0 h1:9yH0xiY5fUnVNLRWO0AtayqwU1ndriZdN78LlhruJR4=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.0/go.mod h1:VeV3K72nXnhbe4EuxxhzsDc/ByrCSlZwUnWH52Nde/I=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.4 h1:IPd0Algf1b+Qy9BcDp0sCUcIWdCQPSzDoMK3a8pcbUM=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.4/go.mod h1:nwg78FjH2qvsRM1EVZlX9WuGUJOL5od+0qvm0adEzHk=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.3 h1:GicIdnekoJsjq9wqnvyi2elW6CGMSYKhdozE7/Svh78=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.3/go.mod h1:R7BIi6WNC5mc1kfRM7XM/VHC3uRWkjc396sfabq4iOo=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4 h1:0SzCLoPRSK3qSydsaFQWugP+lOBCTPwfcBOm6222+UA=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4/go.mod h1:JAet9FsBHjfdI+TnMBX4ModNNaQHAd3dc/Bk+cNsxeM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3 h1:o9RnO+YZ4X+kt5Z7Nvcishlz0nksIt2PIzDglLMP0vA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3/go.mod h1:+6aLJzOG1fvMOyzIySYjOFjcguGvVRL68R+uoRencN4=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.3 h1:joyyUFhiTQQmVK6ImzNU9TQSNRNeD9kOklqTzyk5v6s=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.3/go.mod h1:+vNIyZQP3b3B1tSLI0lxvrU9cfM7gpdRXMFfm67ZcPc=
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1 h1:fWZhGAwVRK/fAN2tmt7ilH4PPAE11rDj7HytrmbZ2FE=
|
||||
github.com/aws/aws-sdk-go-v2 v1.39.1/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 h1:i8p8P4diljCr60PpJp6qZXNlgX4m2yQFpYk+9ZT+J4E=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1/go.mod h1:ddqbooRZYNoJ2dsTwOty16rM+/Aqmk/GOXrK8cg7V00=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.10 h1:7LllDZAegXU3yk41mwM6KcPu0wmjKGQB1bg99bNdQm4=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.31.10/go.mod h1:Ge6gzXPjqu4v0oHvgAwvGzYcK921GU0hQM25WF/Kl+8=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.14 h1:TxkI7QI+sFkTItN/6cJuMZEIVMFXeu2dI1ZffkXngKI=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.18.14/go.mod h1:12x4Uw/vijC11XkctTjy92TNCQ+UnNJkT7fzX0Yd93E=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.8 h1:gLD09eaJUdiszm7vd1btiQUYE0Hj+0I2b8AS+75z9AY=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.8/go.mod h1:4RW3oMPt1POR74qVOC4SbubxAwdP4pCT0nSw3jycOU4=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.8 h1:QcAh/TNGM3MWe95ilMWwnieXWXsyM33Mb/RuTGlWLm4=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.8/go.mod h1:72m/ZCCgYpXJzsgI8uJFYMnXEjtZ4kkaolL9NRXLSnU=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8 h1:6bgAZgRyT4RoFWhxS+aoGMFyE0cD1bSzFnEEi4bFPGI=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.8/go.mod h1:KcGkXFVU8U28qS4KvLEcPxytPZPBcRawaH2Pf/0jptE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8 h1:HhJYoES3zOz34yWEpGENqJvRVPqpmJyR3+AFg9ybhdY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.8/go.mod h1:JnA+hPWeYAVbDssp83tv+ysAG8lTfLVXvSsyKg/7xNA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.3 h1:ZV2XK2L3HBq9sCKQiQ/MdhZJppH/rH0vddEAamsHUIs=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.3/go.mod h1:b9F9tk2HdHpbf3xbN7rUZcfmJI26N6NcJu/8OsBFI/0=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 h1:6+lZi2JeGKtCraAj1rpoZfKqnQ9SptseRZioejfUOLM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.3 h1:3ZKmesYBaFX33czDl6mbrcHb6jeheg6LqjJhQdefhsY=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.3/go.mod h1:7ryVb78GLCnjq7cw45N6oUb9REl7/vNUwjvIqC5UgdY=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.3 h1:ieRzyHXypu5ByllM7Sp4hC5f/1Fy5wqxqY0yB85hC7s=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.3/go.mod h1:O5ROz8jHiOAKAwx179v+7sHMhfobFVi6nZt8DEyiYoM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.3 h1:SE/e52dq9a05RuxzLcjT+S5ZpQobj3ie3UTaSf2NnZc=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.3/go.mod h1:zkpvBTsR020VVr8TOrwK2TrUW9pOir28sH5ECHpnAfo=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.87.0 h1:egoDf+Geuuntmw79Mz6mk9gGmELCPzg5PFEABOHB+6Y=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.87.0/go.mod h1:t9MDi29H+HDbkolTSQtbI0HP9DemAWQzUjmWC7LGMnE=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.28.0 h1:Mc/MKBf2m4VynyJkABoVEN+QzkfLqGj0aiJuEe7cMeM=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.28.0/go.mod h1:iS5OmxEcN4QIPXARGhavH7S8kETNL11kym6jhoS7IUQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.33.0 h1:6csaS/aJmqZQbKhi1EyEMM7yBW653Wy/B9hnBofW+sw=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.33.0/go.mod h1:59qHWaY5B+Rs7HGTuVGaC32m0rdpQ68N8QCN3khYiqs=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.37.0 h1:MG9VFW43M4A8BYeAfaJJZWrroinxeTi2r3+SnmLQfSA=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.37.0/go.mod h1:JdeBDPgpJfuS6rU/hNglmOigKhyEZtBmbraLE4GK1J8=
|
||||
github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw=
|
||||
github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.8 h1:1/bT9kDdLQzfZ1e6J6hpW+SfNDd6xrV8F3M2CuGyUz8=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.8/go.mod h1:RbdwTONAIi59ej/+1H+QzZORt5bcyAtbrS7FQb2pvz0=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.8 h1:tIN8MFT1z5STK5kTdOT1TCfMN/bn5fSEnlKsTL8qBOU=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.8/go.mod h1:VKS56txtNWjKI8FqD/hliL0BcshyF4ZaLBa1rm2Y+5s=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8 h1:M6JI2aGFEzYxsF6CXIuRBnkge9Wf9a2xU39rNeXgu10=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.8/go.mod h1:Fw+MyTwlwjFsSTE31mH211Np+CUslml8mzc0AFEG09s=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.8 h1:AgYCo1Rb8XChJXA871BXHDNxNWOTAr6V5YdsRIBbgv0=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.8/go.mod h1:Au9dvIGm1Hbqnt29d3VakOCQuN9l0WrkDDTRq8biWS4=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.88.2 h1:T7b3qniouutV5Wwa9B1q7gW+Y8s1B3g9RE9qa7zLBIM=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.88.2/go.mod h1:tW9TsLb6t1eaTdBE6LITyJW1m/+DjQPU78Q/jT2FJu8=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.29.4 h1:FTdEN9dtWPB0EOURNtDPmwGp6GGvMqRJCAihkSl/1No=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.29.4/go.mod h1:mYubxV9Ff42fZH4kexj43gFPhgc/LyC7KqvUKt1watc=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.0 h1:I7ghctfGXrscr7r1Ga/mDqSJKm7Fkpl5Mwq79Z+rZqU=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.0/go.mod h1:Zo9id81XP6jbayIFWNuDpA6lMBWhsVy+3ou2jLa4JnA=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 h1:+LVB0xBqEgjQoqr9bGZbRzvg212B0f17JdflleJRNR4=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.38.5/go.mod h1:xoaxeqnnUaZjPjaICgIy5B+MHCSb/ZSOn4MvkFNOUA0=
|
||||
github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE=
|
||||
github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
|
||||
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
|
||||
github.com/bradenaw/juniper v0.15.3 h1:RHIAMEDTpvmzV1wg1jMAHGOoI2oJUSPx3lxRldXnFGo=
|
||||
github.com/bradenaw/juniper v0.15.3/go.mod h1:UX4FX57kVSaDp4TPqvSjkAAewmRFAfXf27BOs5z9dq8=
|
||||
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaqKnf+7Qs6GbEPfd4iMOitWzXJx8=
|
||||
@@ -175,8 +177,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
|
||||
github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
|
||||
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
|
||||
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
|
||||
github.com/cloudinary/cloudinary-go/v2 v2.12.0 h1:uveBJeNpJztKDwFW/B+Wuklq584hQmQXlo+hGTSOGZ8=
|
||||
github.com/cloudinary/cloudinary-go/v2 v2.12.0/go.mod h1:ireC4gqVetsjVhYlwjUJwKTbZuWjEIynbR9zQTlqsvo=
|
||||
github.com/cloudinary/cloudinary-go/v2 v2.13.0 h1:ugiQwb7DwpWQnete2AZkTh94MonZKmxD7hDGy1qTzDs=
|
||||
github.com/cloudinary/cloudinary-go/v2 v2.13.0/go.mod h1:ireC4gqVetsjVhYlwjUJwKTbZuWjEIynbR9zQTlqsvo=
|
||||
github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc h1:t8YjNUCt1DimB4HCIXBztwWMhgxr5yG5/YaRl9Afdfg=
|
||||
github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc/go.mod h1:CgWpFCFWzzEA5hVkhAc6DZZzGd3czx+BblvOzjmg6KA=
|
||||
github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc h1:0xCWmFKBmarCqqqLeM7jFBSw/Or81UEElFqO8MY+GDs=
|
||||
@@ -188,8 +190,8 @@ github.com/colinmarc/hdfs/v2 v2.4.0 h1:v6R8oBx/Wu9fHpdPoJJjpGSUxo8NhHIwrwsfhFvU9
|
||||
github.com/colinmarc/hdfs/v2 v2.4.0/go.mod h1:0NAO+/3knbMx6+5pCv+Hcbaz4xn/Zzbn9+WIib2rKVI=
|
||||
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
|
||||
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo=
|
||||
github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
@@ -209,11 +211,10 @@ github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5 h1:FT+t0UEDykcor4y3dMVKXI
|
||||
github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5/go.mod h1:rSS3kM9XMzSQ6pw91Qgd6yB5jdt70N4OdtrAf74As5M=
|
||||
github.com/dsnet/try v0.0.3 h1:ptR59SsrcFUYbT/FhAbKTV6iLkeD6O18qfIWRml2fqI=
|
||||
github.com/dsnet/try v0.0.3/go.mod h1:WBM8tRpUmnXXhY1U6/S8dt6UWdHTQ7y8A5YSkRCkq40=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw=
|
||||
github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
github.com/ebitengine/purego v0.9.0 h1:mh0zpKBIXDceC63hpvPuGLiJ8ZAa3DfrFTudmfi8A4k=
|
||||
github.com/ebitengine/purego v0.9.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
github.com/emersion/go-message v0.18.2 h1:rl55SQdjd9oJcIoQNhubD2Acs1E6IzlZISRTK7x/Lpg=
|
||||
github.com/emersion/go-message v0.18.2/go.mod h1:XpJyL70LwRvq2a8rVbHXikPgKj8+aI0kGdHlg16ibYA=
|
||||
github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff h1:4N8wnS3f1hNHSmFD5zgFkWCyA4L1kCDkImPAtK7D6tg=
|
||||
@@ -232,20 +233,20 @@ github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0X
|
||||
github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
|
||||
github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
|
||||
github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0=
|
||||
github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
|
||||
github.com/gdamore/encoding v1.0.1 h1:YzKZckdBL6jVt2Gc+5p82qhrGiqMdG/eNs6Wy0u3Uhw=
|
||||
github.com/gdamore/encoding v1.0.1/go.mod h1:0Z0cMFinngz9kS1QfMjCP8TY7em3bZYeeklsSDPivEo=
|
||||
github.com/gdamore/tcell/v2 v2.8.1 h1:KPNxyqclpWpWQlPLx6Xui1pMk8S+7+R37h3g07997NU=
|
||||
github.com/gdamore/tcell/v2 v2.8.1/go.mod h1:bj8ori1BG3OYMjmb3IklZVWfZUJ1UBQt9JXrOCOhGWw=
|
||||
github.com/gdamore/tcell/v2 v2.9.0 h1:N6t+eqK7/xwtRPwxzs1PXeRWnm0H9l02CrgJ7DLn1ys=
|
||||
github.com/gdamore/tcell/v2 v2.9.0/go.mod h1:8/ZoqM9rxzYphT9tH/9LnunhV9oPBqwS8WHGYm5nrmo=
|
||||
github.com/geoffgarside/ber v1.2.0 h1:/loowoRcs/MWLYmGX9QtIAbA+V/FrnVLsMMPhwiRm64=
|
||||
github.com/geoffgarside/ber v1.2.0/go.mod h1:jVPKeCbj6MvQZhwLYsGwaGI52oUorHoHKNecGT85ZCc=
|
||||
github.com/gin-contrib/sse v1.0.0 h1:y3bT1mUWUxDpW4JLQg/HnTqV4rozuW4tC9eFKTxYI9E=
|
||||
github.com/gin-contrib/sse v1.0.0/go.mod h1:zNuFdwarAygJBht0NTKiSi3jRf6RbqeILZ9Sp6Slhe0=
|
||||
github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
|
||||
github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
|
||||
github.com/go-chi/chi/v5 v5.2.2 h1:CMwsvRVTbXVytCk1Wd72Zy1LAsAh9GxMmSNWLHCG618=
|
||||
github.com/go-chi/chi/v5 v5.2.2/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
|
||||
github.com/go-chi/chi/v5 v5.2.3 h1:WQIt9uxdsAbgIYgid+BpYc+liqQZGMHRaUwp0JUcvdE=
|
||||
github.com/go-chi/chi/v5 v5.2.3/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
|
||||
github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348 h1:JnrjqG5iR07/8k7NqrLNilRsl3s1EPRQEGvbPyOce68=
|
||||
github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348/go.mod h1:Czxo/d1g948LtrALAZdL04TL/HnkopquAjxYUuI02bo=
|
||||
github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM=
|
||||
@@ -281,7 +282,6 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
|
||||
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
|
||||
github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
|
||||
github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
|
||||
@@ -351,9 +351,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo=
|
||||
github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
|
||||
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
|
||||
github.com/gorilla/schema v1.4.1 h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E=
|
||||
github.com/gorilla/schema v1.4.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM=
|
||||
github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=
|
||||
@@ -399,7 +398,6 @@ github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh6
|
||||
github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
|
||||
github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
|
||||
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
|
||||
github.com/jlaffaye/ftp v0.0.0-20190624084859-c1312a7102bf/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
|
||||
github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3 h1:ZxO6Qr2GOXPdcW80Mcn3nemvilMPvpWqxrNfK2ZnNNs=
|
||||
github.com/jlaffaye/ftp v0.2.1-0.20240918233326-1b970516f5d3/go.mod h1:dvLUr/8Fs9a2OBrEnCC5duphbkz/k/mSy5OkXg3PAgI=
|
||||
github.com/josephspurrier/goversioninfo v1.5.0 h1:9TJtORoyf4YMoWSOo/cXFN9A/lB3PniJ91OxIH6e7Zg=
|
||||
@@ -423,7 +421,6 @@ github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYW
|
||||
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988 h1:CjEMN21Xkr9+zwPmZPaJJw+apzVbjGL5uK/6g9Q2jGU=
|
||||
github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988/go.mod h1:/agobYum3uo/8V6yPVnq+R82pyVGCeuWW5arT4Txn8A=
|
||||
github.com/koofr/go-koofrclient v0.0.0-20221207135200-cbd7fc9ad6a6 h1:FHVoZMOVRA+6/y4yRlbiR3WvsrOcKBd/f64H7YiWR2U=
|
||||
@@ -440,31 +437,29 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/lanrat/extsort v1.4.0 h1:jysS/Tjnp7mBwJ6NG8SY+XYFi8HF3LujGbqY9jOWjco=
|
||||
github.com/lanrat/extsort v1.4.0/go.mod h1:hceP6kxKPKebjN1RVrDBXMXXECbaI41Y94tt6MDazc4=
|
||||
github.com/lanrat/extsort v1.4.2 h1:akbLIdo4PhNZtvjpaWnbXtGMmLtnGzXplkzfgl+XTTY=
|
||||
github.com/lanrat/extsort v1.4.2/go.mod h1:hceP6kxKPKebjN1RVrDBXMXXECbaI41Y94tt6MDazc4=
|
||||
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
|
||||
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
|
||||
github.com/lpar/date v1.0.0 h1:bq/zVqFTUmsxvd/CylidY4Udqpr9BOFrParoP6p0x/I=
|
||||
github.com/lpar/date v1.0.0/go.mod h1:KjYe0dDyMQTgpqcUz4LEIeM5VZwhggjVx/V2dtc8NSo=
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
|
||||
github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 h1:PpXWgLPs+Fqr325bN2FD2ISlRRztXibcX6e8f5FR5Dc=
|
||||
github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg=
|
||||
github.com/lucasb-eyer/go-colorful v1.3.0 h1:2/yBRLdWBZKrf7gB40FoiKfAWYQ0lqNcbuQwVHXptag=
|
||||
github.com/lucasb-eyer/go-colorful v1.3.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
|
||||
github.com/lufia/plan9stats v0.0.0-20250827001030-24949be3fa54 h1:mFWunSatvkQQDhpdyuFAYwyAan3hzCuma+Pz8sqvOfg=
|
||||
github.com/lufia/plan9stats v0.0.0-20250827001030-24949be3fa54/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg=
|
||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
|
||||
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-runewidth v0.0.17 h1:78v8ZlW0bP43XfmAfPsdXcoNCelfMHsDmd/pkENfrjQ=
|
||||
github.com/mattn/go-runewidth v0.0.17/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/minio/crc64nvme v1.1.1 h1:8dwx/Pz49suywbO+auHCBpCtlW1OfpcLN7wYgVR6wAI=
|
||||
github.com/minio/crc64nvme v1.1.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
|
||||
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
|
||||
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
|
||||
github.com/minio/minio-go/v6 v6.0.46/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg=
|
||||
github.com/minio/minio-go/v7 v7.0.95 h1:ywOUPg+PebTMTzn9VDsoFJy32ZuARN9zhB+K3IYEvYU=
|
||||
github.com/minio/minio-go/v7 v7.0.95/go.mod h1:wOOX3uxS334vImCNRVyIDdXX9OsXDm89ToynKgqUKlo=
|
||||
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||
github.com/minio/xxml v0.0.3 h1:ZIpPQpfyG5uZQnqqC0LZuWtPk/WT8G/qkxvO6jb7zMU=
|
||||
github.com/minio/xxml v0.0.3/go.mod h1:wcXErosl6IezQIMEWSK/LYC2VS7LJ1dAkgvuyIN3aH4=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
@@ -491,8 +486,8 @@ github.com/onsi/ginkgo/v2 v2.17.3 h1:oJcvKpIb7/8uLpDDtnQuf18xVnwKp8DTD7DQ6gTd/MU
|
||||
github.com/onsi/ginkgo/v2 v2.17.3/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
|
||||
github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
|
||||
github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
|
||||
github.com/oracle/oci-go-sdk/v65 v65.98.0 h1:ZKsy97KezSiYSN1Fml4hcwjpO+wq01rjBkPqIiUejVc=
|
||||
github.com/oracle/oci-go-sdk/v65 v65.98.0/go.mod h1:RGiXfpDDmRRlLtqlStTzeBjjdUNXyqm3KXKyLCm3A/Q=
|
||||
github.com/oracle/oci-go-sdk/v65 v65.101.0 h1:EErMOuw98JXi0P7DgPg5zjouCA5s61iWD5tFWNCVLHk=
|
||||
github.com/oracle/oci-go-sdk/v65 v65.101.0/go.mod h1:RGiXfpDDmRRlLtqlStTzeBjjdUNXyqm3KXKyLCm3A/Q=
|
||||
github.com/panjf2000/ants/v2 v2.11.3 h1:AfI0ngBoXJmYOpDh9m516vjqoUu2sLrIVgppI9TZVpg=
|
||||
github.com/panjf2000/ants/v2 v2.11.3/go.mod h1:8u92CYMUc6gyvTIw8Ru7Mt7+/ESnJahz5EVtqfrilek=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
|
||||
@@ -519,31 +514,32 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
|
||||
github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
|
||||
github.com/pquerna/otp v1.5.0 h1:NMMR+WrmaqXU4EzdGJEE1aUUI0AMRzsp96fFFWNPwxs=
|
||||
github.com/pquerna/otp v1.5.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg=
|
||||
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
|
||||
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
|
||||
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
|
||||
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
|
||||
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
|
||||
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
|
||||
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
|
||||
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 h1:Y258uzXU/potCYnQd1r6wlAnoMB68BiCkCcCnKx1SH8=
|
||||
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8/go.mod h1:bSJjRokAHHOhA+XFxplld8w2R/dXLH7Z3BZ532vhFwU=
|
||||
github.com/quasilyte/go-ruleguard/dsl v0.3.22 h1:wd8zkOhSNr+I+8Qeciml08ivDt1pSXe60+5DqOpCjPE=
|
||||
github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
|
||||
github.com/quasilyte/go-ruleguard/dsl v0.3.23 h1:lxjt5B6ZCiBeeNO8/oQsegE6fLeCzuMRoVWSkXC4uvY=
|
||||
github.com/quasilyte/go-ruleguard/dsl v0.3.23/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
|
||||
github.com/quic-go/quic-go v0.53.0 h1:QHX46sISpG2S03dPeZBgVIZp8dGagIaiu2FiVYvpCZI=
|
||||
github.com/quic-go/quic-go v0.53.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY=
|
||||
github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93 h1:UVArwN/wkKjMVhh2EQGC0tEc1+FqiLlvYXY5mQ2f8Wg=
|
||||
github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93/go.mod h1:Nfe4efndBz4TibWycNE+lqyJZiMX4ycx+QKV8Ta0f/o=
|
||||
github.com/rclone/gofakes3 v0.0.4 h1:LswpC49VY/UJ1zucoL5ktnOEX6lq3qK7e1aFIAfqCbk=
|
||||
github.com/rclone/gofakes3 v0.0.4/go.mod h1:j/UoS+2/Mr7xAlfKhyVC58YyFQmh9uoQA5YZQXQUqmg=
|
||||
github.com/relvacode/iso8601 v1.6.0 h1:eFXUhMJN3Gz8Rcq82f9DTMW0svjtAVuIEULglM7QHTU=
|
||||
github.com/relvacode/iso8601 v1.6.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
|
||||
github.com/relvacode/iso8601 v1.7.0 h1:BXy+V60stMP6cpswc+a93Mq3e65PfXCgDFfhvNNGrdo=
|
||||
github.com/relvacode/iso8601 v1.7.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I=
|
||||
github.com/rfjakob/eme v1.1.2 h1:SxziR8msSOElPayZNFfQw4Tjx/Sbaeeh3eRvrHVMUs4=
|
||||
github.com/rfjakob/eme v1.1.2/go.mod h1:cVvpasglm/G3ngEfcfT/Wt0GwhkuO32pf/poW6Nyk1k=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
@@ -562,18 +558,17 @@ github.com/samber/lo v1.51.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRo
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df h1:S77Pf5fIGMa7oSwp8SQPp7Hb4ZiI38K3RNBKD2LLeEM=
|
||||
github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df/go.mod h1:dcuzJZ83w/SqN9k4eQqwKYMgmKWzg/KzJAURBhRL1tc=
|
||||
github.com/shirou/gopsutil/v4 v4.25.7 h1:bNb2JuqKuAu3tRlPv5piSmBZyMfecwQ+t/ILq+1JqVM=
|
||||
github.com/shirou/gopsutil/v4 v4.25.7/go.mod h1:XV/egmwJtd3ZQjBpJVY5kndsiOO4IRqy9TQnmm6VP7U=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/shirou/gopsutil/v4 v4.25.8 h1:NnAsw9lN7587WHxjJA9ryDnqhJpFH6A+wagYWTOH970=
|
||||
github.com/shirou/gopsutil/v4 v4.25.8/go.mod h1:q9QdMmfAOVIw7a+eF86P7ISEU6ka+NLgkUxlopV4RwI=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY=
|
||||
github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec=
|
||||
github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY=
|
||||
github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60=
|
||||
github.com/snabb/httpreaderat v1.0.1 h1:whlb+vuZmyjqVop8x1EKOg05l2NE4z9lsMMXjmSUCnY=
|
||||
github.com/snabb/httpreaderat v1.0.1/go.mod h1:lpbGrKDWF37yvRbtRvQsbesS6Ty5c83t8ztannPoMsA=
|
||||
github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
|
||||
@@ -581,13 +576,12 @@ github.com/sony/gobreaker v1.0.0 h1:feX5fGGXSl3dYd4aHZItw+FpHLvvoaqkawKjVNiFMNQ=
|
||||
github.com/sony/gobreaker v1.0.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
|
||||
github.com/spacemonkeygo/monkit/v3 v3.0.24 h1:cKixJ+evHnfJhWNyIZjBy5hoW8LTWmrJXPo18tzLNrk=
|
||||
github.com/spacemonkeygo/monkit/v3 v3.0.24/go.mod h1:XkZYGzknZwkD0AKUnZaSXhRiVTLCkq7CWVa3IsE72gA=
|
||||
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
|
||||
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
|
||||
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M=
|
||||
github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
|
||||
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
|
||||
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
|
||||
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
@@ -602,13 +596,15 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5 h1:Sa+sR8aaAMFwxhXWENEnE6ZpqhZ9d7u1RT2722Rw6hc=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5/go.mod h1:UdZiFUFu6e2WjjtjxivwXWcwc1N/8zgbkBR9QNucUOY=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20250926104142-ccb8d3498e6c h1:BLopNCyqewbE8+BtlIp/Juzu8AJGxz0gHdGADnsblVc=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20250926104142-ccb8d3498e6c/go.mod h1:ykucQyiE9Q2qx1wLlEtZkkNn1IURib/2O+Mvd25i1Fo=
|
||||
github.com/tailscale/depaware v0.0.0-20210622194025-720c4b409502/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8=
|
||||
github.com/tinylib/msgp v1.3.0 h1:ULuf7GPooDaIlbyvgAxBV/FI7ynli6LZ1/nVUNu+0ww=
|
||||
github.com/tinylib/msgp v1.3.0/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0=
|
||||
github.com/tinylib/msgp v1.4.0 h1:SYOeDRiydzOw9kSiwdYp9UcBgPFtLU2WDHaJXyHruf8=
|
||||
github.com/tinylib/msgp v1.4.0/go.mod h1:cvjFkb4RiC8qSBOPMGPSzSAx47nAsfhLVTCZZNuHv5o=
|
||||
github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4=
|
||||
github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4=
|
||||
github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso=
|
||||
@@ -625,8 +621,8 @@ github.com/willscott/go-nfs v0.0.3 h1:Z5fHVxMsppgEucdkKBN26Vou19MtEM875NmRwj156R
|
||||
github.com/willscott/go-nfs v0.0.3/go.mod h1:VhNccO67Oug787VNXcyx9JDI3ZoSpqoKMT/lWMhUIDg=
|
||||
github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00 h1:U0DnHRZFzoIV1oFEZczg5XyPut9yxk9jjtax/9Bxr/o=
|
||||
github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00/go.mod h1:Tq++Lr/FgiS3X48q5FETemXiSLGuYMQT2sPjYNPJSwA=
|
||||
github.com/winfsp/cgofuse v1.6.0 h1:re3W+HTd0hj4fISPBqfsrwyvPFpzqhDu8doJ9nOPDB0=
|
||||
github.com/winfsp/cgofuse v1.6.0/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I=
|
||||
github.com/winfsp/cgofuse v1.6.1-0.20250813110601-7d90b0992471 h1:aSOo0k+aLWdhUQiUxzv4cZ7cUp3OLP+Qx7cjs6OUxME=
|
||||
github.com/winfsp/cgofuse v1.6.1-0.20250813110601-7d90b0992471/go.mod h1:uxjoF2jEYT3+x+vC2KJddEGdk/LU8pRowXmyVMHSV5I=
|
||||
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
|
||||
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
|
||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM=
|
||||
@@ -650,8 +646,8 @@ github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
|
||||
github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
|
||||
github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
|
||||
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
|
||||
go.etcd.io/bbolt v1.4.2 h1:IrUHp260R8c+zYx/Tm8QZr04CX+qWS5PGfPdevhdm1I=
|
||||
go.etcd.io/bbolt v1.4.2/go.mod h1:Is8rSHO/b4f3XigBC0lL0+4FwAQv3HXEEIgFMuKHceM=
|
||||
go.etcd.io/bbolt v1.4.3 h1:dEadXpI6G79deX5prL3QRNP6JB8UxVkqo4UPnHaNXJo=
|
||||
go.etcd.io/bbolt v1.4.3/go.mod h1:tKQlpPaYCVFctUIgFKFnAlvbmB3tpy1vkTnDWohtc0E=
|
||||
go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw=
|
||||
go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
@@ -659,35 +655,33 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 h1:Hf9xI/XLML9ElpiHVDNwvqI0hIFlzV8dgIr35kV1kRU=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0/go.mod h1:NfchwuyNoMcZ5MLHwPrODwUF1HWCXWrL31s8gSAdIKY=
|
||||
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
|
||||
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
|
||||
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
|
||||
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
|
||||
go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
|
||||
go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
|
||||
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
|
||||
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
|
||||
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
|
||||
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
|
||||
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
|
||||
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
|
||||
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
|
||||
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
|
||||
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
|
||||
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
|
||||
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
|
||||
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
|
||||
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
|
||||
goftp.io/server/v2 v2.0.1 h1:H+9UbCX2N206ePDSVNCjBftOKOgil6kQ5RAQNx5hJwE=
|
||||
goftp.io/server/v2 v2.0.1/go.mod h1:7+H/EIq7tXdfo1Muu5p+l3oQ6rYkDZ8lY7IM5d5kVdQ=
|
||||
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
|
||||
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
|
||||
goftp.io/server/v2 v2.0.2 h1:tkZpqyXys+vC15W5yGMi8Kzmbv1QSgeKr8qJXBnJbm8=
|
||||
goftp.io/server/v2 v2.0.2/go.mod h1:Fl1WdcV7fx1pjOWx7jEHb7tsJ8VwE7+xHu6bVJ6r2qg=
|
||||
golang.org/x/arch v0.14.0 h1:z9JUEZWr8x4rR0OU6c4/4t6E6jOZ8/QBS2bBYBm4tx4=
|
||||
golang.org/x/arch v0.14.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
@@ -701,8 +695,8 @@ golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
|
||||
golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
|
||||
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
|
||||
golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
|
||||
golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
@@ -713,8 +707,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20250811191247-51f88131bc50 h1:3yiSh9fhy5/RhCSntf4Sy0Tnx50DmMpQ4MQdKKk4yg4=
|
||||
golang.org/x/exp v0.0.0-20250811191247-51f88131bc50/go.mod h1:rT6SFzZ7oxADUDx58pcaKFTcZ+inxAa9fTrYx/uVYwg=
|
||||
golang.org/x/exp v0.0.0-20250911091902-df9299821621 h1:2id6c1/gto0kaHYyrixvknJ8tUK/Qs5IsmBtrc+FtgU=
|
||||
golang.org/x/exp v0.0.0-20250911091902-df9299821621/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
@@ -729,8 +723,8 @@ golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPI
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mobile v0.0.0-20250808145247-395d808d53cd h1:Qd7qm8Xr8riwtdI4F+SWrlnKK/7tLDyTQ5YNv42tvtU=
|
||||
golang.org/x/mobile v0.0.0-20250808145247-395d808d53cd/go.mod h1:Rg5Br31eIKqfc+43CRdWRfPfFqV9DjN92usHvW9563E=
|
||||
golang.org/x/mobile v0.0.0-20250911085028-6912353760cf h1:2HVicFltkNthxuudLg8n5TzyNVUESF91+X7+/fxEjSM=
|
||||
golang.org/x/mobile v0.0.0-20250911085028-6912353760cf/go.mod h1:tfwPrSLpQwNZm2LZ6L4ol2VGzxz+xdyj0fN+n4A50OQ=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
@@ -743,8 +737,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
|
||||
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
|
||||
golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U=
|
||||
golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -753,7 +747,6 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
@@ -784,16 +777,16 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
||||
golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
|
||||
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
|
||||
golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
|
||||
golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
|
||||
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
|
||||
golang.org/x/oauth2 v0.31.0 h1:8Fq0yVZLh4j4YA47vHKFTa9Ew5XIrCP8LC6UeNZnLxo=
|
||||
golang.org/x/oauth2 v0.31.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -810,13 +803,12 @@ golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -862,10 +854,9 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
|
||||
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
@@ -877,10 +868,9 @@ golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
|
||||
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
||||
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
|
||||
golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
|
||||
golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
|
||||
golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
|
||||
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
|
||||
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@@ -896,20 +886,19 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
|
||||
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
|
||||
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
|
||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
|
||||
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
|
||||
golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
@@ -952,12 +941,14 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
|
||||
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
|
||||
golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
|
||||
golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
@@ -974,8 +965,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
google.golang.org/api v0.247.0 h1:tSd/e0QrUlLsrwMKmkbQhYVa109qIintOls2Wh6bngc=
|
||||
google.golang.org/api v0.247.0/go.mod h1:r1qZOPmxXffXg6xS5uhx16Fa/UFY8QU/K4bfKrnvovM=
|
||||
google.golang.org/api v0.250.0 h1:qvkwrf/raASj82UegU2RSDGWi/89WkLckn4LuO4lVXM=
|
||||
google.golang.org/api v0.250.0/go.mod h1:Y9Uup8bDLJJtMzJyQnu+rLRJLA0wn+wTtc6vTlOvfXo=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
@@ -1013,10 +1004,10 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20250603155806-513f23925822 h1:rHWScKit0gvAPuOnu87KpaYtjK5zBMLcULh7gxkCXu4=
|
||||
google.golang.org/genproto v0.0.0-20250603155806-513f23925822/go.mod h1:HubltRL7rMh0LfnQPkMH4NPDFEWp0jw3vixw7jEM53s=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250811230008-5f3141c8851a h1:tPE/Kp+x9dMSwUm/uM0JKK0IfdiJkwAbSMSeZBXXJXc=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250811230008-5f3141c8851a/go.mod h1:gw1tLEfykwDz2ET4a12jcXt4couGAm7IwsVaTy0Sflo=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 h1:FiusG7LWj+4byqhbvmB+Q93B/mOxJLN2DTozDuZm4EU=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:kXqgZtrWaf6qS3jZOCnCH7WYfrvFjkC51bM8fz3RsCA=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250922171735-9219d122eba9 h1:V1jCN2HBa8sySkR5vLcCSqJSTMv093Rw9EJefhQGP7M=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250922171735-9219d122eba9/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
@@ -1029,8 +1020,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4=
|
||||
google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM=
|
||||
google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI=
|
||||
google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@@ -1041,14 +1032,13 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A=
|
||||
google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw=
|
||||
google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
@@ -1075,8 +1065,8 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
|
||||
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
|
||||
storj.io/common v0.0.0-20250808122759-804533d519c1 h1:z7ZjU+TlPZ2Lq2S12hT6+Fr7jFsBxPMrPBH4zZpZuUA=
|
||||
storj.io/common v0.0.0-20250808122759-804533d519c1/go.mod h1:YNr7/ty6CmtpG5C9lEPtPXK3hOymZpueCb9QCNuPMUY=
|
||||
storj.io/common v0.0.0-20250918032746-784a656bec7e h1:wBeNT7CA1Qwnm8jGP+mKp/IW12vhytCGjVSCKeEF6xM=
|
||||
storj.io/common v0.0.0-20250918032746-784a656bec7e/go.mod h1:YNr7/ty6CmtpG5C9lEPtPXK3hOymZpueCb9QCNuPMUY=
|
||||
storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55 h1:8OE12DvUnB9lfZcHe7IDGsuhjrY9GBAr964PVHmhsro=
|
||||
storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55/go.mod h1:Y9LZaa8esL1PW2IDMqJE7CFSNq7d5bQ3RI7mGPtmKMg=
|
||||
storj.io/eventkit v0.0.0-20250410172343-61f26d3de156 h1:5MZ0CyMbG6Pi0rRzUWVG6dvpXjbBYEX2oyXuj+tT+sk=
|
||||
|
||||
@@ -20,7 +20,7 @@ You can either use an htpasswd file which can take lots of users, or
|
||||
set a single username and password with the ` + "`--{{ .Prefix }}user` and `--{{ .Prefix }}pass`" + ` flags.
|
||||
|
||||
Alternatively, you can have the reverse proxy manage authentication and use the
|
||||
username provided in the configured header with ` + "`--user-from-header`" + ` (e.g., ` + "`--{{ .Prefix }}--user-from-header=x-remote-user`" + `).
|
||||
username provided in the configured header with ` + "`--user-from-header`" + ` (e.g., ` + "`--{{ .Prefix }}user-from-header=x-remote-user`" + `).
|
||||
Ensure the proxy is trusted and headers cannot be spoofed, as misconfiguration
|
||||
may lead to unauthorized access.
|
||||
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
type DirEntry struct {
|
||||
remote string
|
||||
URL string
|
||||
ZipURL string
|
||||
Leaf string
|
||||
IsDir bool
|
||||
Size int64
|
||||
@@ -32,6 +33,8 @@ type Directory struct {
|
||||
DirRemote string
|
||||
Title string
|
||||
Name string
|
||||
ZipURL string
|
||||
DisableZip bool
|
||||
Entries []DirEntry
|
||||
Query string
|
||||
HTMLTemplate *template.Template
|
||||
@@ -70,6 +73,7 @@ func NewDirectory(dirRemote string, htmlTemplate *template.Template) *Directory
|
||||
DirRemote: dirRemote,
|
||||
Title: fmt.Sprintf("Directory listing of /%s", dirRemote),
|
||||
Name: fmt.Sprintf("/%s", dirRemote),
|
||||
ZipURL: "?download=zip",
|
||||
HTMLTemplate: htmlTemplate,
|
||||
Breadcrumb: breadcrumb,
|
||||
}
|
||||
@@ -99,11 +103,15 @@ func (d *Directory) AddHTMLEntry(remote string, isDir bool, size int64, modTime
|
||||
d.Entries = append(d.Entries, DirEntry{
|
||||
remote: remote,
|
||||
URL: rest.URLPathEscape(urlRemote) + d.Query,
|
||||
ZipURL: "",
|
||||
Leaf: leaf,
|
||||
IsDir: isDir,
|
||||
Size: size,
|
||||
ModTime: modTime,
|
||||
})
|
||||
if isDir {
|
||||
d.Entries[len(d.Entries)-1].ZipURL = rest.URLPathEscape(urlRemote) + "?download=zip"
|
||||
}
|
||||
}
|
||||
|
||||
// AddEntry adds an entry to that directory
|
||||
|
||||
@@ -46,11 +46,11 @@ func TestAddHTMLEntry(t *testing.T) {
|
||||
d.AddHTMLEntry("a/b/c/colon:colon.txt", false, 64, modtime)
|
||||
d.AddHTMLEntry("\"quotes\".txt", false, 64, modtime)
|
||||
assert.Equal(t, []DirEntry{
|
||||
{remote: "", URL: "/", Leaf: "/", IsDir: true, Size: 0, ModTime: modtime},
|
||||
{remote: "dir", URL: "dir/", Leaf: "dir/", IsDir: true, Size: 0, ModTime: modtime},
|
||||
{remote: "a/b/c/d.txt", URL: "d.txt", Leaf: "d.txt", IsDir: false, Size: 64, ModTime: modtime},
|
||||
{remote: "a/b/c/colon:colon.txt", URL: "./colon:colon.txt", Leaf: "colon:colon.txt", IsDir: false, Size: 64, ModTime: modtime},
|
||||
{remote: "\"quotes\".txt", URL: "%22quotes%22.txt", Leaf: "\"quotes\".txt", Size: 64, IsDir: false, ModTime: modtime},
|
||||
{remote: "", URL: "/", ZipURL: "/?download=zip", Leaf: "/", IsDir: true, Size: 0, ModTime: modtime},
|
||||
{remote: "dir", URL: "dir/", ZipURL: "dir/?download=zip", Leaf: "dir/", IsDir: true, Size: 0, ModTime: modtime},
|
||||
{remote: "a/b/c/d.txt", URL: "d.txt", ZipURL: "", Leaf: "d.txt", IsDir: false, Size: 64, ModTime: modtime},
|
||||
{remote: "a/b/c/colon:colon.txt", URL: "./colon:colon.txt", ZipURL: "", Leaf: "colon:colon.txt", IsDir: false, Size: 64, ModTime: modtime},
|
||||
{remote: "\"quotes\".txt", URL: "%22quotes%22.txt", ZipURL: "", Leaf: "\"quotes\".txt", Size: 64, IsDir: false, ModTime: modtime},
|
||||
}, d.Entries)
|
||||
|
||||
// Now test with a query parameter
|
||||
@@ -58,8 +58,8 @@ func TestAddHTMLEntry(t *testing.T) {
|
||||
d.AddHTMLEntry("file", false, 64, modtime)
|
||||
d.AddHTMLEntry("dir", true, 0, modtime)
|
||||
assert.Equal(t, []DirEntry{
|
||||
{remote: "file", URL: "file?potato=42", Leaf: "file", IsDir: false, Size: 64, ModTime: modtime},
|
||||
{remote: "dir", URL: "dir/?potato=42", Leaf: "dir/", IsDir: true, Size: 0, ModTime: modtime},
|
||||
{remote: "file", URL: "file?potato=42", ZipURL: "", Leaf: "file", IsDir: false, Size: 64, ModTime: modtime},
|
||||
{remote: "dir", URL: "dir/?potato=42", ZipURL: "dir/?download=zip", Leaf: "dir/", IsDir: true, Size: 0, ModTime: modtime},
|
||||
}, d.Entries)
|
||||
}
|
||||
|
||||
|
||||
@@ -59,6 +59,8 @@ inserts leading and trailing "/" on ` + "`--{{ .Prefix }}baseurl`" + `, so ` + "
|
||||
` + "`--{{ .Prefix }}baseurl \"/rclone\"` and `--{{ .Prefix }}baseurl \"/rclone/\"`" + ` are all treated
|
||||
identically.
|
||||
|
||||
` + "`--{{ .Prefix }}disable-zip`" + ` may be set to disable the zipping download option.
|
||||
|
||||
#### TLS (SSL)
|
||||
|
||||
By default this will serve over http. If you want you can serve over
|
||||
|
||||
@@ -21,7 +21,7 @@ Modifications: Adapted to rclone markup -->
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<meta name="google" content="notranslate">
|
||||
<style>
|
||||
<style>/
|
||||
* { padding: 0; margin: 0; }
|
||||
body {
|
||||
font-family: sans-serif;
|
||||
@@ -187,6 +187,19 @@ footer {
|
||||
max-width: 100px;
|
||||
}
|
||||
}
|
||||
td .zip {
|
||||
opacity: 0;
|
||||
margin-left: 6px;
|
||||
transition: opacity 0.15s ease-in-out;
|
||||
}
|
||||
tr.file:hover td .zip {
|
||||
opacity: 1;
|
||||
}
|
||||
.zip-root {
|
||||
margin-left: 8px;
|
||||
vertical-align: middle;
|
||||
opacity: 1;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body onload='filter();toggle("order");changeSize()'>
|
||||
@@ -206,6 +219,9 @@ footer {
|
||||
<path d="M126.154134,250.559184 C126.850974,251.883673 127.300549,253.006122 127.772602,254.106122 C128.469442,255.206122 128.919016,256.104082 129.638335,257.002041 C130.559962,258.326531 131.728855,259 133.100057,259 C134.493737,259 135.415364,258.55102 136.112204,257.67551 C136.809044,257.002041 137.258619,255.902041 137.258619,254.577551 C137.258619,253.904082 137.258619,252.804082 137.033832,251.457143 C136.786566,249.908163 136.561779,249.032653 136.561779,248.583673 C136.089726,242.814286 135.864939,237.920408 135.864939,233.273469 C135.864939,225.057143 136.786566,217.514286 138.180246,210.846939 C139.798713,204.202041 141.889234,198.634694 144.429328,193.763265 C147.216689,188.869388 150.678411,184.873469 154.836973,181.326531 C158.995535,177.779592 163.626149,174.883673 168.481552,172.661224 C173.336954,170.438776 179.113983,168.665306 185.587852,167.340816 C192.061722,166.218367 198.760378,165.342857 205.481514,164.669388 C212.18017,164.220408 219.598146,163.995918 228.162535,163.995918 L246.055591,163.995918 L246.055591,195.514286 C246.055591,197.736735 246.752431,199.510204 248.370899,201.059184 C250.214153,202.608163 252.079886,203.506122 254.372715,203.506122 C256.463236,203.506122 258.531277,202.608163 260.172223,201.059184 L326.102289,137.797959 C327.720757,136.24898 328.642384,134.47551 328.642384,132.253061 C328.642384,130.030612 327.720757,128.257143 326.102289,126.708163 L260.172223,63.4469388 C258.553756,61.8979592 256.463236,61 254.395194,61 C252.079886,61 250.236632,61.8979592 248.393377,63.4469388 C246.77491,64.9959184 246.07807,66.7693878 246.07807,68.9918367 L246.07807,100.510204 L228.162535,100.510204 C166.863084,100.510204 129.166282,117.167347 115.274437,150.459184 C110.666301,161.54898 108.350993,175.310204 108.350993,191.742857 C108.350993,205.279592 113.903236,223.912245 124.760454,247.438776 C125.00772,248.112245 125.457294,249.010204 126.154134,250.559184 Z" id="Shape" fill="#FFFFFF" transform="translate(218.496689, 160.000000) 
scale(-1, 1) translate(-218.496689, -160.000000) "></path>
|
||||
</g>
|
||||
</g>
|
||||
<g id="zip-folder">
|
||||
<path d="M640-480v-80h80v80h-80Zm0 80h-80v-80h80v80Zm0 80v-80h80v80h-80ZM447-640l-80-80H160v480h400v-80h80v80h160v-400H640v80h-80v-80H447ZM160-160q-33 0-56.5-23.5T80-240v-480q0-33 23.5-56.5T160-800h240l80 80h320q33 0 56.5 23.5T880-640v400q0 33-23.5 56.5T800-160H160Zm0-80v-480 480Z"/>
|
||||
</g>
|
||||
<!-- File -->
|
||||
<g id="file" stroke="#000" stroke-width="25" fill="#FFF" fill-rule="evenodd" stroke-linecap="round" stroke-linejoin="round">
|
||||
<path d="M13 24.12v274.76c0 6.16 5.87 11.12 13.17 11.12H239c7.3 0 13.17-4.96 13.17-11.12V136.15S132.6 13 128.37 13H26.17C18.87 13 13 17.96 13 24.12z"/>
|
||||
@@ -233,6 +249,15 @@ footer {
|
||||
<header>
|
||||
<h1>
|
||||
{{range $i, $crumb := .Breadcrumb}}<a href="{{html $crumb.Link}}">{{html $crumb.Text}}</a>{{if ne $i 0}}/{{end}}{{end}}
|
||||
|
||||
{{- if not .DisableZip}}
|
||||
<a class="zip-root" href="{{html .ZipURL}}" title="Download root as .zip">
|
||||
<svg width="1.5em" height="1.5em" viewBox="0 -960 960 960">
|
||||
<use xlink:href="#zip-folder"></use>
|
||||
</svg>
|
||||
</a>
|
||||
{{- end}}
|
||||
|
||||
</h1>
|
||||
</header>
|
||||
<main>
|
||||
@@ -283,6 +308,13 @@ footer {
|
||||
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 265 323"><use xlink:href="#file"></use></svg>
|
||||
{{- end}}
|
||||
<span class="name"><a href="{{html .URL}}">{{html .Leaf}}</a></span>
|
||||
{{- if and .IsDir (not $.DisableZip)}}
|
||||
<a class="zip" href="{{html .ZipURL}}" title="Download folder as .zip">
|
||||
<svg width="1.5em" height="1.5em" viewBox="0 -960 960 960">
|
||||
<use xlink:href="#zip-folder"></use>
|
||||
</svg>
|
||||
</a>
|
||||
{{- end}}
|
||||
</td>
|
||||
{{- if .IsDir}}
|
||||
<td data-order="-1">—</td>
|
||||
|
||||
@@ -250,9 +250,7 @@ func (ts *TokenSource) reReadToken() (changed bool) {
|
||||
return false
|
||||
}
|
||||
|
||||
if !newToken.Valid() {
|
||||
fs.Debugf(ts.name, "Loaded invalid token from config file - ignoring")
|
||||
} else {
|
||||
if newToken.Valid() {
|
||||
fs.Debugf(ts.name, "Loaded fresh token from config file")
|
||||
changed = true
|
||||
}
|
||||
@@ -264,6 +262,8 @@ func (ts *TokenSource) reReadToken() (changed bool) {
|
||||
if changed {
|
||||
ts.token = newToken
|
||||
ts.tokenSource = nil // invalidate since we changed the token
|
||||
} else {
|
||||
fs.Debugf(ts.name, "No updated token found in the config file")
|
||||
}
|
||||
return changed
|
||||
}
|
||||
@@ -319,6 +319,8 @@ func (ts *TokenSource) Token() (*oauth2.Token, error) {
|
||||
return ts.token, nil
|
||||
}
|
||||
|
||||
fs.Debug(ts.name, "Token expired")
|
||||
|
||||
// Try getting the token a few times
|
||||
for i := 1; i <= maxTries; i++ {
|
||||
// Try reading the token from the config file in case it has
|
||||
@@ -344,6 +346,7 @@ func (ts *TokenSource) Token() (*oauth2.Token, error) {
|
||||
|
||||
token, err = ts.tokenSource.Token()
|
||||
if err == nil {
|
||||
fs.Debug(ts.name, "Token refresh successful")
|
||||
break
|
||||
}
|
||||
if newErr := maybeWrapOAuthError(err, ts.name); newErr != err {
|
||||
|
||||
@@ -47,16 +47,14 @@ func (r *Renew) renewOnExpiry() {
|
||||
}
|
||||
uploads := r.uploads.Load()
|
||||
if uploads != 0 {
|
||||
fs.Debugf(r.name, "Token expired - %d uploads in progress - refreshing", uploads)
|
||||
fs.Debugf(r.name, "Background refresher detected expired token - %d uploads in progress - refreshing", uploads)
|
||||
// Do a transaction
|
||||
err := r.run()
|
||||
if err == nil {
|
||||
fs.Debugf(r.name, "Token refresh successful")
|
||||
} else {
|
||||
fs.Errorf(r.name, "Token refresh failed: %v", err)
|
||||
if err != nil {
|
||||
fs.Errorf(r.name, "Background token refresher failed: %v", err)
|
||||
}
|
||||
} else {
|
||||
fs.Debugf(r.name, "Token expired but no uploads in progress - doing nothing")
|
||||
fs.Debugf(r.name, "Background refresher detected expired token but no uploads in progress - doing nothing")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
73
vfs/zip.go
Normal file
73
vfs/zip.go
Normal file
@@ -0,0 +1,73 @@
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
// CreateZip creates a zip file from a vfs.Dir writing it to w
|
||||
func CreateZip(ctx context.Context, dir *Dir, w io.Writer) (err error) {
|
||||
zipWriter := zip.NewWriter(w)
|
||||
defer fs.CheckClose(zipWriter, &err)
|
||||
var walk func(dir *Dir, root string) error
|
||||
walk = func(dir *Dir, root string) error {
|
||||
nodes, err := dir.ReadDirAll()
|
||||
if err != nil {
|
||||
return fmt.Errorf("create zip directory read: %w", err)
|
||||
}
|
||||
for _, node := range nodes {
|
||||
switch e := node.(type) {
|
||||
case *File:
|
||||
in, err := e.Open(os.O_RDONLY)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create zip open file: %w", err)
|
||||
}
|
||||
header := &zip.FileHeader{
|
||||
Name: root + e.Name(),
|
||||
Method: zip.Deflate,
|
||||
Modified: e.ModTime(),
|
||||
}
|
||||
fileWriter, err := zipWriter.CreateHeader(header)
|
||||
if err != nil {
|
||||
fs.CheckClose(in, &err)
|
||||
return fmt.Errorf("create zip file header: %w", err)
|
||||
}
|
||||
_, err = io.Copy(fileWriter, in)
|
||||
if err != nil {
|
||||
fs.CheckClose(in, &err)
|
||||
return fmt.Errorf("create zip copy: %w", err)
|
||||
}
|
||||
fs.CheckClose(in, &err)
|
||||
case *Dir:
|
||||
name := root + e.Path()
|
||||
if name != "" && name[len(name)-1] != '/' {
|
||||
name += "/"
|
||||
}
|
||||
header := &zip.FileHeader{
|
||||
Name: name,
|
||||
Method: zip.Store,
|
||||
Modified: e.ModTime(),
|
||||
}
|
||||
_, err := zipWriter.CreateHeader(header)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create zip directory header: %w", err)
|
||||
}
|
||||
err = walk(e, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
err = walk(dir, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
160
vfs/zip_test.go
Normal file
160
vfs/zip_test.go
Normal file
@@ -0,0 +1,160 @@
|
||||
package vfs
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func readZip(t *testing.T, buf *bytes.Buffer) *zip.Reader {
|
||||
t.Helper()
|
||||
r, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
|
||||
require.NoError(t, err)
|
||||
return r
|
||||
}
|
||||
|
||||
func mustCreateZip(t *testing.T, d *Dir) *bytes.Buffer {
|
||||
t.Helper()
|
||||
var buf bytes.Buffer
|
||||
require.NoError(t, CreateZip(context.Background(), d, &buf))
|
||||
return &buf
|
||||
}
|
||||
|
||||
func zipReadFile(t *testing.T, zr *zip.Reader, match func(name string) bool) ([]byte, string) {
|
||||
t.Helper()
|
||||
for _, f := range zr.File {
|
||||
if strings.HasSuffix(f.Name, "/") {
|
||||
continue
|
||||
}
|
||||
if match(f.Name) {
|
||||
rc, err := f.Open()
|
||||
require.NoError(t, err)
|
||||
defer func() { require.NoError(t, rc.Close()) }()
|
||||
b, err := io.ReadAll(rc)
|
||||
require.NoError(t, err)
|
||||
return b, f.Name
|
||||
}
|
||||
}
|
||||
t.Fatalf("zip entry matching predicate not found")
|
||||
return nil, ""
|
||||
}
|
||||
|
||||
func TestZipManyFiles(t *testing.T) {
|
||||
r, vfs := newTestVFS(t)
|
||||
|
||||
const N = 5
|
||||
want := make(map[string]string, N)
|
||||
items := make([]fstest.Item, 0, N)
|
||||
|
||||
for i := range N {
|
||||
name := fmt.Sprintf("flat/f%03d.txt", i)
|
||||
data := strings.Repeat(fmt.Sprintf("line-%d\n", i), (i%5)+1)
|
||||
it := r.WriteObject(context.Background(), name, data, t1)
|
||||
items = append(items, it)
|
||||
want[name[strings.LastIndex(name, "/")+1:]] = data
|
||||
}
|
||||
r.CheckRemoteItems(t, items...)
|
||||
|
||||
node, err := vfs.Stat("flat")
|
||||
require.NoError(t, err)
|
||||
dir := node.(*Dir)
|
||||
|
||||
buf := mustCreateZip(t, dir)
|
||||
zr := readZip(t, buf)
|
||||
|
||||
// count only file entries (skip dir entries with trailing "/")
|
||||
files := 0
|
||||
for _, f := range zr.File {
|
||||
if !strings.HasSuffix(f.Name, "/") {
|
||||
files++
|
||||
}
|
||||
}
|
||||
require.Equal(t, N, files)
|
||||
|
||||
// validate contents by base name
|
||||
for base, data := range want {
|
||||
got, _ := zipReadFile(t, zr, func(name string) bool { return name == base })
|
||||
require.Equal(t, data, string(got), "mismatch for %s", base)
|
||||
}
|
||||
}
|
||||
|
||||
func TestZipManySubDirs(t *testing.T) {
|
||||
r, vfs := newTestVFS(t)
|
||||
|
||||
r.WriteObject(context.Background(), "a/top.txt", "top", t1)
|
||||
r.WriteObject(context.Background(), "a/b/mid.txt", "mid", t1)
|
||||
r.WriteObject(context.Background(), "a/b/c/deep.txt", "deep", t1)
|
||||
|
||||
node, err := vfs.Stat("a")
|
||||
require.NoError(t, err)
|
||||
dir := node.(*Dir)
|
||||
|
||||
buf := mustCreateZip(t, dir)
|
||||
zr := readZip(t, buf)
|
||||
|
||||
// paths may include directory prefixes; assert by suffix
|
||||
got, name := zipReadFile(t, zr, func(n string) bool { return strings.HasSuffix(n, "/top.txt") || n == "top.txt" })
|
||||
require.Equal(t, "top", string(got), "bad content for %s", name)
|
||||
|
||||
got, name = zipReadFile(t, zr, func(n string) bool { return strings.HasSuffix(n, "/mid.txt") || n == "mid.txt" })
|
||||
require.Equal(t, "mid", string(got), "bad content for %s", name)
|
||||
|
||||
got, name = zipReadFile(t, zr, func(n string) bool { return strings.HasSuffix(n, "/deep.txt") || n == "deep.txt" })
|
||||
require.Equal(t, "deep", string(got), "bad content for %s", name)
|
||||
}
|
||||
|
||||
func TestZipLargeFiles(t *testing.T) {
|
||||
r, vfs := newTestVFS(t)
|
||||
|
||||
if strings.HasPrefix(r.Fremote.Name(), "TestChunker") {
|
||||
t.Skip("skipping test as chunker too slow")
|
||||
}
|
||||
|
||||
data := random.String(5 * 1024 * 1024)
|
||||
sum := sha256.Sum256([]byte(data))
|
||||
|
||||
r.WriteObject(context.Background(), "bigdir/big.bin", data, t1)
|
||||
|
||||
node, err := vfs.Stat("bigdir")
|
||||
require.NoError(t, err)
|
||||
dir := node.(*Dir)
|
||||
|
||||
buf := mustCreateZip(t, dir)
|
||||
zr := readZip(t, buf)
|
||||
|
||||
got, _ := zipReadFile(t, zr, func(n string) bool { return n == "big.bin" || strings.HasSuffix(n, "/big.bin") })
|
||||
require.Equal(t, sum, sha256.Sum256(got))
|
||||
}
|
||||
|
||||
func TestZipDirsInRoot(t *testing.T) {
|
||||
r, vfs := newTestVFS(t)
|
||||
|
||||
r.WriteObject(context.Background(), "dir1/a.txt", "x", t1)
|
||||
r.WriteObject(context.Background(), "dir2/b.txt", "y", t1)
|
||||
r.WriteObject(context.Background(), "dir3/c.txt", "z", t1)
|
||||
|
||||
root, err := vfs.Root()
|
||||
require.NoError(t, err)
|
||||
|
||||
buf := mustCreateZip(t, root)
|
||||
zr := readZip(t, buf)
|
||||
|
||||
// Check each file exists (ignore exact directory-entry names)
|
||||
gx, _ := zipReadFile(t, zr, func(n string) bool { return strings.HasSuffix(n, "/a.txt") })
|
||||
require.Equal(t, "x", string(gx))
|
||||
|
||||
gy, _ := zipReadFile(t, zr, func(n string) bool { return strings.HasSuffix(n, "/b.txt") })
|
||||
require.Equal(t, "y", string(gy))
|
||||
|
||||
gz, _ := zipReadFile(t, zr, func(n string) bool { return strings.HasSuffix(n, "/c.txt") })
|
||||
require.Equal(t, "z", string(gz))
|
||||
}
|
||||
Reference in New Issue
Block a user