Mirror of https://github.com/rclone/rclone.git (synced 2025-12-20 18:23:31 +00:00)

Compare commits: cluster...jotta-auth (3 commits)

| Author | SHA1 | Date |
|---|---|---|
| | f838c2a2f2 | |
| | 49103c7348 | |
| | 697874e399 | |
.github/workflows/build.yml (vendored, 2 lines changed)
@@ -291,7 +291,7 @@ jobs:
           README.md
           RELEASE.md
           CODE_OF_CONDUCT.md
-          docs/content/{authors,bugs,changelog,cluster,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md
+          docs/content/{authors,bugs,changelog,docs,downloads,faq,filtering,gui,install,licence,overview,privacy}.md
 
       - name: Scan edits of autogenerated files
         run: bin/check_autogenerated_edits.py 'origin/${{ github.base_ref }}'
@@ -628,7 +628,7 @@ You'll need to modify the following files
 - `backend/s3/s3.go`
   - Add the provider to `providerOption` at the top of the file
   - Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
-  - Exclude your provider from generic config questions (eg `region` and `endpoint`).
+  - Exclude your provider from generic config questions (eg `region` and `endpoint).
   - Add the provider to the `setQuirks` function - see the documentation there.
 - `docs/content/s3.md`
   - Add the provider at the top of the page.
@@ -50,7 +50,6 @@ directories to and from different cloud storage providers.
 - Google Drive [:page_facing_up:](https://rclone.org/drive/)
 - Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
 - HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
-- Hetzner Object Storage [:page_facing_up:](https://rclone.org/s3/#hetzner)
 - Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
 - HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
 - HTTP [:page_facing_up:](https://rclone.org/http/)
@@ -96,7 +95,6 @@ directories to and from different cloud storage providers.
 - Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
 - QingStor [:page_facing_up:](https://rclone.org/qingstor/)
 - Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
-- Rabata Cloud Storage [:page_facing_up:](https://rclone.org/s3/#Rabata)
 - Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
 - Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
 - RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
@@ -252,9 +252,6 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
		}, {
			Value: "us-east4",
			Help:  "Northern Virginia",
-		}, {
-			Value: "us-east5",
-			Help:  "Ohio",
		}, {
			Value: "us-west1",
			Help:  "Oregon",
@@ -78,22 +78,20 @@ type service struct {
	scopes []string
 }
 
-// The list of services and their settings for supporting traditional OAuth.
-// Please keep these in alphabetical order, but with jottacloud first.
 func getServices() []service {
	return []service{
		{"jottacloud", "Jottacloud", "id.jottacloud.com", "jottacloud", "desktop", []string{"openid", "jotta-default", "offline_access"}}, // Chose client id "desktop" here, will be identified as "Jottacloud for Desktop" in "My logged in devices", but could have used "jottacli" here as well.
+		{"elkjop", "Elkjøp Cloud (Norway)", "cloud.elkjop.no", "elkjop", "desktop", []string{"openid", "jotta-default", "offline_access"}},
		{"elgiganten_dk", "Elgiganten Cloud (Denmark)", "cloud.elgiganten.dk", "elgiganten", "desktop", []string{"openid", "jotta-default", "offline_access"}},
		{"elgiganten_se", "Elgiganten Cloud (Sweden)", "cloud.elgiganten.se", "elgiganten", "desktop", []string{"openid", "jotta-default", "offline_access"}},
-		{"elkjop", "Elkjøp Cloud (Norway)", "cloud.elkjop.no", "elkjop", "desktop", []string{"openid", "jotta-default", "offline_access"}},
		{"elko", "ELKO Cloud (Iceland)", "cloud.elko.is", "elko", "desktop", []string{"openid", "jotta-default", "offline_access"}},
		{"gigantti", "Gigantti Cloud (Finland)", "cloud.gigantti.fi", "gigantti", "desktop", []string{"openid", "jotta-default", "offline_access"}},
-		{"letsgo", "Let's Go Cloud (Germany)", "letsgo.jotta.cloud", "letsgo", "desktop-win", []string{"openid", "offline_access"}},
-		{"mediamarkt", "MediaMarkt Cloud (Multiregional)", "mediamarkt.jottacloud.com", "mediamarkt", "desktop", []string{"openid", "jotta-default", "offline_access"}},
-		{"onlime", "Onlime (Denmark)", "cloud-auth.onlime.dk", "onlime_wl", "desktop", []string{"openid", "jotta-default", "offline_access"}},
-		{"tele2", "Tele2 Cloud (Sweden)", "mittcloud-auth.tele2.se", "comhem", "desktop", []string{"openid", "jotta-default", "offline_access"}},
-		{"telia_no", "Telia Sky (Norway)", "sky-auth.telia.no", "get", "desktop", []string{"openid", "jotta-default", "offline_access"}},
		{"telia_se", "Telia Cloud (Sweden)", "cloud-auth.telia.se", "telia_se", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"telia_no", "Telia Sky (Norway)", "sky-auth.telia.no", "get", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"tele2", "Tele2 Cloud (Sweden)", "mittcloud-auth.tele2.se", "comhem", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"onlime", "Onlime (Denmark)", "cloud-auth.onlime.dk", "onlime_wl", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"mediamarkt", "MediaMarkt Cloud", "mediamarkt.jottacloud.com", "mediamarkt", "desktop", []string{"openid", "jotta-default", "offline_access"}},
+		{"letsgo", "Let's Go Cloud (Germany)", "letsgo.jotta.cloud", "letsgo", "desktop-win", []string{"openid", "offline_access"}},
	}
 }
 
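An aside on the service tuples above: the struct fields are not named in this hunk, so the following reading, (config name, display name, auth host, realm, client id, scopes), and the Keycloak-style discovery URL it implies are assumptions for illustration, not the backend's actual code:

```go
package main

import "fmt"

// Hypothetical field names inferred from the literals in the hunk above.
type service struct {
	name, display, host, realm, clientID string
	scopes                               []string
}

func main() {
	svc := service{"telia_se", "Telia Cloud (Sweden)", "cloud-auth.telia.se", "telia_se", "desktop",
		[]string{"openid", "jotta-default", "offline_access"}}
	// Assumed Keycloak layout for the OpenID Connect discovery document.
	fmt.Printf("https://%s/auth/realms/%s/.well-known/openid-configuration\n", svc.host, svc.realm)
}
```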
@@ -181,24 +179,22 @@ func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.Config
		return fs.ConfigChooseExclusiveFixed("auth_type_done", "config_type", `Type of authentication.`, []fs.OptionExample{{
			Value: "standard",
			Help: `Standard authentication.
-This is primarily supported by the official service, but may also be
-supported by some white-label services. It is designed for command-line
-applications, and you will be asked to enter a single-use personal login
-token which you must manually generate from the account security settings
-in the web interface of your service.`,
+This is primarily supported by the official service, but may also be supported
+by some of the white-label services. It is designed for command-line
+applications, and you will be asked to enter a single-use personal login token
+which you must manually generate from the account security settings in the
+web interface of your service.`,
		}, {
			Value: "traditional",
			Help: `Traditional authentication.
-This is supported by the official service and all white-label services
-that rclone knows about. You will be asked which service to connect to.
-It has a limitation of only a single active authentication at a time. You
-need to be on, or have access to, a machine with an internet-connected
-web browser.`,
+This is supported by the official service and most of the white-label
+services, you will be asked which service to connect to. You need to be on
+a machine with an internet-connected web browser.`,
		}, {
			Value: "legacy",
			Help: `Legacy authentication.
-This is no longer supported by any known services and not recommended
-used. You will be asked for your account's username and password.`,
+This is no longer supported by any known services and not recommended used.
+You will be asked for your account's username and password.`,
		}})
	case "auth_type_done":
		// Jump to next state according to config chosen
@@ -1009,13 +1005,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		f.features.ListR = nil
	}
 
-	cust, err := getCustomerInfo(ctx, f.apiSrv)
-	if err != nil {
-		return nil, err
-	}
-	f.user = cust.Username
-	f.setEndpoints()
-
	// Renew the token in the background
	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
		_, err := f.readMetaDataForPath(ctx, "")
@@ -1025,6 +1014,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		return err
	})
 
+	cust, err := getCustomerInfo(ctx, f.apiSrv)
+	if err != nil {
+		return nil, err
+	}
+	f.user = cust.Username
+	f.setEndpoints()
+
	if root != "" && !rootIsDir {
		// Check to see if the root actually an existing file
		remote := path.Base(root)
@@ -13,8 +13,6 @@ import (
	protonDriveAPI "github.com/henrybear327/Proton-API-Bridge"
	"github.com/henrybear327/go-proton-api"
 
-	"github.com/pquerna/otp/totp"
-
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
@@ -89,17 +87,6 @@ The value can also be provided with --protondrive-2fa=000000
 The 2FA code of your proton drive account if the account is set up with
 two-factor authentication`,
			Required: false,
-		}, {
-			Name: "otp_secret_key",
-			Help: `The OTP secret key
-
-The value can also be provided with --protondrive-otp-secret-key=ABCDEFGHIJKLMNOPQRSTUVWXYZ234567
-
-The OTP secret key of your proton drive account if the account is set up with
-two-factor authentication`,
-			Required:   false,
-			Sensitive:  true,
-			IsPassword: true,
		}, {
			Name: clientUIDKey,
			Help: "Client uid key (internal use only)",
@@ -204,7 +191,6 @@ type Options struct {
	Password        string `config:"password"`
	MailboxPassword string `config:"mailbox_password"`
	TwoFA           string `config:"2fa"`
-	OtpSecretKey    string `config:"otp_secret_key"`
 
	// advanced
	Enc encoder.MultiEncoder `config:"encoding"`
@@ -370,15 +356,7 @@ func newProtonDrive(ctx context.Context, f *Fs, opt *Options, m configmap.Mapper
	config.FirstLoginCredential.Username = opt.Username
	config.FirstLoginCredential.Password = opt.Password
	config.FirstLoginCredential.MailboxPassword = opt.MailboxPassword
-	// if 2FA code is provided, use it; otherwise, generate one using the OTP secret key if provided
	config.FirstLoginCredential.TwoFA = opt.TwoFA
-	if opt.TwoFA == "" && opt.OtpSecretKey != "" {
-		code, err := totp.GenerateCode(opt.OtpSecretKey, time.Now())
-		if err != nil {
-			return nil, fmt.Errorf("couldn't generate 2FA code: %w", err)
-		}
-		config.FirstLoginCredential.TwoFA = code
-	}
	protonDrive, auth, err := protonDriveAPI.NewProtonDrive(ctx, config, authHandler, deAuthHandler)
	if err != nil {
		return nil, fmt.Errorf("couldn't initialize a new proton drive instance: %w", err)
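For reference, the removed branch above generated the one-time code with `github.com/pquerna/otp/totp`. A standalone sketch of that call, using the example secret from the removed help text:

```go
package main

import (
	"fmt"
	"time"

	"github.com/pquerna/otp/totp"
)

func main() {
	// Base32 secret as shown in the removed option's help text; example value only.
	secret := "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"
	// GenerateCode returns the current 6-digit code for the given time,
	// as the removed code did with time.Now().
	code, err := totp.GenerateCode(secret, time.Now())
	if err != nil {
		fmt.Println("couldn't generate 2FA code:", err)
		return
	}
	fmt.Println("2FA code:", code)
}
```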
@@ -417,14 +395,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		}
	}
 
-	if opt.OtpSecretKey != "" {
-		var err error
-		opt.OtpSecretKey, err = obscure.Reveal(opt.OtpSecretKey)
-		if err != nil {
-			return nil, fmt.Errorf("couldn't decrypt OtpSecretKey: %w", err)
-		}
-	}
-
	ci := fs.GetConfig(ctx)
 
	root = strings.Trim(root, "/")
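The `obscure.Reveal` call removed above is rclone's standard reversible de-obscuring of config values (`github.com/rclone/rclone/fs/config/obscure`). A minimal round-trip sketch:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/config/obscure"
)

func main() {
	// Values stored in the config file are obscured (reversibly encrypted
	// with a fixed key), not hashed, so they can be recovered at runtime.
	obscured, err := obscure.Obscure("ABCDEFGHIJKLMNOPQRSTUVWXYZ234567")
	if err != nil {
		panic(err)
	}
	// This is the call the removed code made on opt.OtpSecretKey.
	revealed, err := obscure.Reveal(obscured)
	if err != nil {
		panic(err)
	}
	fmt.Println(revealed)
}
```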
backend/s3/s3.go (208 lines changed)
@@ -104,18 +104,12 @@ var providerOption = fs.Option{
	}, {
		Value: "Exaba",
		Help:  "Exaba Object Storage",
-	}, {
-		Value: "FileLu",
-		Help:  "FileLu S5 (S3-Compatible Object Storage)",
	}, {
		Value: "FlashBlade",
		Help:  "Pure Storage FlashBlade Object Storage",
	}, {
		Value: "GCS",
		Help:  "Google Cloud Storage",
-	}, {
-		Value: "Hetzner",
-		Help:  "Hetzner Object Storage",
	}, {
		Value: "HuaweiOBS",
		Help:  "Huawei Object Storage Service",
@@ -164,9 +158,6 @@ var providerOption = fs.Option{
	}, {
		Value: "Petabox",
		Help:  "Petabox Object Storage",
-	}, {
-		Value: "Rabata",
-		Help:  "Rabata Cloud Storage",
	}, {
		Value: "RackCorp",
		Help:  "RackCorp Object Storage",
@@ -359,41 +350,7 @@ func init() {
		}},
	}, {
		Name:     "region",
-		Help:     "Region to connect to for FileLu S5.",
-		Provider: "FileLu",
-		Examples: []fs.OptionExample{{
-			Value: "global",
-			Help:  "Global",
-		}, {
-			Value: "us-east",
-			Help:  "North America (US-East)",
-		}, {
-			Value: "eu-central",
-			Help:  "Europe (EU-Central)",
-		}, {
-			Value: "ap-southeast",
-			Help:  "Asia Pacific (AP-Southeast)",
-		}, {
-			Value: "me-central",
-			Help:  "Middle East (ME-Central)",
-		}},
-	}, {
-		Name:     "region",
-		Help:     "Region to connect to.",
-		Provider: "Hetzner",
-		Examples: []fs.OptionExample{{
-			Value: "hel1",
-			Help:  "Helsinki",
-		}, {
-			Value: "fsn1",
-			Help:  "Falkenstein",
-		}, {
-			Value: "nbg1",
-			Help:  "Nuremberg",
-		}},
-	}, {
-		Name:     "region",
-		Help:     "Region to connect to. - the location where your bucket will be created and your data stored. Need to be same with your endpoint.\n",
+		Help:     "Region to connect to. - the location where your bucket will be created and your data stored. Need bo be same with your endpoint.\n",
		Provider: "HuaweiOBS",
		Examples: []fs.OptionExample{{
			Value: "af-south-1",
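As a usage aside, provider options like the ones above surface as backend parameters, so they can be supplied inline through rclone's on-the-fly remote syntax. A hedged example using the HuaweiOBS values from this hunk (the endpoint value is illustrative, check your region's OBS endpoint):

```sh
# List buckets with an ad-hoc S3 remote configured entirely on the command line.
rclone lsd ":s3,provider=HuaweiOBS,region=af-south-1,endpoint=obs.af-south-1.myhuaweicloud.com:"
```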
@@ -584,20 +541,6 @@ func init() {
			Value: "ap-northeast-1",
			Help:  "Northeast Asia Region 1.\nNeeds location constraint ap-northeast-1.",
		}},
-	}, {
-		Name:     "region",
-		Help:     "Region where your bucket will be created and your data stored.\n",
-		Provider: "Rabata",
-		Examples: []fs.OptionExample{{
-			Value: "us-east-1",
-			Help:  "US East (N. Virginia)",
-		}, {
-			Value: "eu-west-1",
-			Help:  "EU (Ireland)",
-		}, {
-			Value: "eu-west-2",
-			Help:  "EU (London)",
-		}},
	}, {
		Name: "region",
		Help: "region - the location where your bucket will be created and your data stored.\n",
@@ -714,7 +657,7 @@ func init() {
	}, {
		Name: "region",
		Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
-		Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,FileLu,Hetzner,HuaweiOBS,IDrive,Intercolo,IONOS,Liara,Linode,Magalu,Mega,OVHcloud,Petabox,Qiniu,Rabata,RackCorp,Scaleway,Selectel,SpectraLogic,Storj,Synology,TencentCOS,Zata",
+		Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,HuaweiOBS,IDrive,Intercolo,IONOS,Liara,Linode,Magalu,Mega,OVHcloud,Petabox,Qiniu,RackCorp,Scaleway,Selectel,SpectraLogic,Storj,Synology,TencentCOS,Zata",
		Examples: []fs.OptionExample{{
			Value: "",
			Help:  "Use this if unsure.\nWill use v4 signatures and an empty region.",
@@ -915,14 +858,6 @@ func init() {
			Value: "eos-anhui-1.cmecloud.cn",
			Help:  "Anhui China (Huainan)",
		}},
-	}, {
-		Name:     "endpoint",
-		Help:     "Endpoint for FileLu S5 Object Storage.\nRequired when using FileLu S5.",
-		Provider: "FileLu",
-		Examples: []fs.OptionExample{{
-			Value: "s5lu.com",
-			Help:  "Global FileLu S5 endpoint",
-		}},
	}, {
		Name: "endpoint",
		Help: "Endpoint for Google Cloud Storage.",
@@ -931,20 +866,6 @@ func init() {
			Value: "https://storage.googleapis.com",
			Help:  "Google Cloud Storage endpoint",
		}},
-	}, {
-		Name:     "endpoint",
-		Help:     "Endpoint for Hetzner Object Storage",
-		Provider: "Hetzner",
-		Examples: []fs.OptionExample{{
-			Value: "hel1.your-objectstorage.com",
-			Help:  "Helsinki",
-		}, {
-			Value: "fsn1.your-objectstorage.com",
-			Help:  "Falkenstein",
-		}, {
-			Value: "nbg1.your-objectstorage.com",
-			Help:  "Nuremberg",
-		}},
	}, {
		// obs endpoints: https://developer.huaweicloud.com/intl/en-us/endpoint?OBS
		Name: "endpoint",
@@ -1080,7 +1001,7 @@ func init() {
			Help:  "APAC Cross Regional Tokyo Endpoint",
		}, {
			Value: "s3.hkg.ap.cloud-object-storage.appdomain.cloud",
-			Help:  "APAC Cross Regional Hong Kong Endpoint",
+			Help:  "APAC Cross Regional HongKong Endpoint",
		}, {
			Value: "s3.seo.ap.cloud-object-storage.appdomain.cloud",
			Help:  "APAC Cross Regional Seoul Endpoint",
@@ -1092,7 +1013,7 @@ func init() {
			Help:  "APAC Cross Regional Tokyo Private Endpoint",
		}, {
			Value: "s3.private.hkg.ap.cloud-object-storage.appdomain.cloud",
-			Help:  "APAC Cross Regional Hong Kong Private Endpoint",
+			Help:  "APAC Cross Regional HongKong Private Endpoint",
		}, {
			Value: "s3.private.seo.ap.cloud-object-storage.appdomain.cloud",
			Help:  "APAC Cross Regional Seoul Private Endpoint",
@@ -1433,17 +1354,11 @@ func init() {
		}},
	}, {
		Name:     "endpoint",
-		Help:     "Endpoint for Rabata Object Storage.",
-		Provider: "Rabata",
+		Help:     "Endpoint for Zata Object Storage.",
+		Provider: "Zata",
		Examples: []fs.OptionExample{{
-			Value: "s3.us-east-1.rabata.io",
-			Help:  "US East (N. Virginia)",
-		}, {
-			Value: "s3.eu-west-1.rabata.io",
-			Help:  "EU West (Ireland)",
-		}, {
-			Value: "s3.eu-west-2.rabata.io",
-			Help:  "EU West (London)",
+			Value: "idr01.zata.ai",
+			Help:  "South Asia Endpoint",
		}},
	}, {
		// RackCorp endpoints: https://www.rackcorp.com/storage/s3storage
@@ -1636,18 +1551,10 @@ func init() {
			Value: "cos.accelerate.myqcloud.com",
			Help:  "Use Tencent COS Accelerate Endpoint",
		}},
-	}, {
-		Name:     "endpoint",
-		Help:     "Endpoint for Zata Object Storage.",
-		Provider: "Zata",
-		Examples: []fs.OptionExample{{
-			Value: "idr01.zata.ai",
-			Help:  "South Asia Endpoint",
-		}},
	}, {
		Name: "endpoint",
		Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
-		Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,GCS,Hetzner,HuaweiOBS,IBMCOS,IDrive,Intercolo,IONOS,Liara,Linode,LyveCloud,Magalu,OVHcloud,Petabox,Qiniu,Rabata,RackCorp,Scaleway,Selectel,StackPath,Storj,Synology,TencentCOS,Zata",
+		Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,GCS,HuaweiOBS,IBMCOS,IDrive,Intercolo,IONOS,Liara,Linode,LyveCloud,Magalu,OVHcloud,Petabox,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,Synology,TencentCOS,Zata",
		Examples: []fs.OptionExample{{
			Value: "objects-us-east-1.dream.io",
			Help:  "Dream Objects endpoint",
@@ -1949,7 +1856,7 @@ func init() {
			Help:  "Southwest China (Guiyang)",
		}, {
			Value: "xian1",
-			Help:  "Northwest China (Xian)",
+			Help:  "Nouthwest China (Xian)",
		}, {
			Value: "yunnan",
			Help:  "Yunnan China (Kunming)",
@@ -2117,20 +2024,6 @@ func init() {
			Value: "ap-northeast-1",
			Help:  "Northeast Asia Region 1",
		}},
-	}, {
-		Name:     "location_constraint",
-		Help:     "location where your bucket will be created and your data stored.\n",
-		Provider: "Rabata",
-		Examples: []fs.OptionExample{{
-			Value: "us-east-1",
-			Help:  "US East (N. Virginia)",
-		}, {
-			Value: "eu-west-1",
-			Help:  "EU (Ireland)",
-		}, {
-			Value: "eu-west-2",
-			Help:  "EU (London)",
-		}},
	}, {
		Name: "location_constraint",
		Help: "Location constraint - the location where your bucket will be located and your data stored.\n",
@@ -2188,7 +2081,7 @@ func init() {
			Help:  "New York (USA) Region",
		}, {
			Value: "us-west-1",
-			Help:  "Fremont (USA) Region",
+			Help:  "Freemont (USA) Region",
		}, {
			Value: "nz",
			Help:  "Auckland (New Zealand) Region",
@@ -2196,7 +2089,7 @@ func init() {
	}, {
		Name: "location_constraint",
		Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
-		Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,FileLu,HuaweiOBS,IBMCOS,IDrive,Intercolo,IONOS,Leviia,Liara,Linode,Magalu,Mega,Outscale,OVHcloud,Petabox,Qiniu,Rabata,RackCorp,Scaleway,Selectel,SpectraLogic,StackPath,Storj,TencentCOS",
+		Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,HuaweiOBS,IBMCOS,IDrive,Intercolo,IONOS,Leviia,Liara,Linode,Magalu,Mega,Outscale,OVHcloud,Petabox,Qiniu,RackCorp,Scaleway,Selectel,SpectraLogic,StackPath,Storj,TencentCOS",
	}, {
		Name: "acl",
		Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -2211,7 +2104,7 @@ doesn't copy the ACL from the source but rather writes a fresh one.
 If the acl is an empty string then no X-Amz-Acl: header is added and
 the default (private) will be used.
 `,
-		Provider: "!Cloudflare,FlashBlade,Mega,Rabata,Selectel,SpectraLogic,Storj,Synology",
+		Provider: "!Cloudflare,FlashBlade,Mega,Selectel,SpectraLogic,Storj,Synology",
		Examples: []fs.OptionExample{{
			Value: "default",
			Help:  "Owner gets Full_CONTROL.\nNo one else has access rights (default).",
@@ -2269,7 +2162,7 @@ isn't set then "acl" is used instead.
 If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl:
 header is added and the default (private) will be used.
 `,
-		Provider: "!Cloudflare,FlashBlade,Rabata,Selectel,SpectraLogic,Storj,Synology",
+		Provider: "!Cloudflare,FlashBlade,Selectel,SpectraLogic,Storj,Synology",
		Advanced: true,
		Examples: []fs.OptionExample{{
			Value: "private",
@@ -2398,15 +2291,6 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
			Value: "GLACIER_IR",
			Help:  "Glacier Instant Retrieval storage class",
		}},
-	}, {
-		// Mapping from here: https://www.arvancloud.ir/en/products/cloud-storage
-		Name:     "storage_class",
-		Help:     "The storage class to use when storing new objects in ArvanCloud.",
-		Provider: "ArvanCloud",
-		Examples: []fs.OptionExample{{
-			Value: "STANDARD",
-			Help:  "Standard storage class",
-		}},
	}, {
		// Mapping from here: https://www.alibabacloud.com/help/doc-detail/64919.htm
		Name: "storage_class",
@@ -2452,6 +2336,15 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
			Value: "STANDARD",
			Help:  "Standard storage class",
		}},
+	}, {
+		// Mapping from here: https://www.arvancloud.ir/en/products/cloud-storage
+		Name:     "storage_class",
+		Help:     "The storage class to use when storing new objects in ArvanCloud.",
+		Provider: "ArvanCloud",
+		Examples: []fs.OptionExample{{
+			Value: "STANDARD",
+			Help:  "Standard storage class",
+		}},
	}, {
		// Mapping from here: https://docs.magalu.cloud/docs/storage/object-storage/Classes-de-Armazenamento/standard
		Name: "storage_class",
@@ -2465,22 +2358,22 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
			Help:  "Glacier Instant Retrieval storage class",
		}},
	}, {
-		// Mapping from here: https://developer.qiniu.com/kodo/5906/storage-type
+		// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
		Name:     "storage_class",
-		Help:     "The storage class to use when storing new objects in Qiniu.",
-		Provider: "Qiniu",
+		Help:     "The storage class to use when storing new objects in Tencent COS.",
+		Provider: "TencentCOS",
		Examples: []fs.OptionExample{{
+			Value: "",
+			Help:  "Default",
+		}, {
			Value: "STANDARD",
			Help:  "Standard storage class",
		}, {
-			Value: "LINE",
-			Help:  "Infrequent access storage mode",
-		}, {
-			Value: "GLACIER",
+			Value: "ARCHIVE",
			Help:  "Archive storage mode",
		}, {
-			Value: "DEEP_ARCHIVE",
-			Help:  "Deep archive storage mode",
+			Value: "STANDARD_IA",
+			Help:  "Infrequent access storage mode",
		}},
	}, {
		// Mapping from here: https://www.scaleway.com/en/docs/storage/object/quickstart/
@@ -2501,22 +2394,22 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
			Help:  "One Zone - Infrequent Access.\nA good choice for storing secondary backup copies or easily re-creatable data.\nAvailable in the FR-PAR region only.",
		}},
	}, {
-		// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
+		// Mapping from here: https://developer.qiniu.com/kodo/5906/storage-type
		Name:     "storage_class",
-		Help:     "The storage class to use when storing new objects in Tencent COS.",
-		Provider: "TencentCOS",
+		Help:     "The storage class to use when storing new objects in Qiniu.",
+		Provider: "Qiniu",
		Examples: []fs.OptionExample{{
-			Value: "",
-			Help:  "Default",
-		}, {
			Value: "STANDARD",
			Help:  "Standard storage class",
		}, {
-			Value: "ARCHIVE",
+			Value: "LINE",
+			Help:  "Infrequent access storage mode",
+		}, {
+			Value: "GLACIER",
			Help:  "Archive storage mode",
		}, {
-			Value: "STANDARD_IA",
-			Help:  "Infrequent access storage mode",
+			Value: "DEEP_ARCHIVE",
+			Help:  "Deep archive storage mode",
		}},
	}, {
		Name: "upload_cutoff",
@@ -3765,8 +3658,6 @@ func setQuirks(opt *Options) {
	case "Alibaba":
		useMultipartEtag = false // Alibaba seems to calculate multipart Etags differently from AWS
		useAlreadyExists = true  // returns 200 OK
-	case "Hetzner":
-		useAlreadyExists = false
	case "HuaweiOBS":
		// Huawei OBS PFS is not support listObjectV2, and if turn on the urlEncodeListing, marker will not work and keep list same page forever.
		urlEncodeListings = false
@@ -3799,11 +3690,6 @@ func setQuirks(opt *Options) {
	case "FlashBlade":
		mightGzip = false        // Never auto gzips objects
		virtualHostStyle = false // supports vhost but defaults to paths
-	case "FileLu":
-		listObjectsV2 = false
-		virtualHostStyle = false
-		urlEncodeListings = false
-		useMultipartEtag = false
	case "IBMCOS":
		listObjectsV2 = false // untested
		virtualHostStyle = false
@@ -3858,8 +3744,6 @@ func setQuirks(opt *Options) {
		virtualHostStyle = false
	case "OVHcloud":
		// No quirks
-	case "Rabata":
-		// server side copy not supported
	case "RackCorp":
		// No quirks
		useMultipartEtag = false // untested
@@ -4140,12 +4024,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
	if opt.Provider == "AWS" {
		f.features.DoubleSlash = true
	}
-	if opt.Provider == "Rabata" {
-		f.features.Copy = nil
-	}
-	if opt.Provider == "Hetzner" {
-		f.features.SetTier = false
-	}
	if opt.DirectoryMarkers {
		f.features.CanHaveEmptyDirectories = true
	}
@@ -6281,10 +6159,6 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
 
 // SetModTime sets the modification time of the local fs object
 func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
-	if o.fs.opt.Provider == "Rabata" {
-		// Rabata does not support copying objects
-		return fs.ErrorCantSetModTime
-	}
	err := o.readMetaData(ctx)
	if err != nil {
		return err
@@ -561,21 +561,6 @@ func (f *Fs) setRoot(root string) {
	f.rootContainer, f.rootDirectory = bucket.Split(f.root)
 }
 
-// Fetch the base container's policy to be used if/when we need to create a
-// segments container to ensure we use the same policy.
-func (f *Fs) fetchStoragePolicy(ctx context.Context, container string) (fs.Fs, error) {
-	err := f.pacer.Call(func() (bool, error) {
-		var rxHeaders swift.Headers
-		_, rxHeaders, err := f.c.Container(ctx, container)
-
-		f.opt.StoragePolicy = rxHeaders["X-Storage-Policy"]
-		fs.Debugf(f, "Auto set StoragePolicy to %s", f.opt.StoragePolicy)
-
-		return shouldRetryHeaders(ctx, rxHeaders, err)
-	})
-	return nil, err
-}
-
 // NewFsWithConnection constructs an Fs from the path, container:path
 // and authenticated connection.
 //
@@ -605,7 +590,6 @@ func NewFsWithConnection(ctx context.Context, opt *Options, name, root string, c
		f.opt.UseSegmentsContainer.Valid = true
		fs.Debugf(f, "Auto set use_segments_container to %v", f.opt.UseSegmentsContainer.Value)
	}
-
	if f.rootContainer != "" && f.rootDirectory != "" {
		// Check to see if the object exists - ignoring directory markers
		var info swift.Object
@@ -1148,13 +1132,6 @@ func (f *Fs) newSegmentedUpload(ctx context.Context, dstContainer string, dstPat
		container: dstContainer,
	}
	if f.opt.UseSegmentsContainer.Value {
-		if f.opt.StoragePolicy == "" {
-			_, err = f.fetchStoragePolicy(ctx, dstContainer)
-			if err != nil {
-				return nil, err
-			}
-		}
-
		su.container += segmentsContainerSuffix
		err = f.makeContainer(ctx, su.container)
		if err != nil {
@@ -76,7 +76,6 @@ func (f *Fs) testNoChunk(t *testing.T) {
 
 // Additional tests that aren't in the framework
 func (f *Fs) InternalTest(t *testing.T) {
-	t.Run("PolicyDiscovery", f.testPolicyDiscovery)
	t.Run("NoChunk", f.testNoChunk)
	t.Run("WithChunk", f.testWithChunk)
	t.Run("WithChunkFail", f.testWithChunkFail)
@@ -196,50 +195,4 @@ func (f *Fs) testCopyLargeObject(t *testing.T) {
	require.Equal(t, obj.Size(), objTarget.Size())
 }
 
-func (f *Fs) testPolicyDiscovery(t *testing.T) {
-	ctx := context.TODO()
-	container := "testPolicyDiscovery-1"
-	// Reset the policy so we can test if it is populated.
-	f.opt.StoragePolicy = ""
-	err := f.makeContainer(ctx, container)
-	require.NoError(t, err)
-	_, err = f.fetchStoragePolicy(ctx, container)
-	require.NoError(t, err)
-
-	// Default policy for SAIO image is 1replica.
-	assert.Equal(t, "1replica", f.opt.StoragePolicy)
-
-	// Create a container using a non-default policy, and check to ensure
-	// that the created segments container uses the same non-default policy.
-	policy := "Policy-1"
-	container = "testPolicyDiscovery-2"
-
-	f.opt.StoragePolicy = policy
-	err = f.makeContainer(ctx, container)
-	require.NoError(t, err)
-
-	// Reset the policy so we can test if it is populated, and set to the
-	// non-default policy.
-	f.opt.StoragePolicy = ""
-	_, err = f.fetchStoragePolicy(ctx, container)
-	require.NoError(t, err)
-	assert.Equal(t, policy, f.opt.StoragePolicy)
-
-	// Test that when a segmented upload container is made, the newly
-	// created container inherits the non-default policy of the base
-	// container.
-	f.opt.StoragePolicy = ""
-	f.opt.UseSegmentsContainer.Value = true
-	su, err := f.newSegmentedUpload(ctx, container, "")
-	require.NoError(t, err)
-	// The container name we expected?
-	segmentsContainer := container + segmentsContainerSuffix
-	assert.Equal(t, segmentsContainer, su.container)
-	// The policy we expected?
-	f.opt.StoragePolicy = ""
-	_, err = f.fetchStoragePolicy(ctx, su.container)
-	require.NoError(t, err)
-	assert.Equal(t, policy, f.opt.StoragePolicy)
-}
-
 var _ fstests.InternalTester = (*Fs)(nil)
cmd/cmd.go (17 lines changed)
@@ -23,7 +23,6 @@ import (
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/cache"
-	"github.com/rclone/rclone/fs/cluster"
	"github.com/rclone/rclone/fs/config/configfile"
	"github.com/rclone/rclone/fs/config/configflags"
	"github.com/rclone/rclone/fs/config/flags"
@@ -482,22 +481,6 @@ func initConfig() {
			}
		})
	}
-
-	// Run as a cluster worker if configured, otherwise ignoring
-	// the command given on the command line
-	if ci.Cluster != "" {
-		if ci.ClusterID == "" || ci.ClusterID == "0" {
-			fs.Infof(nil, "Running in cluster mode %q as controller", ci.ClusterID)
-		} else {
-			fs.Infof(nil, "Running in cluster mode %q as worker with id %q", ci.ClusterID, ci.ClusterID)
-			worker, err := cluster.NewWorker(ctx)
-			if err != nil || worker == nil {
-				fs.Fatalf(nil, "Failed to start cluster worker: %v", err)
-			}
-			// Do not continue with the main thread
-			select {}
-		}
-	}
 }
 
 func resolveExitCode(err error) {
@@ -7,7 +7,6 @@ import (
 
	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/cluster"
	"github.com/rclone/rclone/fs/rc"
	"github.com/rclone/rclone/fs/rc/rcflags"
	"github.com/rclone/rclone/fs/rc/rcserver"
@@ -39,8 +38,6 @@ See the [rc documentation](/rc/) for more info on the rc flags.
		"groups": "RC",
	},
	Run: func(command *cobra.Command, args []string) {
-		ctx := context.Background()
-
		cmd.CheckArgs(0, 1, command, args)
		if rc.Opt.Enabled {
			fs.Fatalf(nil, "Don't supply --rc flag when using rcd")
@@ -52,12 +49,6 @@ See the [rc documentation](/rc/) for more info on the rc flags.
			rc.Opt.Files = args[0]
		}
 
-		// Start the cluster worker if configured
-		_, err := cluster.NewWorker(ctx)
-		if err != nil {
-			fs.Fatalf(nil, "Failed to start cluster worker: %v", err)
-		}
-
		s, err := rcserver.Start(context.Background(), &rc.Opt)
		if err != nil {
			fs.Fatalf(nil, "Failed to start remote control: %v", err)
@@ -2,7 +2,6 @@ package s3
 
 import (
	"fmt"
-	"strings"
 
	"github.com/rclone/gofakes3"
	"github.com/rclone/rclone/fs"
@@ -13,23 +12,25 @@ type logger struct{}
 
 // print log message
 func (l logger) Print(level gofakes3.LogLevel, v ...any) {
-	var b strings.Builder
-	for i := range v {
-		if i > 0 {
-			fmt.Fprintf(&b, " ")
+	var s string
+	if len(v) == 0 {
+		s = ""
+	} else {
+		var ok bool
+		s, ok = v[0].(string)
+		if !ok {
+			s = fmt.Sprint(v[0])
		}
-		fmt.Fprint(&b, v[i])
+		v = v[1:]
	}
-	s := b.String()
-
	switch level {
	default:
		fallthrough
	case gofakes3.LogErr:
-		fs.Errorf("serve s3", s)
+		fs.Errorf("serve s3", s, v...)
	case gofakes3.LogWarn:
-		fs.Infof("serve s3", s)
+		fs.Infof("serve s3", s, v...)
	case gofakes3.LogInfo:
-		fs.Debugf("serve s3", s)
+		fs.Debugf("serve s3", s, v...)
	}
 }
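A note on the rewritten `Print` above: it now treats the first variadic argument as a printf-style format and forwards the remainder, which is what `fs.Errorf` and friends expect. A minimal, self-contained imitation of that forwarding (message and values invented for illustration):

```go
package main

import "fmt"

func main() {
	// v mimics gofakes3's variadic log arguments: format first, operands after.
	v := []any{"bucket %q not found", "mybucket"}
	s, ok := v[0].(string)
	if !ok {
		s = fmt.Sprint(v[0])
	}
	v = v[1:]
	// Stands in for fs.Errorf("serve s3", s, v...) in the new code.
	fmt.Printf(s+"\n", v...)
}
```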
@@ -125,7 +125,6 @@ WebDAV or S3, that work out of the box.)
 {{< provider name="Exaba" home="https://exaba.com/" config="/s3/#exaba" >}}
 {{< provider name="Fastmail Files" home="https://www.fastmail.com/" config="/webdav/#fastmail-files" >}}
 {{< provider name="FileLu Cloud Storage" home="https://filelu.com/" config="/filelu/" >}}
-{{< provider name="FileLu S5 (S3-Compatible Object Storage)" home="https://s5lu.com/" config="/s3/#filelu-s5" >}}
 {{< provider name="Files.com" home="https://www.files.com/" config="/filescom/" >}}
 {{< provider name="FlashBlade" home="https://www.purestorage.com/products/unstructured-data-storage.html" config="/s3/#pure-storage-flashblade" >}}
 {{< provider name="FTP" home="https://en.wikipedia.org/wiki/File_Transfer_Protocol" config="/ftp/" >}}
@@ -134,11 +133,9 @@ WebDAV or S3, that work out of the box.)
 {{< provider name="Google Drive" home="https://www.google.com/drive/" config="/drive/" >}}
 {{< provider name="Google Photos" home="https://www.google.com/photos/about/" config="/googlephotos/" >}}
 {{< provider name="HDFS" home="https://hadoop.apache.org/" config="/hdfs/" >}}
-{{< provider name="Hetzner Object Storage" home="https://www.hetzner.com/storage/object-storage/" config="/s3/#hetzner" >}}
 {{< provider name="Hetzner Storage Box" home="https://www.hetzner.com/storage/storage-box" config="/sftp/#hetzner-storage-box" >}}
 {{< provider name="HiDrive" home="https://www.strato.de/cloud-speicher/" config="/hidrive/" >}}
 {{< provider name="HTTP" home="https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol" config="/http/" >}}
-{{< provider name="Huawei OBS" home="https://www.huaweicloud.com/intl/en-us/product/obs.html" config="/s3/#huawei-obs" >}}
 {{< provider name="iCloud Drive" home="https://icloud.com/" config="/iclouddrive/" >}}
 {{< provider name="ImageKit" home="https://imagekit.io" config="/imagekit/" >}}
 {{< provider name="Internet Archive" home="https://archive.org/" config="/internetarchive/" >}}
@@ -182,10 +179,7 @@ WebDAV or S3, that work out of the box.)
 {{< provider name="QingStor" home="https://www.qingcloud.com/products/storage" config="/qingstor/" >}}
 {{< provider name="Qiniu Cloud Object Storage (Kodo)" home="https://www.qiniu.com/en/products/kodo" config="/s3/#qiniu" >}}
 {{< provider name="Quatrix by Maytech" home="https://www.maytech.net/products/quatrix-business" config="/quatrix/" >}}
-{{< provider name="Rabata Cloud Storage" home="https://rabata.io" config="/s3/#Rabata" >}}
-{{< provider name="RackCorp Object Storage" home="https://www.rackcorp.com/" config="/s3/#RackCorp" >}}
 {{< provider name="Rackspace Cloud Files" home="https://www.rackspace.com/cloud/files" config="/swift/" >}}
-{{< provider name="Rclone Serve S3" home="/commands/rclone_serve_s3/" config="/s3/#rclone" >}}
 {{< provider name="rsync.net" home="https://rsync.net/products/rclone.html" config="/sftp/#rsync-net" >}}
 {{< provider name="Scaleway" home="https://www.scaleway.com/object-storage/" config="/s3/#scaleway" >}}
 {{< provider name="Seafile" home="https://www.seafile.com/" config="/seafile/" >}}
@@ -1016,8 +1016,3 @@ put them back in again.` >}}
 - Matt LaPaglia <mlapaglia@gmail.com>
 - Youfu Zhang <1315097+zhangyoufu@users.noreply.github.com>
 - juejinyuxitu <juejinyuxitu@outlook.com>
-- iTrooz <hey@itrooz.fr>
-- Microscotch <github.com@microscotch.net>
-- Andrew Ruthven <andrew@etc.gen.nz>
-- spiffytech <git@spiffy.tech>
-- Dulani Woods <Dulani@gmail.com>
@@ -1,217 +0,0 @@

---
title: "Cluster"
description: "Clustering rclone"
versionIntroduced: "v1.72"
---

# Cluster

Rclone has a cluster mode invoked with the `--cluster` flag. This
enables a group of rclone instances to work together on doing a sync.

This is controlled by a group of flags starting with `--cluster-` and
enabled with the `--cluster` flag.

```text
--cluster string                   Enable cluster mode with remote to use as shared storage
--cluster-batch-files int          Max number of files for a cluster batch (default 1000)
--cluster-batch-size SizeSuffix    Max size of files for a cluster batch (default 1Ti)
--cluster-cleanup ClusterCleanup   Control which cluster files get cleaned up (default full)
--cluster-id string                Set to an ID for the cluster. An ID of 0 or empty becomes the controller
--cluster-quit-workers             Set to cause the controller to quit the workers when it has finished
```

The command might look something like this: a normal rclone command,
but with a new `--cluster` flag which points at an rclone remote
defining the cluster storage. This is the signal to rclone that it
should engage cluster mode with a controller and workers.

```sh
rclone copy source: destination: --flags --cluster /work
rclone copy source: destination: --flags --cluster s3:bucket
```

This works only with the `rclone sync`, `copy` and `move` commands.

If the remote specified by the `--cluster` flag is inside the
`source:` or `destination:` it must be excluded with the filter flags.
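For example, with a hypothetical layout where the cluster directory lives inside the destination, a filter such as `--exclude` keeps the work files out of the transfer (the paths here are placeholders):

```sh
# The work directory /backup/.cluster sits inside the destination,
# so exclude it from the sync itself.
rclone sync /data /backup --cluster /backup/.cluster --exclude "/.cluster/**"
```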
Any rclone remotes used in the transfer must be defined on all cluster
nodes. Defining remotes with connection strings will get around that
problem.

## Terminology

The cluster has two logical groups: the controller and the workers.
There is one controller and many workers.

The controller and the workers communicate with each other by creating
files in the remote pointed to by the `--cluster` flag. This could be,
for example, an S3 bucket or a Kubernetes PVC.

The files are JSON-serialized rc commands. Multiple commands are sent
using `rc/batch`. The commands flow `pending` → `processing` → `done`
→ `finished`.

```text
└── queue
    ├── pending    ← pending task files created by the controller
    ├── processing ← claimed tasks being executed by a worker
    ├── done       ← finished tasks awaiting the controller to read the result
    └── finished   ← completed task files
```
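Since the queue is just files, you can watch a job move through these states while a sync runs; a quick sketch, assuming a local work directory `/work`:

```sh
# List the queue files once a second to watch tasks move
# from pending to processing to done to finished.
watch -n1 'find /work/queue -type f | sort'
```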
The cluster can be set up in two ways: as a persistent cluster or as a
transient cluster.

### Persistent cluster

Run a cluster of workers using

```sh
rclone rcd --cluster /work
```

Then run rclone commands when required on the cluster:

```sh
rclone copy source: destination: --flags --cluster /work
```

In this mode there can be many rclone commands executing at once.

### Transient cluster

Run many copies of rclone simultaneously, for example in a Kubernetes
indexed job.

The rclone with `--cluster-id 0` becomes the controller and the others
become the workers. For a Kubernetes indexed job, setting
`--cluster-id $(JOB_COMPLETION_INDEX)` would work well.

Add the `--cluster-quit-workers` flag - this will cause the controller
to make sure the workers exit when it has finished.

All instances of rclone run a command like this, so the whole cluster
can only run one rclone command:

```sh
rclone copy source: destination: --flags --cluster /work --cluster-id $(JOB_COMPLETION_INDEX) --cluster-quit-workers
```
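A minimal sketch of such an indexed Job, assuming a shared PVC named `rclone-work` mounted at `/work` and a pre-provisioned rclone config; the image tag, remote names and completion count are placeholders:

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: rclone-cluster-sync
spec:
  completions: 4            # 1 controller + 3 workers
  parallelism: 4
  completionMode: Indexed   # gives each pod JOB_COMPLETION_INDEX
  template:
    spec:
      restartPolicy: Never
      containers:
        - name: rclone
          image: rclone/rclone:latest
          args:
            - copy
            - source:
            - destination:
            - --cluster=/work
            - --cluster-id=$(JOB_COMPLETION_INDEX)
            - --cluster-quit-workers
          volumeMounts:
            - name: work
              mountPath: /work
      volumes:
        - name: work
          persistentVolumeClaim:
            claimName: rclone-work
```

The pod with index 0 becomes the controller and the rest become workers, so the Job finishes cleanly once the controller quits the workers.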
## Controller

The controller runs the sync and distributes the work.

- It does the listing of the source and destination directories,
  comparing files in order to find files which need to be transferred.
- Files which need to be transferred are then batched into jobs of at
  most `--cluster-batch-files` files or `--cluster-batch-size` total
  size, placed in `queue/pending` for the workers to pick up.
- It watches `queue/done` for finished jobs, updates the transfer
  statistics, logs any errors, and moves the job to `queue/finished`.

Once the sync is complete, if `--cluster-quit-workers` is set, then it
sends the workers a special command which causes them all to exit.

The controller only sends transfer jobs to the workers. All the other
tasks (eg listing, comparing) are done by the controller. The
controller does not execute any transfer tasks itself.

The controller reads worker status as written to `queue/status` and
will detect workers which have stopped. If it detects a failed worker
then it will re-assign any outstanding work.

## Workers

The workers' job is entirely to act as API endpoints that receive
their work via files in `/work`. Each worker will then (see the sketch
after this list):

- Read work in `queue/pending`.
- Attempt to rename it into `queue/processing`.
  - If the cluster work directory supports atomic renames, then use
    those; otherwise read the file, write the copy, then delete the
    original. If the delete fails then the rename was not successful
    (possible on s3 backends).
- If the rename was successful, do that item of work. If not, another
  worker got there first, so sleep for a bit and then retry.
- After the copy is complete, remove the `queue/processing` file, or
  rename it into `queue/finished` if the `--cluster-cleanup` flag
  allows it.
- Repeat.
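A minimal sketch of the claim step, assuming a plain local work directory where `os.Rename` is atomic; the function and variable names here are illustrative, not rclone's actual implementation:

```go
package main

import (
	"errors"
	"io/fs"
	"os"
	"path/filepath"
	"time"
)

// claimTask tries to move one pending task file into queue/processing.
// On a POSIX filesystem os.Rename is atomic, so exactly one worker
// wins each task; losers see ErrNotExist and try the next file.
func claimTask(work string) (string, error) {
	pending := filepath.Join(work, "queue", "pending")
	entries, err := os.ReadDir(pending)
	if err != nil {
		return "", err
	}
	for _, e := range entries {
		src := filepath.Join(pending, e.Name())
		dst := filepath.Join(work, "queue", "processing", e.Name())
		if err := os.Rename(src, dst); err == nil {
			return dst, nil // claimed
		} else if !errors.Is(err, fs.ErrNotExist) {
			return "", err
		}
		// Another worker claimed it first - keep looking.
	}
	return "", nil // nothing pending - caller sleeps and retries
}

func main() {
	for {
		task, err := claimTask("/work")
		if err != nil || task == "" {
			time.Sleep(time.Second)
			continue
		}
		// ... execute the task, then move it to queue/finished ...
		_ = task
	}
}
```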
Every second the worker will write a status file in `queue/status` to
be read by the controller.

## Layout of the work directory

The format of the files in this directory may change without notice
but the layout is documented here as it can help debugging.

```text
/work - root of the work directory
└── queue - files to control the queue
    ├── done - job files that are finished but not yet read
    ├── finished - job files that are finished and read
    ├── pending - job files that are not started yet
    ├── processing - job files that are running
    └── status - worker status files
```

When debugging, use `--cluster-cleanup none` to leave the completed
files in the directory layout.

## Flags

### --cluster string

This enables cluster mode. Without this flag, all the other cluster
flags are ignored. This should be given a remote, which can be a local
directory, eg `/work`, or a remote directory, eg `s3:bucket`.

### --cluster-batch-files int

This controls the number of files copied in a cluster batch. Setting
this larger may be more efficient, but it means the statistics will be
less accurate on the controller (default 1000).

### --cluster-batch-size SizeSuffix

This controls the total size of files in a cluster batch. If the size
of the files in a batch exceeds this number then the batch will be
sent to the workers. Setting this larger may be more efficient, but it
means the statistics will be less accurate on the controller (default
1TiB).

### --cluster-cleanup ClusterCleanup

Controls which cluster files get cleaned up.

- `full` - clean all work files (default)
- `completed` - clean completed work files but leave the errors and status
- `none` - leave all the files (useful for debugging)

### --cluster-id string

Set an ID for the rclone instance. This can be a string or a number.
An ID of 0 will become the controller, otherwise the instance will
become a worker. If this flag isn't supplied or the value is empty,
then a random string will be used instead.

### --cluster-quit-workers

If this flag is set, then when the controller finishes its sync task
it will quit all the workers before it exits.

## Not implemented

Here are some features from the original design which are not
implemented yet:

- the controller will not notice if workers die or fail to complete
  their tasks
- the controller does not re-assign the workers' work if necessary
- the controller does not restart the sync
- the workers do not write any status files (but the stats are
  correctly accounted)
@@ -3315,15 +3315,6 @@ For the remote control options and for instructions on how to remote control rcl

See [the remote control section](/rc/).

## Cluster

For the cluster options and for instructions on how to cluster rclone:

- `--cluster`
- Anything starting with `--cluster-`

See the [cluster section](/cluster/).

## Logging

rclone has 4 levels of logging, `ERROR`, `NOTICE`, `INFO` and `DEBUG`.
@@ -100,9 +100,7 @@ Choose a number from below, or type in your own value

   \ "us-east1"
13 / Northern Virginia.
   \ "us-east4"
14 / Ohio.
14 / Oregon.
   \ "us-east5"
15 / Oregon.
   \ "us-west1"
location> 12
The storage class to use when storing objects in Google Cloud Storage.
@@ -37,133 +37,89 @@ Paths may be as deep as required, e.g. `remote:directory/subdirectory`.

## Authentication

Authentication in Jottacloud is in general based on OAuth and OpenID Connect
(OIDC). There are different variants to choose from, depending on which service
you are using, e.g. a white-label service may only support one of them. Note
that there is no documentation to rely on, so the descriptions provided here
are based on observations and may not be accurate.

Jottacloud uses two optional OAuth security mechanisms, referred to as "Refresh
Token Rotation" and "Automatic Reuse Detection", which have some implications.
Access tokens normally have one hour expiry, after which they need to be
refreshed (rotated), an operation that requires the refresh token to be
supplied. Rclone does this automatically. This is standard OAuth. But in
Jottacloud, such a refresh operation not only creates a new access token, but
also a new refresh token, and invalidates the existing refresh token, the one
that was supplied. It keeps track of the history of refresh tokens, sometimes
referred to as a token family, descending from the original refresh token that
was issued after the initial authentication. This is used to detect any
attempts at reusing old refresh tokens, and triggers an immediate invalidation
of the current refresh token, and effectively the entire refresh token family.

When the current refresh token has been invalidated, the next time rclone tries
to perform a token refresh it will fail with an error message something along
the lines of:

```text
CRITICAL: Failed to create file system for "remote:": (...): couldn't fetch token: invalid_grant: maybe token expired? - try refreshing with "rclone config reconnect remote:"
```

If you run rclone with verbosity level 2 (`-vv`), you will see a debug message
with an additional error description from the OAuth response:

```text
DEBUG : remote: got fatal oauth error: oauth2: "invalid_grant" "Session doesn't have required client"
```

(The error description used to be "Stale token" instead of "Session doesn't
have required client", so you may see references to that in older descriptions
of this situation.)

When this happens, you need to re-authenticate to be able to use your remote
again, e.g. using the [config reconnect](/commands/rclone_config_reconnect/)
command as suggested in the error message. This will create an entirely new
refresh token (family).

A typical example of how you may end up in this situation is if you create
a Jottacloud remote with rclone in one location, and then copy the
configuration file to a second location where you start using rclone to access
the same remote. Eventually there will be a token refresh attempt with an
invalidated token, i.e. refresh token reuse, resulting in both instances
starting to fail with the "invalid_grant" error. It is possible to copy remote
configurations, but you must then replace the token for one of them using the
[config reconnect](https://rclone.org/commands/rclone_config_reconnect/)
command.

You can get some overview of your active tokens in your service's web user
interface, if you navigate to "Settings" and then "Security" (in which case
you end up at <https://www.jottacloud.com/web/secure> or similar). Down on
that page there is a section "My logged in devices". This contains a list
of entries which seemingly represent currently valid refresh tokens, or
refresh token families. From the right side of that list you can click a
button ("X") to revoke (invalidate) one, which means you will still have
access using an existing access token until that expires, but you will not be
able to perform a token refresh. Note that this entire "My logged in devices"
feature seems to behave a bit differently with different authentication
variants and with use of the different (white-label) services.

Authentication in Jottacloud is in general based on OAuth 2.0 and OpenID
Connect (OIDC). There are different variants to choose from, described below.
Some of the variants are only supported by the official service and not
white-label services, so this must be taken into consideration when choosing.

To access your account from multiple instances of rclone, you need to configure
each of them separately. E.g. you create a Jottacloud remote with rclone in one
location, and copy the configuration file to a second location where you also
want to run rclone and access the same remote. Then you need to replace the
token for one of them, using the [config reconnect](https://rclone.org/commands/rclone_config_reconnect/)
command. For standard authentication (described below) this means you will have
to generate a new personal login token and supply it as input. If you do not do
this, the token may easily end up being invalidated, resulting in both
instances failing with an error message something along the lines of:

```text
oauth2: cannot fetch token: 400 Bad Request
Response: {"error":"invalid_grant","error_description":"Stale token"}
```

The background for this is that OAuth tokens from Jottacloud normally have one
hour expiry, after which they will be automatically refreshed by rclone.
Jottacloud will then refresh not only the access token, but also the refresh
token. Any requests using a previous refresh token will be flagged, and lead
to the stale token error. When this happens, you need to replace the token as
described above to be able to use your remote again.

Each time you are granted access with a new token, it will be listed in the web
interface under "My logged in devices". From the right side of that list you
can click the "X" button to revoke access. This will effectively disable the
refresh token, which means you will still have access using an existing access
token until that expires, but you will not be able to refresh it.
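As a concrete sketch of that recovery (the remote name `jotta` is a placeholder):

```sh
# On the machine whose token has gone stale, re-authenticate the remote.
# For standard authentication this asks for a freshly generated
# personal login token.
rclone config reconnect jotta:
```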
### Standard

This is an OAuth variant designed for command-line applications. It is
primarily supported by the official service (jottacloud.com), but may also be
supported by some of the white-label services. The information necessary to
be able to perform authentication, like the domain name and endpoint to
connect to, is found automatically (it is encoded into the supplied login
token, described next), so you do not need to specify which service to
configure.

This is an OAuth variant designed for command-line applications. It is
primarily supported by the official service (jottacloud.com), but may also be
supported by some of the white-label services. The specific domain name and
endpoint to connect to are found automatically (it is encoded into the supplied
login token, described next).

When configuring a remote, you are asked to enter a single-use personal login
token, which you must manually generate from the account security settings in
the service's web interface. You do not need a web browser on the same machine
like with traditional OAuth, but you do need to use a web browser somewhere,
and be able to copy the generated string into your rclone configuration
session.

Log in to your service's web user interface, navigate to "Settings" and then
"Security", or, for the official service, use the direct link presented to you
by rclone when configuring the remote: <https://www.jottacloud.com/web/secure>.
Scroll down to the section "Personal login token", and click the "Generate"
button. Copy the presented string and paste it where rclone asks for it. Rclone
will then use this to perform an initial token request, and receive a regular
OAuth token which it stores in your remote configuration. There will then also
be a new entry in the "My logged in devices" list in the web interface, with
device name and application name "Jottacloud CLI".

Each time a new token is created this way, i.e. a new personal login token is
generated and traded in for an OAuth token, you get an entirely new refresh
token family, with a new entry in "My logged in devices". You can create as
many remotes as you want, and use multiple instances of rclone on the same or
different machines, as long as you configure them separately like this, and do
not get yourself into the refresh token reuse issue described above.

Log in to your account, go to "Settings" and then "Security", or use the direct
link presented to you by rclone when configuring the remote:
<https://www.jottacloud.com/web/secure>. Scroll down to the section "Personal
login token", and click the "Generate" button. Note that if you are using a
white-label service you probably can't use the direct link; you need to find
the same page in their dedicated web interface, and it may also be in a
different location than described above.

When you have successfully authenticated using a personal login token, which
means you have received a proper OAuth token, there will be an entry in the
"My logged in devices" list in the web interface. It will be listed with
application name "Jottacloud CLI".
### Traditional

Jottacloud also supports a more traditional OAuth variant. Most of the
white-label services support this, and for many of them this is the only
alternative, because they do not support personal login tokens. This method
relies on pre-defined service-specific domain names and endpoints, and rclone
needs you to specify which service to configure. This also means that any
changes to existing white-label services, or additions of new ones, need an
update in the rclone backend implementation.

Jottacloud also supports a more traditional OAuth variant. Most of the
white-label services support this, and often only this, as they do not support
personal login tokens. This method relies on pre-defined domain names and
endpoints, and rclone must therefore explicitly add any white-label services
that should be supported.

When configuring a remote, you must interactively log in to an OAuth
authorization web site, and a one-time authorization code is sent back to
rclone behind the scenes, which it uses to request an OAuth token. This means
that you need to be on a machine with an internet-connected web browser. If you
need it on a machine where this is not the case, then you will have to create
the configuration on a different machine and copy it from there. The Jottacloud
backend does not support the `rclone authorize` command. See the
[remote setup docs](/remote_setup) for details.

When configuring a remote, you must interactively log in to an OAuth
authorization web site, and a one-time authorization code is automatically
sent back to rclone, which it uses to request a token. Note that when setting
this up, you need to be on a machine with an internet-connected web browser.
If you need it on a machine where this is not the case, then you will have to
create the configuration on a different machine and copy it from there. The
jottacloud backend does not support the `rclone authorize` command. See the
[remote setup docs](/remote_setup) for details.

Jottacloud exerts some form of strict session management when authenticating
using this method. This leads to some unexpected cases of the "invalid_grant"
error described above, and effectively limits you to use of a single active
authentication on the same machine. I.e. you can only create a single rclone
remote, and you can't even log in with the service's official desktop client
while having an rclone remote configured, or else you will eventually get all
sessions invalidated and be forced to re-authenticate.

When you have successfully authenticated, there will be an entry in the
"My logged in devices" list in the web interface representing your session. It
will typically be listed with application name "Jottacloud for Desktop" or
similar (it depends on the white-label service configuration).

When you have successfully authenticated, there will be an entry in the
"My logged in devices" list in the web interface. It will typically be listed
with application name "Jottacloud for Desktop" or similar (it depends on the
white-label service configuration).

### Legacy
@@ -225,22 +181,20 @@ Type of authentication.

Choose a number from below, or type in an existing value of type string.
Press Enter for the default (standard).
   / Standard authentication.
   | This is primarily supported by the official service, but may also be
   | supported by some white-label services. It is designed for command-line
 1 | applications, and you will be asked to enter a single-use personal login
   | token which you must manually generate from the account security settings
   | in the web interface of your service.
   \ (standard)
   / Traditional authentication.
   | This is supported by the official service and all white-label services
   | that rclone knows about. You will be asked which service to connect to.
 2 | It has a limitation of only a single active authentication at a time. You
   | need to be on, or have access to, a machine with an internet-connected
   | web browser.
   \ (traditional)
   / Legacy authentication.
 3 | This is no longer supported by any known services and not recommended
   | used. You will be asked for your account's username and password.
   \ (legacy)
config_type> 1

Choose a number from below, or type in an existing value of type string.
Press Enter for the default (standard).
   / Standard authentication.
   | This is primarily supported by the official service, but may also be supported
   | by some of the white-label services. It is designed for command-line
 1 | applications, and you will be asked to enter a single-use personal login token
   | which you must manually generate from the account security settings in the
   | web interface of your service.
   \ (standard)
   / Traditional authentication.
   | This is supported by the official service and most of the white-label
 2 | services, you will be asked which service to connect to. You need to be on
   | a machine with an internet-connected web browser.
   \ (traditional)
   / Legacy authentication.
 3 | This is no longer supported by any known services and not recommended used.
   | You will be asked for your account's username and password.
   \ (legacy)
config_type> 1
@@ -131,109 +131,6 @@ section and followed by the privacy policy of Rclone.

  local configuration file.
- Rclone does not share any user data with third parties.

## User Data Collection and Storage

This section outlines how rclone accesses, uses, stores, and shares
user data obtained from service provider APIs. Our use of information
received from provider APIs will adhere to the provider API Services
User Data Policy, including the Limited Use requirements.

Rclone is a client-side command-line program that users run on their
own computers to manage their files on cloud storage services. The
rclone project does not operate any servers that store or process your
personal data. All data access and processing occurs directly on the
user's machine and between the user's machine and the provider API
servers.

### Data Accessed

When you authorize rclone to access your files on your provider, it
may access the following types of data, depending on the permissions
you grant:

- Files: Rclone accesses the metadata (filenames, sizes, modification
  times, etc.) and content of your files and folders on your provider.
  This is necessary for rclone to perform file management tasks like
  copying, syncing, moving, and listing files.

- Authentication Tokens: Rclone requests OAuth 2.0 access tokens from
  the provider. These tokens are used to authenticate your requests to
  the provider's APIs and prove that you have granted rclone
  permission to access your data.

- Basic Profile Information: As part of the authentication process,
  rclone may receive your email address to identify the connected
  account within the rclone configuration.

### Data Usage

Rclone uses the user data it accesses solely to provide its core
functionality, which is initiated and controlled entirely by you, the
user. Specifically:

- The data is used to perform file transfer and management operations
  (such as `copy`, `sync`, `move`, `list`, `delete`) between your
  local machine and your provider account as per your direct commands.

- Authentication tokens are used exclusively to make authorized API
  calls to the provider's services on your behalf.

- Your email address is used locally to help you identify which
  provider account is configured.

Rclone does not use your data for any other purpose, such as
advertising, marketing, or analysis by the rclone project developers.

### Data Sharing

Rclone does not share your user data with any third parties.

All data transfers initiated by the user occur directly between the
machine where rclone is running and the provider's servers. The rclone
project and its developers **never** have access to your
authentication tokens or your file data.

### Data Storage & Protection

- Configuration Data: Rclone stores its configuration, including the
  OAuth 2.0 tokens required to access your provider account, in a
  configuration file (`rclone.conf`) located on your local machine.

- Security: You are responsible for securing this configuration
  file on your own computer. Rclone provides a built-in option to
  encrypt the configuration file with a password for an added layer of
  security. We strongly recommend using this feature (see the example
  after this list).

- File Data: Your file data is only held in your computer's memory
  (RAM) temporarily during transfer operations. Rclone does not
  permanently store your file content on your local disk unless you
  explicitly command it to do so (e.g., by running a `copy` command
  from the provider to a local directory).
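For instance, the password can be set from the interactive menu that appears when running `rclone config` (the same menu shown in the configuration walkthroughs elsewhere in these docs):

```sh
# Choose "s) Set configuration password" in the menu to encrypt
# the local rclone.conf.
rclone config
```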
### Data Retention & Deletion

Rclone gives you full control over your data.

- Data Retention: Rclone retains the configuration data, including
  authentication tokens, on your local machine for as long as you keep
  the configuration file. This allows you to use rclone without having
  to re-authenticate for every session.

- Data Deletion: You can delete your data and revoke rclone's
  access at any time through one of the following methods:

  1. Local Deletion: You can delete the specific provider
     configuration from your `rclone.conf` file or delete the entire
     file itself. This will permanently remove the authentication
     tokens from your machine.

  2. Revoking Access via the provider: You can revoke rclone's
     access to your provider directly from the provider's
     security settings page. This will invalidate the authentication
     tokens, and rclone will no longer be able to access your data.
     For example, if you are using Google you can manage your permissions
     [on the Google permissions page](https://myaccount.google.com/permissions).

## Resources & Further Information

- [Data Protection Act 1998](http://www.legislation.gov.uk/ukpga/1998/29/contents)
@@ -21,9 +21,7 @@ The S3 backend can be used with a number of different providers:

{{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
{{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
{{< provider name="Exaba" home="https://exaba.com/" config="/s3/#exaba" >}}
{{< provider name="FileLu S5 (S3-Compatible Object Storage)" home="https://s5lu.com/" config="/s3/#filelu-s5" >}}
{{< provider name="GCS" home="https://cloud.google.com/storage/docs" config="/s3/#google-cloud-storage" >}}
{{< provider name="Hetzner" home="https://www.hetzner.com/storage/object-storage/" config="/s3/#hetzner" >}}
{{< provider name="Huawei OBS" home="https://www.huaweicloud.com/intl/en-us/product/obs.html" config="/s3/#huawei-obs" >}}
{{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
{{< provider name="IDrive e2" home="https://www.idrive.com/e2/?refer=rclone" config="/s3/#idrive-e2" >}}

@@ -40,7 +38,6 @@ The S3 backend can be used with a number of different providers:

{{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}}
{{< provider name="Pure Storage FlashBlade" home="https://www.purestorage.com/products/unstructured-data-storage.html" config="/s3/#pure-storage-flashblade" >}}
{{< provider name="Qiniu Cloud Object Storage (Kodo)" home="https://www.qiniu.com/en/products/kodo" config="/s3/#qiniu" >}}
{{< provider name="Rabata Cloud Storage" home="https://rabata.io" config="/s3/#Rabata" >}}
{{< provider name="RackCorp Object Storage" home="https://www.rackcorp.com/" config="/s3/#RackCorp" >}}
{{< provider name="Rclone Serve S3" home="/commands/rclone_serve_s3/" config="/s3/#rclone" >}}
{{< provider name="Scaleway" home="https://www.scaleway.com/en/object-storage/" config="/s3/#scaleway" >}}
@@ -3403,150 +3400,6 @@ endpoint = https://storage.googleapis.com

This is Google bug [#312292516](https://issuetracker.google.com/u/0/issues/312292516).

### Hetzner Object Storage {#hetzner}

Here is an example of making a [Hetzner Object Storage](https://www.hetzner.com/storage/object-storage/)
configuration. First run:

    rclone config

This will guide you through an interactive setup process.

```
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n

Enter name for new remote.
name> my-hetzner
Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, Hetzner, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Magalu, Minio, Netease, Outscale, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, Selectel, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others
   \ (s3)
[snip]
Storage> s3
Option provider.
Choose your S3 provider.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
[snip]
XX / Hetzner Object Storage
   \ (Hetzner)
[snip]
provider> Hetzner
Option env_auth.
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
Only applies if access_key_id and secret_access_key is blank.
Choose a number from below, or type in your own boolean value (true or false).
Press Enter for the default (false).
 1 / Enter AWS credentials in the next step.
   \ (false)
 2 / Get AWS credentials from the environment (env vars or IAM).
   \ (true)
env_auth>
Option access_key_id.
AWS Access Key ID.
Leave blank for anonymous access or runtime credentials.
Enter a value. Press Enter to leave empty.
access_key_id> ACCESS_KEY
Option secret_access_key.
AWS Secret Access Key (password).
Leave blank for anonymous access or runtime credentials.
Enter a value. Press Enter to leave empty.
secret_access_key> SECRET_KEY
Option region.
Region to connect to.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
 1 / Helsinki
   \ (hel1)
 2 / Falkenstein
   \ (fsn1)
 3 / Nuremberg
   \ (nbg1)
region>
Option endpoint.
Endpoint for Hetzner Object Storage
Choose a number from below, or type in your own value.
Press Enter to leave empty.
 1 / Helsinki
   \ (hel1.your-objectstorage.com)
 2 / Falkenstein
   \ (fsn1.your-objectstorage.com)
 3 / Nuremberg
   \ (nbg1.your-objectstorage.com)
endpoint>
Option location_constraint.
Location constraint - must be set to match the Region.
Leave blank if not sure. Used when creating buckets only.
Enter a value. Press Enter to leave empty.
location_constraint>
Option acl.
Canned ACL used when creating buckets and storing or copying objects.
This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Note that this ACL is applied when server-side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.
If the acl is an empty string then no X-Amz-Acl: header is added and
the default (private) will be used.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
   / Owner gets FULL_CONTROL.
 1 | No one else has access rights (default).
   \ (private)
   / Owner gets FULL_CONTROL.
 2 | The AllUsers group gets READ access.
   \ (public-read)
acl>
Edit advanced config?
y) Yes
n) No (default)
y/n>
Configuration complete.
Options:
- type: s3
- provider: Hetzner
- access_key_id: ACCESS_KEY
- secret_access_key: SECRET_KEY
Keep this "my-hetzner" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d>
Current remotes:

Name                 Type
====                 ====
my-hetzner           s3

e) Edit existing remote
n) New remote
d) Delete remote
r) Rename remote
c) Copy remote
s) Set configuration password
q) Quit config
e/n/d/r/c/s/q>
```

This will leave the config file looking like this.

```
[my-hetzner]
type = s3
provider = Hetzner
access_key_id = ACCESS_KEY
secret_access_key = SECRET_KEY
region = hel1
endpoint = hel1.your-objectstorage.com
acl = private
```
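Once configured, the remote works like any other rclone remote; for example (the bucket name `mybucket` is a placeholder):

```sh
# Create a bucket and copy a local directory into it.
rclone mkdir my-hetzner:mybucket
rclone copy /path/to/files my-hetzner:mybucket
```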
### Huawei OBS {#huawei-obs}

Object Storage Service (OBS) provides stable, secure, efficient, and easy-to-use cloud storage that lets you store virtually any volume of unstructured data in any format and access it from anywhere.
@@ -5782,244 +5635,6 @@ Name Type

qiniu                s3
```

### FileLu S5 {#filelu-s5}

[FileLu S5 Object Storage](https://s5lu.com) is an S3-compatible object storage system.
It provides multiple region options (Global, US-East, EU-Central, AP-Southeast, and ME-Central) while using a single endpoint (`s5lu.com`).
FileLu S5 is designed for scalability, security, and simplicity, with predictable pricing and no hidden charges for data transfers or API requests.

Here is an example of making a configuration. First run:

```sh
rclone config
```

This will guide you through an interactive setup process.

```text
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n

Enter name for new remote.
name> s5lu

Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
XX / Amazon S3 Compliant Storage Providers including AWS,... FileLu, ...
   \ (s3)
[snip]
Storage> s3

Option provider.
Choose your S3 provider.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
[snip]
XX / FileLu S5 Object Storage
   \ (FileLu)
[snip]
provider> FileLu

Option env_auth.
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
Only applies if access_key_id and secret_access_key is blank.
Choose a number from below, or type in your own boolean value (true or false).
Press Enter for the default (false).
 1 / Enter AWS credentials in the next step.
   \ (false)
 2 / Get AWS credentials from the environment (env vars or IAM).
   \ (true)
env_auth>

Option access_key_id.
AWS Access Key ID.
Leave blank for anonymous access or runtime credentials.
Enter a value. Press Enter to leave empty.
access_key_id> XXX

Option secret_access_key.
AWS Secret Access Key (password).
Leave blank for anonymous access or runtime credentials.
Enter a value. Press Enter to leave empty.
secret_access_key> XXX

Option endpoint.
Endpoint for S3 API.
Required when using an S3 clone.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
 1 / Global
   \ (global)
 2 / North America (US-East)
   \ (us-east)
 3 / Europe (EU-Central)
   \ (eu-central)
 4 / Asia Pacific (AP-Southeast)
   \ (ap-southeast)
 5 / Middle East (ME-Central)
   \ (me-central)
region> 1

Edit advanced config?
y) Yes
n) No (default)
y/n> n

Configuration complete.
Options:
- type: s3
- provider: FileLu
- access_key_id: XXX
- secret_access_key: XXX
- endpoint: s5lu.com
Keep this "s5lu" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```

This will leave the config file looking like this.

```
[s5lu]
type = s3
provider = FileLu
access_key_id = XXX
secret_access_key = XXX
endpoint = s5lu.com
```

### Rabata {#Rabata}

[Rabata](https://rabata.io) is an S3-compatible secure cloud storage service that offers flat, transparent pricing (no API request fees)
while supporting standard S3 APIs. It is suitable for backup, application storage, media workflows, and archive use cases.

Server-side copy is not implemented with Rabata, which also means the modification time of objects cannot be updated.

Rclone config:

```
rclone config
No remotes found, make a new one?
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n

Enter name for new remote.
name> Rabata

Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
XX / Amazon S3 Compliant Storage Providers including AWS, ...
   \ (s3)
[snip]
Storage> s3

Option provider.
Choose your S3 provider.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
[snip]
XX / Rabata Cloud Storage
   \ (Rabata)
[snip]
provider> Rabata

Option env_auth.
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
Only applies if access_key_id and secret_access_key is blank.
Choose a number from below, or type in your own boolean value (true or false).
Press Enter for the default (false).
 1 / Enter AWS credentials in the next step.
   \ (false)
 2 / Get AWS credentials from the environment (env vars or IAM).
   \ (true)
env_auth>

Option access_key_id.
AWS Access Key ID.
Leave blank for anonymous access or runtime credentials.
Enter a value. Press Enter to leave empty.
access_key_id> ACCESS_KEY_ID

Option secret_access_key.
AWS Secret Access Key (password).
Leave blank for anonymous access or runtime credentials.
Enter a value. Press Enter to leave empty.
secret_access_key> SECRET_ACCESS_KEY

Option region.
Region where your bucket will be created and your data stored.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
 1 / US East (N. Virginia)
   \ (us-east-1)
 2 / EU (Ireland)
   \ (eu-west-1)
 3 / EU (London)
   \ (eu-west-2)
region> 3

Option endpoint.
Endpoint for Rabata Object Storage.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
 1 / US East (N. Virginia)
   \ (s3.us-east-1.rabata.io)
 2 / EU West (Ireland)
   \ (s3.eu-west-1.rabata.io)
 3 / EU West (London)
   \ (s3.eu-west-2.rabata.io)
endpoint> 3

Option location_constraint.
Location where your bucket will be created and your data stored.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
 1 / US East (N. Virginia)
   \ (us-east-1)
 2 / EU (Ireland)
   \ (eu-west-1)
 3 / EU (London)
   \ (eu-west-2)
location_constraint> 3

Edit advanced config?
y) Yes
n) No (default)
y/n> n

Configuration complete.
Options:
- type: s3
- provider: Rabata
- access_key_id: ACCESS_KEY_ID
- secret_access_key: SECRET_ACCESS_KEY
- region: eu-west-2
- endpoint: s3.eu-west-2.rabata.io
- location_constraint: eu-west-2
Keep this "rabata" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y

Current remotes:

Name                 Type
====                 ====
rabata               s3
```

### RackCorp {#RackCorp}

[RackCorp Object Storage](https://www.rackcorp.com/storage/s3storage) is an S3 compatible object storage platform from your friendly cloud provider RackCorp.
@@ -62,12 +62,11 @@ Thank you very much to our sponsors:

{{< sponsor src="/img/logos/backblaze.svg" width="300" height="200" title="Visit our sponsor Backblaze" link="https://www.backblaze.com/cloud-storage-rclonead?utm_source=rclone&utm_medium=paid&utm_campaign=rclone-website-20250715">}}
{{< sponsor src="/img/logos/idrive_e2.svg" width="300" height="200" title="Visit our sponsor IDrive e2" link="https://www.idrive.com/e2/?refer=rclone">}}
{{< sponsor src="/img/logos/filescom-enterprise-grade-workflows.png" width="300" height="200" title="Start Your Free Trial Today" link="https://files.com/?utm_source=rclone&utm_medium=referral&utm_campaign=banner&utm_term=rclone">}}
{{< sponsor src="/img/logos/mega-s4.svg" width="300" height="200" title="MEGA S4: New S3 compatible object storage. High scale. Low cost. Free egress." link="https://mega.io/objectstorage?utm_source=rclone&utm_medium=referral&utm_campaign=rclone-mega-s4&mct=rclonepromo">}}
{{< sponsor src="/img/logos/sia.svg" width="200" height="200" title="Visit our sponsor sia" link="https://sia.tech">}}
{{< sponsor src="/img/logos/route4me.svg" width="400" height="200" title="Visit our sponsor Route4Me" link="https://route4me.com/">}}
{{< sponsor src="/img/logos/rcloneview.svg" width="300" height="200" title="Visit our sponsor RcloneView" link="https://rcloneview.com/">}}
{{< sponsor src="/img/logos/rcloneui.svg" width="300" height="200" title="Visit our sponsor RcloneUI" link="https://github.com/rclone-ui/rclone-ui">}}
{{< sponsor src="/img/logos/filelu-rclone.svg" width="250" height="200" title="Visit our sponsor FileLu" link="https://filelu.com/">}}
{{< sponsor src="/img/logos/filelu-rclone.svg" width="330" height="200" title="Visit our sponsor FileLu" link="https://filelu.com/">}}
{{< sponsor src="/img/logos/torbox.png" width="200" height="200" title="Visit our sponsor TORBOX" link="https://www.torbox.app/">}}

<!-- markdownlint-restore -->
@@ -20,7 +20,6 @@

<a class="dropdown-item" href="/gui/"><i class="fa fa-book fa-fw"></i> GUI</a>
<a class="dropdown-item" href="/rc/"><i class="fa fa-book fa-fw"></i> Remote Control</a>
<a class="dropdown-item" href="/remote_setup/"><i class="fa fa-book fa-fw"></i> Remote Setup</a>
<a class="dropdown-item" href="/cluster/"><i class="fa fa-book fa-fw"></i> Cluster</a>
<a class="dropdown-item" href="/changelog/"><i class="fa fa-book fa-fw"></i> Changelog</a>
<a class="dropdown-item" href="/bugs/"><i class="fa fa-book fa-fw"></i> Bugs</a>
<a class="dropdown-item" href="/faq/"><i class="fa fa-book fa-fw"></i> FAQ</a>
@@ -82,7 +82,7 @@ type accountValues struct {

	max     int64     // if >=0 the max number of bytes to transfer
	start   time.Time // Start time of first read
	lpTime  time.Time // Time of last average measurement
	lpBytes int64     // Number of bytes read since last measurement
	lpBytes int       // Number of bytes read since last measurement
	avg     float64   // Moving average of last few measurements in Byte/s
}
@@ -344,20 +344,15 @@ func (acc *Account) limitPerFileBandwidth(n int) {
 	}
 }
 
-// Account the read
-func (acc *Account) accountReadN(n int64) {
+// Account the read and limit bandwidth
+func (acc *Account) accountRead(n int) {
 	// Update Stats
 	acc.values.mu.Lock()
 	acc.values.lpBytes += n
-	acc.values.bytes += n
+	acc.values.bytes += int64(n)
 	acc.values.mu.Unlock()
 
-	acc.stats.Bytes(n)
-}
-
-// Account the read and limit bandwidth
-func (acc *Account) accountRead(n int) {
-	acc.accountReadN(int64(n))
+	acc.stats.Bytes(int64(n))
 
 	TokenBucket.LimitBandwidth(TokenBucketSlotAccounting, n)
 	acc.limitPerFileBandwidth(n)
@@ -432,15 +427,6 @@ func (acc *Account) AccountRead(n int) (err error) {
 	return err
 }
 
-// AccountReadN account having read n bytes
-//
-// Does not obey any transfer limits, bandwidth limits, etc.
-func (acc *Account) AccountReadN(n int64) {
-	acc.mu.Lock()
-	defer acc.mu.Unlock()
-	acc.accountReadN(n)
-}
-
 // Close the object
 func (acc *Account) Close() error {
 	acc.mu.Lock()
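
Taken together, the hunks above retire the AccountReadN entry point (which skipped all transfer and bandwidth limits) and fold its bookkeeping into accountRead, so the statistics update and the bandwidth limiting now happen in one place. Below is a minimal sketch of the consolidated function as implied by the diff, reconstructed for illustration with the surrounding Account type and locking details elided:

// Account the read and limit bandwidth
func (acc *Account) accountRead(n int) {
	// Update the running stats under the values lock
	acc.values.mu.Lock()
	acc.values.lpBytes += n      // lpBytes is a plain int after the struct change above
	acc.values.bytes += int64(n) // the cumulative total stays int64
	acc.values.mu.Unlock()

	acc.stats.Bytes(int64(n))

	// Every accounted read now also passes through the bandwidth limiters
	TokenBucket.LimitBandwidth(TokenBucketSlotAccounting, n)
	acc.limitPerFileBandwidth(n)
}

The caller of AccountReadN visible in this diff is the cluster controller's processCompletedJob, which is itself deleted further down, so no user of the limit-free path remains.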
@@ -100,7 +100,7 @@ func TestAccountRead(t *testing.T) {
 
 	assert.True(t, acc.values.start.IsZero())
 	acc.values.mu.Lock()
-	assert.Equal(t, int64(0), acc.values.lpBytes)
+	assert.Equal(t, 0, acc.values.lpBytes)
 	assert.Equal(t, int64(0), acc.values.bytes)
 	acc.values.mu.Unlock()
 	assert.Equal(t, int64(0), stats.bytes)
@@ -113,7 +113,7 @@ func TestAccountRead(t *testing.T) {
 
 	assert.False(t, acc.values.start.IsZero())
 	acc.values.mu.Lock()
-	assert.Equal(t, int64(2), acc.values.lpBytes)
+	assert.Equal(t, 2, acc.values.lpBytes)
 	assert.Equal(t, int64(2), acc.values.bytes)
 	acc.values.mu.Unlock()
 	assert.Equal(t, int64(2), stats.bytes)
@@ -145,7 +145,7 @@ func testAccountWriteTo(t *testing.T, withBuffer bool) {
 
 	assert.True(t, acc.values.start.IsZero())
 	acc.values.mu.Lock()
-	assert.Equal(t, int64(0), acc.values.lpBytes)
+	assert.Equal(t, 0, acc.values.lpBytes)
 	assert.Equal(t, int64(0), acc.values.bytes)
 	acc.values.mu.Unlock()
 	assert.Equal(t, int64(0), stats.bytes)
@@ -159,7 +159,7 @@ func testAccountWriteTo(t *testing.T, withBuffer bool) {
 
 	assert.False(t, acc.values.start.IsZero())
 	acc.values.mu.Lock()
-	assert.Equal(t, int64(len(buf)), acc.values.lpBytes)
+	assert.Equal(t, len(buf), acc.values.lpBytes)
 	assert.Equal(t, int64(len(buf)), acc.values.bytes)
 	acc.values.mu.Unlock()
 	assert.Equal(t, int64(len(buf)), stats.bytes)
@@ -1,598 +0,0 @@
-// Package cluster implements a mechanism to distribute work over a
-// cluster of rclone instances.
-package cluster
-
-import (
-	"context"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"path"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/accounting"
-	"github.com/rclone/rclone/fs/filter"
-	"github.com/rclone/rclone/fs/operations"
-	"github.com/rclone/rclone/fs/rc"
-	"github.com/rclone/rclone/lib/atexit"
-	"github.com/rclone/rclone/lib/errcount"
-	"golang.org/x/sync/errgroup"
-)
-
-// ErrClusterNotConfigured is returned from creation functions.
-var ErrClusterNotConfigured = errors.New("cluster is not configured")
-
-// If we don't hear from workers in this time we assume they have timed out
-// and re-assign their jobs.
-const workerTimeout = 2 * time.Second
-
-// Cluster describes the workings of the current cluster.
-type Cluster struct {
-	jobs        *Jobs
-	id          string
-	batchFiles  int
-	batchSize   fs.SizeSuffix
-	cleanup     fs.ClusterCleanup    // how we cleanup cluster files
-	_config     rc.Params            // for rc
-	_filter     rc.Params            // for rc
-	cancel      func()               // stop bg job
-	wg          sync.WaitGroup       // bg job finished
-	quit        chan struct{}        // signal graceful stop
-	sync        chan chan<- struct{} // sync the current jobs
-	quitWorkers bool                 // if set, send workers a stop signal on Shutdown
-
-	workers     map[string]*WorkerStatus // worker ID => status
-	deadWorkers map[string]struct{}
-
-	mu           sync.Mutex
-	currentBatch Batch
-	inflight     map[string]Batch
-	shutdown     bool
-}
-
-// Batch is a collection of rc tasks to do
-type Batch struct {
-	size   int64       // size in batch
-	Path   string      `json:"_path"`
-	Inputs []rc.Params `json:"inputs"`
-	Config rc.Params   `json:"_config,omitempty"`
-	Filter rc.Params   `json:"_filter,omitempty"`
-
-	trs   []*accounting.Transfer // transfer for each Input
-	sizes []int64                // sizes for each Input
-}
-
-// BatchResult has the results of the batch as received.
-type BatchResult struct {
-	Results []rc.Params `json:"results"`
-
-	// Error returns
-	Error  string `json:"error"`
-	Status int    `json:"status"`
-	Input  string `json:"input"`
-	Path   string `json:"path"`
-}
-
-// NewCluster creates a new cluster from the config in ctx.
-//
-// It may return nil if no cluster is configured.
-func NewCluster(ctx context.Context) (*Cluster, error) {
-	ci := fs.GetConfig(ctx)
-	if ci.Cluster == "" {
-		return nil, nil
-	}
-	jobs, err := NewJobs(ctx)
-	if err != nil {
-		return nil, err
-	}
-	c := &Cluster{
-		jobs:        jobs,
-		id:          ci.ClusterID,
-		quitWorkers: ci.ClusterQuitWorkers,
-		batchFiles:  ci.ClusterBatchFiles,
-		batchSize:   ci.ClusterBatchSize,
-		cleanup:     ci.ClusterCleanup,
-		quit:        make(chan struct{}),
-		sync:        make(chan chan<- struct{}),
-		inflight:    make(map[string]Batch),
-		workers:     make(map[string]*WorkerStatus),
-		deadWorkers: make(map[string]struct{}),
-	}
-
-	// Configure _config
-	configParams, err := fs.ConfigOptionsInfo.NonDefaultRC(ci)
-	if err != nil {
-		return nil, fmt.Errorf("failed to read global config: %w", err)
-	}
-	// Remove any global cluster config
-	for k := range configParams {
-		if strings.HasPrefix(k, "Cluster") {
-			delete(configParams, k)
-		}
-	}
-	if len(configParams) != 0 {
-		fs.Debugf(nil, "Overridden global config: %#v", configParams)
-	}
-	c._config = rc.Params(configParams)
-
-	// Configure _filter
-	fi := filter.GetConfig(ctx)
-	if !fi.InActive() {
-		filterParams, err := filter.OptionsInfo.NonDefaultRC(fi)
-		if err != nil {
-			return nil, fmt.Errorf("failed to read filter config: %w", err)
-		}
-		fs.Debugf(nil, "Overridden filter config: %#v", filterParams)
-		c._filter = rc.Params(filterParams)
-	}
-
-	err = c.jobs.createDirectoryStructure(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	// Start the background worker
-	bgCtx, cancel := context.WithCancel(context.Background())
-	c.cancel = cancel
-	c.wg.Add(1)
-	go c.run(bgCtx)
-
-	fs.Logf(c.jobs.f, "Started cluster master")
-
-	return c, nil
-}
-
-var (
-	globalClusterMu sync.Mutex
-	globalCluster   *Cluster
-)
-
-// GetCluster starts or gets a cluster.
-//
-// If no cluster is configured or the cluster can't be started then it
-// returns nil.
-func GetCluster(ctx context.Context) *Cluster {
-	globalClusterMu.Lock()
-	defer globalClusterMu.Unlock()
-
-	if globalCluster != nil {
-		return globalCluster
-	}
-
-	cluster, err := NewCluster(ctx)
-	if err != nil {
-		fs.Errorf(nil, "Failed to start cluster: %v", err)
-		return nil
-	}
-	if cluster != nil {
-		atexit.Register(func() {
-			err := cluster.Shutdown(context.Background())
-			if err != nil {
-				fs.Errorf(nil, "Failed to shutdown cluster: %v", err)
-			}
-		})
-	}
-
-	globalCluster = cluster
-	return globalCluster
-}
-
-// Send the current batch for processing
-//
-// call with c.mu held
-func (c *Cluster) sendBatch(ctx context.Context) (err error) {
-	// Do nothing if the batch is empty
-	if len(c.currentBatch.Inputs) == 0 {
-		return nil
-	}
-
-	// Get and reset current batch
-	b := c.currentBatch
-	c.currentBatch = Batch{}
-
-	b.Path = "job/batch"
-	b.Config = c._config
-	b.Filter = c._filter
-
-	// write the pending job
-	name, err := c.jobs.writeJob(ctx, clusterPending, &b)
-	if err != nil {
-		return err
-	}
-
-	fs.Infof(name, "written cluster batch file")
-	c.inflight[name] = b
-	return nil
-}
-
-// Add the command to the current batch
-func (c *Cluster) addToBatch(ctx context.Context, obj fs.Object, in rc.Params, size int64, tr *accounting.Transfer) (err error) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	if c.shutdown {
-		return errors.New("internal error: can't add file to Shutdown cluster")
-	}
-
-	c.currentBatch.Inputs = append(c.currentBatch.Inputs, in)
-	c.currentBatch.size += size
-	c.currentBatch.trs = append(c.currentBatch.trs, tr)
-	c.currentBatch.sizes = append(c.currentBatch.sizes, size)
-
-	if c.currentBatch.size >= int64(c.batchSize) || len(c.currentBatch.Inputs) >= c.batchFiles {
-		err = c.sendBatch(ctx)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// Move does operations.Move via the cluster.
-//
-// Move src object to dst or fdst if nil. If dst is nil then it uses
-// remote as the name of the new object.
-func (c *Cluster) Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (err error) {
-	tr := accounting.Stats(ctx).NewTransfer(src, fdst)
-	if operations.SkipDestructive(ctx, src, "cluster move") {
-		in := tr.Account(ctx, nil)
-		in.DryRun(src.Size())
-		tr.Done(ctx, nil)
-		return nil
-	}
-	fsrc, ok := src.Fs().(fs.Fs)
-	if !ok {
-		err = errors.New("internal error: cluster move: can't cast src.Fs() to fs.Fs")
-		tr.Done(ctx, err)
-		return err
-	}
-	in := rc.Params{
-		"_path":     "operations/movefile",
-		"dstFs":     fs.ConfigStringFull(fdst),
-		"dstRemote": remote,
-		"srcFs":     fs.ConfigStringFull(fsrc),
-		"srcRemote": src.Remote(),
-	}
-	if dst != nil {
-		in["dstRemote"] = dst.Remote()
-	}
-	return c.addToBatch(ctx, src, in, src.Size(), tr)
-}
-
-// Copy does operations.Copy via the cluster.
-//
-// Copy src object to dst or fdst if nil. If dst is nil then it uses
-// remote as the name of the new object.
-func (c *Cluster) Copy(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (err error) {
-	tr := accounting.Stats(ctx).NewTransfer(src, fdst)
-	if operations.SkipDestructive(ctx, src, "cluster copy") {
-		in := tr.Account(ctx, nil)
-		in.DryRun(src.Size())
-		tr.Done(ctx, nil)
-		return nil
-	}
-	fsrc, ok := src.Fs().(fs.Fs)
-	if !ok {
-		err = errors.New("internal error: cluster copy: can't cast src.Fs() to fs.Fs")
-		tr.Done(ctx, err)
-		return err
-	}
-	in := rc.Params{
-		"_path":     "operations/copyfile",
-		"dstFs":     fs.ConfigStringFull(fdst),
-		"dstRemote": remote,
-		"srcFs":     fs.ConfigStringFull(fsrc),
-		"srcRemote": src.Remote(),
-	}
-	if dst != nil {
-		in["dstRemote"] = dst.Remote()
-	}
-	return c.addToBatch(ctx, src, in, src.Size(), tr)
-}
-
-// DeleteFile does operations.DeleteFile via the cluster
-//
-// If useBackupDir is set and --backup-dir is in effect then it moves
-// the file to there instead of deleting
-func (c *Cluster) DeleteFile(ctx context.Context, dst fs.Object) (err error) {
-	tr := accounting.Stats(ctx).NewCheckingTransfer(dst, "deleting")
-	err = accounting.Stats(ctx).DeleteFile(ctx, dst.Size())
-	if err != nil {
-		tr.Done(ctx, err)
-		return err
-	}
-	if operations.SkipDestructive(ctx, dst, "cluster delete") {
-		tr.Done(ctx, nil)
-		return
-	}
-	fdst, ok := dst.Fs().(fs.Fs)
-	if !ok {
-		err = errors.New("internal error: cluster delete: can't cast dst.Fs() to fs.Fs")
-		tr.Done(ctx, nil)
-		return err
-	}
-	in := rc.Params{
-		"_path":  "operations/deletefile",
-		"fs":     fs.ConfigStringFull(fdst),
-		"remote": dst.Remote(),
-	}
-	return c.addToBatch(ctx, dst, in, 0, tr)
-}
-
-// processCompletedJob loads the job and checks it off
-func (c *Cluster) processCompletedJob(ctx context.Context, obj fs.Object) error {
-	name := path.Base(obj.Remote())
-	name, _ = strings.CutSuffix(name, ".json")
-	fs.Debugf(nil, "cluster: processing completed job %q", name)
-
-	var output BatchResult
-	err := c.jobs.readJob(ctx, obj, &output)
-	if err != nil {
-		return fmt.Errorf("check jobs read: %w", err)
-	}
-
-	c.mu.Lock()
-	input, ok := c.inflight[name]
-	// FIXME delete or save job
-	if !ok {
-		for k := range c.inflight {
-			fs.Debugf(nil, "key %q", k)
-		}
-		c.mu.Unlock()
-		return fmt.Errorf("check jobs: job %q not found", name)
-	}
-	c.mu.Unlock()
-
-	// Delete the inflight entry when batch is processed
-	defer func() {
-		c.mu.Lock()
-		delete(c.inflight, name)
-		c.mu.Unlock()
-	}()
-
-	// Check job
-	if output.Error != "" {
-		return fmt.Errorf("cluster: failed to run batch job: %s (%d)", output.Error, output.Status)
-	}
-	if len(input.Inputs) != len(output.Results) {
-		return fmt.Errorf("cluster: input had %d jobs but output had %d", len(input.Inputs), len(output.Results))
-	}
-
-	// Run through the batch and mark operations as successful or not
-	for i := range input.Inputs {
-		in := input.Inputs[i]
-		tr := input.trs[i]
-		size := input.sizes[i]
-		out := output.Results[i]
-		errorString, hasError := out["error"]
-		var err error
-		if hasError && errorString != "" {
-			err = fmt.Errorf("cluster: worker error: %s (%v)", errorString, out["status"])
-		}
-		if err == nil && in["_path"] == "operations/movefile" {
-			accounting.Stats(ctx).Renames(1)
-		}
-		acc := tr.Account(ctx, nil)
-		acc.AccountReadN(size)
-		tr.Done(ctx, err)
-		remote, ok := in["dstRemote"]
-		if !ok {
-			remote = in["remote"]
-		}
-		if err == nil {
-			fs.Infof(remote, "cluster %s successful", in["_path"])
-		} else {
-			fs.Errorf(remote, "cluster %s failed: %v", in["_path"], err)
-		}
-	}
-
-	return nil
-}
-
-// loadWorkerStatus updates the worker status
-func (c *Cluster) loadWorkerStatus(ctx context.Context) error {
-	objs, err := c.jobs.listDir(ctx, clusterStatus)
-	if err != nil {
-		return fmt.Errorf("cluster: get job status list failed: %w", err)
-	}
-	ec := errcount.New()
-	g, gCtx := errgroup.WithContext(ctx)
-	var mu sync.Mutex
-	for _, obj := range objs {
-		g.Go(func() error {
-			buf, err := c.jobs.readFile(gCtx, obj)
-			if err != nil {
-				ec.Add(fmt.Errorf("read object: %w", err))
-				return nil
-			}
-			workerStatus := new(WorkerStatus)
-			err = json.Unmarshal(buf, workerStatus)
-			if err != nil {
-				ec.Add(fmt.Errorf("status json: %w", err))
-				return nil
-			}
-			mu.Lock()
-			c.workers[workerStatus.ID] = workerStatus
-			mu.Unlock()
-			return nil
-		})
-	}
-	return ec.Err("cluster: load status")
-}
-
-// checkWorkers loads the worker status
-func (c *Cluster) checkWorkers(ctx context.Context) {
-	err := c.loadWorkerStatus(ctx)
-	if err != nil {
-		fs.Errorf(nil, "failed to read some worker status: %v", err)
-	}
-	for workerID, status := range c.workers {
-		timeSinceUpdated := time.Since(status.Updated)
-		if timeSinceUpdated > workerTimeout {
-			if _, isDead := c.deadWorkers[workerID]; isDead {
-				continue
-			}
-			fs.Errorf(nil, "cluster: haven't heard from worker %q for %v - assuming dead", workerID, timeSinceUpdated)
-			// Find any jobs claimed by worker and restart
-			objs, err := c.jobs.listDir(ctx, clusterProcessing)
-			if err != nil {
-				fs.Errorf(nil, "cluster: failed to find pending jobs: %v", err)
-				continue
-			}
-			for _, obj := range objs {
-				fs.Errorf(obj, "cluster: checking job")
-				// Jobs are named {jobID}-{workerID}.json
-				name := strings.TrimSuffix(path.Base(obj.Remote()), ".json")
-				dash := strings.LastIndex(name, "-")
-				if dash < 0 {
-					fs.Errorf(nil, "cluster: failed to find dash in job %q", name)
-					continue
-				}
-				jobID, jobWorkerID := name[:dash], name[dash+1:]
-				fs.Errorf(obj, "cluster: checking jobID %q, workerID %q", jobID, jobWorkerID)
-				if workerID != jobWorkerID {
-					fs.Debugf(nil, "cluster: job %q doesn't match %q", jobWorkerID, workerID)
-					continue
-				}
-				// Found a job running on worker - rename it back to Pending
-				newRemote := path.Join(clusterPending, jobID+".json")
-				_, err = c.jobs.rename(ctx, obj, newRemote)
-				if err != nil {
-					fs.Errorf(nil, "cluster: failed to restart job %q: %v", jobID, err)
-					continue
-				}
-				fs.Errorf(nil, "cluster: restarted job %q", jobID)
-			}
-			c.deadWorkers[workerID] = struct{}{}
-		} else {
-			if _, isDead := c.deadWorkers[workerID]; isDead {
-				fs.Errorf(nil, "cluster: dead worker %q came back to life!", workerID)
-				delete(c.deadWorkers, workerID)
-			}
-		}
-	}
-}
-
-// checkJobs sees if there are any completed jobs
-func (c *Cluster) checkJobs(ctx context.Context) {
-	objs, err := c.jobs.listDir(ctx, clusterDone)
-	if err != nil {
-		fs.Errorf(nil, "cluster: get completed job list failed: %v", err)
-		return
-	}
-	for _, obj := range objs {
-		err := c.processCompletedJob(ctx, obj)
-		status := "output-ok"
-		ok := true
-		if err != nil {
-			status = "output-failed"
-			ok = false
-			fs.Errorf(nil, "cluster: process completed job failed: %v", err)
-		}
-		c.jobs.finish(ctx, obj, status, ok)
-	}
-}
-
-// Run the background process
-func (c *Cluster) run(ctx context.Context) {
-	defer c.wg.Done()
-	checkJobs := time.NewTicker(clusterCheckJobsInterval)
-	defer checkJobs.Stop()
-	checkWorkers := time.NewTicker(clusterCheckWorkersInterval)
-	defer checkWorkers.Stop()
-	var syncedChans []chan<- struct{}
-	for {
-		select {
-		case <-ctx.Done():
-			return
-		case <-c.quit:
-			fs.Debugf(nil, "cluster: quit request received")
-			return
-		case synced := <-c.sync:
-			syncedChans = append(syncedChans, synced)
-			fs.Debugf(nil, "cluster: sync request received")
-		case <-checkWorkers.C:
-			c.checkWorkers(ctx)
-		case <-checkJobs.C:
-		}
-		c.checkJobs(ctx)
-		if len(syncedChans) > 0 {
-			c.mu.Lock()
-			n := len(c.inflight)
-			c.mu.Unlock()
-			if n == 0 {
-				fs.Debugf(nil, "cluster: synced")
-				for _, synced := range syncedChans {
-					synced <- struct{}{}
-				}
-				syncedChans = nil
-			}
-		}
-	}
-}
-
-// Sync the cluster.
-//
-// Call this when all job items have been added to the cluster.
-//
-// This will wait for any outstanding jobs to finish regardless of who
-// put them in
-func (c *Cluster) Sync(ctx context.Context) error {
-	// Flush any outstanding
-	c.mu.Lock()
-	err := c.sendBatch(ctx)
-	c.mu.Unlock()
-
-	// Wait for the cluster to be empty
-	done := make(chan struct{})
-	c.sync <- done
-	<-done
-
-	return err
-}
-
-// Shutdown the cluster.
-//
-// Call this when all job items have been added to the cluster.
-//
-// This will wait for any outstanding jobs to finish.
-func (c *Cluster) Shutdown(ctx context.Context) (err error) {
-	c.mu.Lock()
-	inBatch := len(c.currentBatch.Inputs)
-	inFlight := len(c.inflight)
-	shutdown := c.shutdown
-	c.shutdown = true
-	c.mu.Unlock()
-
-	if inBatch > 0 {
-		err = errors.Join(nil, fmt.Errorf("%d items batched on cluster shutdown", inBatch))
-	}
-	if inFlight > 0 {
-		err = errors.Join(nil, fmt.Errorf("%d items in flight on cluster shutdown", inFlight))
-	}
-	if shutdown {
-		fs.Debugf(nil, "cluster: already shutdown")
-		return nil
-	}
-	c.quit <- struct{}{}
-	fs.Debugf(nil, "Waiting for cluster to finish")
-	c.wg.Wait()
-
-	// Send a quit job
-	if c.quitWorkers {
-		fs.Logf(nil, "Sending quit to workers")
-		quitErr := c.jobs.writeQuitJob(ctx, clusterPending)
-		if quitErr != nil {
-			err = errors.Join(err, fmt.Errorf("shutdown quit: %w", quitErr))
-		}
-	}
-	return err
-}
-
-// Abort the cluster and any outstanding jobs.
-func (c *Cluster) Abort() {
-	c.cancel()
-	c.wg.Wait()
-}
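
For orientation while reading the deleted controller above: addToBatch accumulates rc commands and flushes via sendBatch as soon as the batch reaches batchFiles entries or batchSize bytes. The following is a self-contained toy version of just that flush rule, using local names rather than rclone's types, to make the threshold behaviour concrete:

package main

import "fmt"

// Toy version of the deleted addToBatch/sendBatch flush rule: a batch is
// sent once it holds batchFiles entries or batchSize bytes in total.
const (
	batchFiles = 3   // stand-in for the cluster_batch_files option
	batchSize  = 100 // stand-in for the cluster_batch_size option, in bytes
)

type batch struct {
	inputs []string
	size   int64
}

func (b *batch) add(name string, size int64) {
	b.inputs = append(b.inputs, name)
	b.size += size
	if b.size >= batchSize || len(b.inputs) >= batchFiles {
		fmt.Printf("flush: %d files, %d bytes\n", len(b.inputs), b.size)
		*b = batch{} // get and reset the current batch
	}
}

func main() {
	var b batch
	b.add("a.txt", 10)
	b.add("b.txt", 95) // 105 bytes >= batchSize, so this flushes
	b.add("c.txt", 1)
	b.add("d.txt", 1)
	b.add("e.txt", 1) // 3 files >= batchFiles, so this flushes too
}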
@@ -1,311 +0,0 @@
-package cluster
-
-import (
-	"bytes"
-	"cmp"
-	"context"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"os"
-	"path"
-	"slices"
-	"strings"
-	"time"
-
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/cache"
-	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/object"
-	"github.com/rclone/rclone/fs/operations"
-	"github.com/rclone/rclone/lib/atexit"
-	"github.com/rclone/rclone/lib/pacer"
-	"github.com/rclone/rclone/lib/random"
-)
-
-// Batches flow from queue/pending to queue/processing/
-const (
-	clusterQueue      = "queue"
-	clusterPending    = clusterQueue + "/pending"
-	clusterProcessing = clusterQueue + "/processing"
-	clusterDone       = clusterQueue + "/done"
-	clusterFinished   = clusterQueue + "/finished"
-	clusterStatus     = clusterQueue + "/status"
-
-	minSleep      = 10 * time.Millisecond
-	maxSleep      = 2 * time.Second
-	decayConstant = 2 // bigger for slower decay, exponential
-
-	// Read the queue this often
-	clusterCheckJobsInterval = time.Second
-
-	// Write the worker status this often
-	clusterWriteStatusInterval = time.Second
-
-	// Read the worker status this often
-	clusterCheckWorkersInterval = time.Second
-
-	// Name of job which signals to the workers to quit
-	quitJob = "QUIT"
-)
-
-// Jobs is a container for sending and receiving jobs to the cluster.
-type Jobs struct {
-	remote  string            // remote for job storage
-	f       fs.Fs             // cluster remote storage
-	partial bool              // do we need to write and rename
-	hasMove bool              // set if f has server side move otherwise has server side copy
-	cleanup fs.ClusterCleanup // how we cleanup the cluster files
-	pacer   *fs.Pacer         // To pace the API calls
-}
-
-// NewJobs creates a Jobs source from the config in ctx.
-//
-// It may return nil if no cluster is configured.
-func NewJobs(ctx context.Context) (*Jobs, error) {
-	ci := fs.GetConfig(ctx)
-	if ci.Cluster == "" {
-		return nil, nil
-	}
-	f, err := cache.Get(ctx, ci.Cluster)
-	if err != nil {
-		return nil, fmt.Errorf("cluster remote creation: %w", err)
-	}
-	features := f.Features()
-	if features.Move == nil && features.Copy == nil {
-		return nil, fmt.Errorf("cluster remote must have server side move and %q doesn't", ci.Cluster)
-	}
-	jobs := &Jobs{
-		remote:  ci.Cluster,
-		f:       f,
-		partial: features.PartialUploads,
-		hasMove: features.Move != nil,
-		pacer:   fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
-		cleanup: ci.ClusterCleanup,
-	}
-	return jobs, nil
-}
-
-// Create the cluster directory structure
-func (jobs *Jobs) createDirectoryStructure(ctx context.Context) (err error) {
-	for _, dir := range []string{clusterPending, clusterProcessing, clusterDone, clusterFinished, clusterStatus} {
-		err = jobs.f.Mkdir(ctx, dir)
-		if err != nil {
-			return fmt.Errorf("cluster mkdir %q: %w", dir, err)
-		}
-	}
-	return nil
-}
-
-// rename a file
-//
-// if this returns fs.ErrorObjectNotFound then the file has already been renamed.
-func (jobs *Jobs) rename(ctx context.Context, src fs.Object, dstRemote string) (dst fs.Object, err error) {
-	features := jobs.f.Features()
-	if jobs.hasMove {
-		dst, err = features.Move(ctx, src, dstRemote)
-		if err != nil {
-			return nil, fmt.Errorf("cluster: failed to rename job file: %w", err)
-		}
-	} else {
-		dst, err = features.Copy(ctx, src, dstRemote)
-		if err != nil {
-			return nil, fmt.Errorf("cluster: failed to rename (copy phase) job file: %w", err)
-		}
-		err = src.Remove(ctx)
-		if err != nil {
-			return nil, fmt.Errorf("cluster: failed to rename (delete phase) job file: %w", err)
-		}
-	}
-	return dst, nil
-}
-
-// Finish with a jobs file
-func (jobs *Jobs) finish(ctx context.Context, obj fs.Object, status string, ok bool) {
-	var err error
-	if (ok && jobs.cleanup == fs.ClusterCleanupCompleted) || jobs.cleanup == fs.ClusterCleanupFull {
-		err = obj.Remove(ctx)
-	} else {
-		name := path.Join(clusterFinished, status, path.Base(obj.Remote()))
-		_, err = jobs.rename(ctx, obj, name)
-	}
-	if err != nil {
-		fs.Errorf(nil, "cluster: removing completed job failed: %v", err)
-	}
-}
-
-// write buf into remote
-func (jobs *Jobs) writeFile(ctx context.Context, remote string, modTime time.Time, buf []byte) error {
-	partialRemote := remote
-	if jobs.partial {
-		partialRemote = remote + ".partial"
-	}
-	// Calculate hashes
-	w, err := hash.NewMultiHasherTypes(jobs.f.Hashes())
-	if err != nil {
-		return err
-	}
-	_, err = w.Write(buf)
-	if err != nil {
-		return err
-	}
-	obji := object.NewStaticObjectInfo(partialRemote, modTime, int64(len(buf)), true, w.Sums(), jobs.f)
-	var obj fs.Object
-	err = jobs.pacer.Call(func() (bool, error) {
-		in := bytes.NewBuffer(buf)
-		obj, err = jobs.f.Put(ctx, in, obji)
-		if err != nil {
-			return true, fmt.Errorf("cluster: failed to write %q: %q", remote, err)
-		}
-		return false, nil
-	})
-	if err != nil {
-		return err
-	}
-	if jobs.partial {
-		obj, err = jobs.rename(ctx, obj, remote)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// Remove the file if it exists
-func (jobs *Jobs) removeFile(ctx context.Context, remote string) error {
-	obj, err := jobs.f.NewObject(ctx, remote)
-	if errors.Is(err, fs.ErrorObjectNotFound) || errors.Is(err, fs.ErrorDirNotFound) {
-		return nil
-	} else if err != nil {
-		return err
-	}
-	return obj.Remove(ctx)
-}
-
-// write a job to a file returning the name
-func (jobs *Jobs) writeJob(ctx context.Context, where string, job any) (name string, err error) {
-	now := time.Now().UTC()
-	name = fmt.Sprintf("%s-%s", now.Format(time.RFC3339Nano), random.String(20))
-	remote := path.Join(where, name+".json")
-	buf, err := json.MarshalIndent(job, "", "\t")
-	if err != nil {
-		return "", fmt.Errorf("cluster: job json: %w", err)
-	}
-	err = jobs.writeFile(ctx, remote, now, buf)
-	if err != nil {
-		return "", fmt.Errorf("cluster: job write: %w", err)
-	}
-	return name, nil
-}
-
-// write a quit job to a file
-func (jobs *Jobs) writeQuitJob(ctx context.Context, where string) (err error) {
-	now := time.Now().UTC()
-	remote := path.Join(where, quitJob+".json")
-	err = jobs.writeFile(ctx, remote, now, []byte("{}"))
-	if err != nil {
-		return fmt.Errorf("cluster: quit job write: %w", err)
-	}
-	return nil
-}
-
-// read buf from object
-func (jobs *Jobs) readFile(ctx context.Context, o fs.Object) (buf []byte, err error) {
-	err = jobs.pacer.Call(func() (bool, error) {
-		in, err := operations.Open(ctx, o)
-		if err != nil {
-			return true, fmt.Errorf("cluster: failed to open %q: %w", o, err)
-		}
-		buf, err = io.ReadAll(in)
-		if err != nil {
-			return true, fmt.Errorf("cluster: failed to read %q: %w", o, err)
-		}
-		err = in.Close()
-		if err != nil {
-			return true, fmt.Errorf("cluster: failed to close %q: %w", o, err)
-		}
-		return false, nil
-	})
-	if err != nil {
-		return nil, err
-	}
-	return buf, nil
-}
-
-// read a job from a file
-//
-// job should be a pointer to something to be unmarshalled
-func (jobs *Jobs) readJob(ctx context.Context, obj fs.Object, job any) error {
-	buf, err := jobs.readFile(ctx, obj)
-	if err != nil {
-		return fmt.Errorf("cluster: job read: %w", err)
-	}
-	err = json.Unmarshal(buf, job)
-	if err != nil {
-		return fmt.Errorf("cluster: job read json: %w", err)
-	}
-	return nil
-}
-
-// lists the json files in a cluster directory
-func (jobs *Jobs) listDir(ctx context.Context, dir string) (objects []fs.Object, err error) {
-	entries, err := jobs.f.List(ctx, dir)
-	if err != nil {
-		return nil, fmt.Errorf("cluster: failed to list %q: %w", dir, err)
-	}
-	entries.ForObject(func(o fs.Object) {
-		if strings.HasSuffix(o.Remote(), ".json") {
-			objects = append(objects, o)
-		}
-	})
-	slices.SortStableFunc(objects, func(a, b fs.Object) int {
-		return cmp.Compare(a.Remote(), b.Remote())
-	})
-	return objects, nil
-}
-
-// get a job from pending if there is one available.
-//
-// Returns a nil object if no jobs are available.
-//
-// FIXME should mark jobs as error jobs in here if they can't be read properly?
-func (jobs *Jobs) getJob(ctx context.Context, id string) (name string, obj fs.Object, err error) {
-	objs, err := jobs.listDir(ctx, clusterPending)
-	if err != nil {
-		return "", nil, fmt.Errorf("get job list: %w", err)
-	}
-	quit := false
-	for len(objs) > 0 {
-		obj = objs[0]
-		objs = objs[1:]
-		name = path.Base(obj.Remote())
-		name, _ = strings.CutSuffix(name, ".json")
-
-		// See if we have been asked to quit
-		if name == quitJob {
-			quit = true
-			continue
-		}
-
-		// claim the job
-		newName := fmt.Sprintf("%s-%s.json", name, id)
-		newRemote := path.Join(clusterProcessing, newName)
-		obj, err = jobs.rename(ctx, obj, newRemote)
-		if errors.Is(err, fs.ErrorObjectNotFound) {
-			// claim failed - try again
-			continue
-		}
-		if err != nil {
-			return "", nil, fmt.Errorf("get job claim: %w", err)
-		}
-		return name, obj, nil
-	}
-	// No jobs found
-	if quit {
-		fs.Logf(nil, "Exiting cluster worker on command")
-		atexit.Run()
-		os.Exit(0)
-	}
-	return "", nil, nil
-}
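
The key coordination trick in the file above is getJob's claim-by-rename: a worker claims a pending job by renaming {jobID}.json into queue/processing/{jobID}-{workerID}.json, and a rename that fails with "object not found" means another worker won the race (see the comment on rename). Here is a toy demonstration of the same idea against the local filesystem rather than an rclone remote; all names are local to the example:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
)

// claim tries to take ownership of a pending job by renaming it into the
// processing directory with the worker's ID appended, mirroring getJob.
func claim(pendingDir, processingDir, jobID, workerID string) (bool, error) {
	src := filepath.Join(pendingDir, jobID+".json")
	dst := filepath.Join(processingDir, jobID+"-"+workerID+".json")
	err := os.Rename(src, dst)
	if errors.Is(err, fs.ErrNotExist) {
		return false, nil // someone else claimed it first
	}
	return err == nil, err
}

func main() {
	_ = os.MkdirAll("queue/pending", 0o755)
	_ = os.MkdirAll("queue/processing", 0o755)
	_ = os.WriteFile("queue/pending/job1.json", []byte("{}"), 0o644)

	ok, err := claim("queue/pending", "queue/processing", "job1", "w1")
	fmt.Println(ok, err) // true <nil> - w1 owns the job
	ok, err = claim("queue/pending", "queue/processing", "job1", "w2")
	fmt.Println(ok, err) // false <nil> - already claimed by w1
}

Embedding the worker ID in the processing file name is also what lets the controller's checkWorkers hand a dead worker's jobs back by renaming them to pending.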
@@ -1,211 +0,0 @@
-package cluster
-
-import (
-	"context"
-	"encoding/json"
-	"path"
-	"sync"
-	"time"
-
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/accounting"
-	"github.com/rclone/rclone/fs/rc"
-	"github.com/rclone/rclone/fs/rc/jobs"
-	"github.com/rclone/rclone/lib/random"
-)
-
-const maxWorkersDone = 16 // maximum jobs in the done list
-
-// Worker describes a single instance of a cluster worker.
-type Worker struct {
-	jobs   *Jobs
-	cancel func()         // stop bg job
-	wg     sync.WaitGroup // bg job finished
-	id     string         // id of this worker
-	status string         // place it stores its status
-
-	jobsMu  sync.Mutex
-	running map[string]struct{} // IDs of the jobs being processed
-	done    []string            // IDs of finished jobs
-}
-
-// WorkerStatus shows the status of this worker including jobs
-// running.
-type WorkerStatus struct {
-	ID      string               `json:"id"`
-	Running map[string]rc.Params `json:"running"` // Job ID => accounting.RemoteStats
-	Done    map[string]bool      `json:"done"`    // Job ID => finished status
-	Updated time.Time            `json:"updated"`
-}
-
-// NewWorker creates a new cluster from the config in ctx.
-//
-// It may return nil if no cluster is configured.
-func NewWorker(ctx context.Context) (*Worker, error) {
-	ci := fs.GetConfig(ctx)
-	if ci.Cluster == "" {
-		return nil, nil
-	}
-	jobs, err := NewJobs(ctx)
-	if err != nil {
-		return nil, err
-	}
-	w := &Worker{
-		jobs:    jobs,
-		id:      ci.ClusterID,
-		running: make(map[string]struct{}),
-	}
-	if w.id == "" {
-		w.id = random.String(10)
-	}
-	w.status = path.Join(clusterStatus, w.id+".json")
-
-	// Start the background workers
-	bgCtx, cancel := context.WithCancel(context.Background())
-	w.cancel = cancel
-	w.wg.Add(1)
-	go w.runJobs(bgCtx)
-	w.wg.Add(1)
-	go w.runStatus(bgCtx)
-
-	fs.Logf(w.jobs.f, "Started cluster worker")
-
-	return w, nil
-}
-
-// Check to see if a job exists and run it if available
-func (w *Worker) checkJobs(ctx context.Context) {
-	name, obj, err := w.jobs.getJob(ctx, w.id)
-	if err != nil {
-		fs.Errorf(nil, "check jobs get: %v", err)
-		return
-	}
-	if obj == nil {
-		return // no jobs available
-	}
-
-	// make a stats group for this job
-	ctx = accounting.WithStatsGroup(ctx, name)
-
-	// Add job ID
-	w.jobsMu.Lock()
-	w.running[name] = struct{}{}
-	w.jobsMu.Unlock()
-	fs.Infof(nil, "write jobID %q", name)
-
-	// Remove job ID on exit
-	defer func() {
-		w.jobsMu.Lock()
-		delete(w.running, name)
-		w.done = append(w.done, name)
-		if len(w.done) > maxWorkersDone {
-			w.done = w.done[len(w.done)-maxWorkersDone : len(w.done)]
-		}
-		w.jobsMu.Unlock()
-	}()
-
-	fs.Debugf(nil, "cluster: processing pending job %q", name)
-	inBuf, err := w.jobs.readFile(ctx, obj)
-	if err != nil {
-		fs.Errorf(nil, "check jobs read: %v", err)
-		w.jobs.finish(ctx, obj, "input-error", false)
-		return
-	}
-	outBuf := jobs.NewJobFromBytes(ctx, inBuf)
-	remote := path.Join(clusterDone, name+".json")
-	err = w.jobs.writeFile(ctx, remote, time.Now(), outBuf)
-	if err != nil {
-		fs.Errorf(nil, "check jobs failed to write output: %v", err)
-		return
-	}
-	w.jobs.finish(ctx, obj, "input-ok", true)
-	fs.Debugf(nil, "cluster: processed pending job %q", name)
-}
-
-// Run the background process to pick up jobs
-func (w *Worker) runJobs(ctx context.Context) {
-	defer w.wg.Done()
-	checkJobs := time.NewTicker(clusterCheckJobsInterval)
-	defer checkJobs.Stop()
-	for {
-		select {
-		case <-ctx.Done():
-			return
-		case <-checkJobs.C:
-			w.checkJobs(ctx)
-		}
-	}
-}
-
-// Write the worker status
-func (w *Worker) writeStatus(ctx context.Context) {
-	// Create the worker status from the jobIDs and the short stats
-	status := WorkerStatus{
-		ID:      w.id,
-		Running: make(map[string]rc.Params),
-		Updated: time.Now(),
-		Done:    make(map[string]bool),
-	}
-	w.jobsMu.Lock()
-	for _, jobID := range w.done {
-		status.Done[jobID] = true
-	}
-	for jobID := range w.running {
-		fs.Infof(nil, "read jobID %q", jobID)
-		si := accounting.StatsGroup(ctx, jobID)
-		out, err := si.RemoteStats(true)
-		if err != nil {
-			fs.Errorf(nil, "cluster: write status: stats: %v", err)
-			status.Running[jobID] = rc.Params{}
-		} else {
-			status.Running[jobID] = out
-		}
-		status.Done[jobID] = false
-	}
-	w.jobsMu.Unlock()
-
-	// Write the stats to a file
-	buf, err := json.MarshalIndent(status, "", "\t")
-	if err != nil {
-		fs.Errorf(nil, "cluster: write status: json: %w", err)
-		return
-	}
-	err = w.jobs.writeFile(ctx, w.status, status.Updated, buf)
-	if err != nil {
-		fs.Errorf(nil, "cluster: write status: %w", err)
-	}
-}
-
-// Remove the worker status
-func (w *Worker) clearStatus(ctx context.Context) {
-	err := w.jobs.removeFile(ctx, w.status)
-	if err != nil {
-		fs.Errorf(nil, "cluster: clear status: %w", err)
-	}
-}
-
-// Run the background process to write status
-func (w *Worker) runStatus(ctx context.Context) {
-	defer w.wg.Done()
-	w.writeStatus(ctx)
-	defer w.clearStatus(ctx)
-	writeStatus := time.NewTicker(clusterWriteStatusInterval)
-	defer writeStatus.Stop()
-	for {
-		select {
-		case <-ctx.Done():
-			return
-		case <-writeStatus.C:
-			t0 := time.Now()
-			w.writeStatus(ctx)
-			fs.Debugf(nil, "write status took %v at %v", time.Since(t0), t0)
-		}
-	}
-}
-
-// Shutdown the worker regardless of whether it has work to process or not.
-func (w *Worker) Shutdown(ctx context.Context) error {
-	w.cancel()
-	w.wg.Wait()
-	return nil
-}
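
Worth noting from the deleted worker: liveness is inferred purely from the Updated timestamp each worker writes into its status file, compared against the controller's workerTimeout. A stripped-down version of that check, with local names and no rclone types, looks like this:

package main

import (
	"fmt"
	"time"
)

// workerTimeout mirrors the constant from the deleted cluster.go: a worker
// whose status file has not been rewritten within this window is presumed
// dead and its in-flight jobs are renamed back to pending.
const workerTimeout = 2 * time.Second

func isDead(updated, now time.Time) bool {
	return now.Sub(updated) > workerTimeout
}

func main() {
	now := time.Now()
	fmt.Println(isDead(now.Add(-3*time.Second), now)) // true - heartbeat too old
	fmt.Println(isDead(now.Add(-time.Second), now))   // false - still fresh
}

Since the status writer ticks every clusterWriteStatusInterval (one second) and the timeout is two seconds, a single missed heartbeat is not enough to declare a worker dead.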
64 fs/config.go
@@ -50,34 +50,6 @@ var (
 	ConfigEdit = "config_fs_edit"
 )
 
-// ClusterCleanup describes the cluster cleanup choices.
-type ClusterCleanup = Enum[clusterCleanupChoices]
-
-// Cluster cleanup choices.
-//
-// ClusterCleanupNone don't remove any cluster files
-// ClusterCleanupCompleted remove successfully completed jobs
-// ClusterCleanupFull remove everything on exit
-const (
-	ClusterCleanupNone ClusterCleanup = iota
-	ClusterCleanupCompleted
-	ClusterCleanupFull
-)
-
-type clusterCleanupChoices struct{}
-
-func (clusterCleanupChoices) Choices() []string {
-	return []string{
-		ClusterCleanupNone:      "none",
-		ClusterCleanupCompleted: "completed",
-		ClusterCleanupFull:      "full",
-	}
-}
-
-func (clusterCleanupChoices) Type() string {
-	return "ClusterCleanup"
-}
-
 // ConfigOptionsInfo describes the Options in use
 var ConfigOptionsInfo = Options{{
 	Name: "modify_window",
@@ -594,36 +566,6 @@ var ConfigOptionsInfo = Options{{
 	Default: "",
 	Help:    "HTTP proxy URL.",
 	Groups:  "Networking",
-}, {
-	Name:    "cluster",
-	Default: "",
-	Help:    "Enable cluster mode with remote to use as shared storage.",
-	Groups:  "Networking",
-}, {
-	Name:    "cluster_id",
-	Default: "",
-	Help:    "Set to an ID for the cluster. An ID of 0 or empty becomes the controller.",
-	Groups:  "Networking",
-}, {
-	Name:    "cluster_quit_workers",
-	Default: false,
-	Help:    "Set to cause the controller to quit the workers when it has finished.",
-	Groups:  "Networking",
-}, {
-	Name:    "cluster_batch_files",
-	Default: 1000,
-	Help:    "Max number of files for a cluster batch.",
-	Groups:  "Networking",
-}, {
-	Name:    "cluster_batch_size",
-	Default: Tebi,
-	Help:    "Max size of files for a cluster batch.",
-	Groups:  "Networking",
-}, {
-	Name:    "cluster_cleanup",
-	Default: ClusterCleanupFull,
-	Help:    "Control which cluster files get cleaned up.",
-	Groups:  "Networking",
 }}
 
 // ConfigInfo is filesystem config options
@@ -738,12 +680,6 @@ type ConfigInfo struct {
 	MaxConnections int      `config:"max_connections"`
 	NameTransform  []string `config:"name_transform"`
 	HTTPProxy      string   `config:"http_proxy"`
-	Cluster            string         `config:"cluster"`
-	ClusterID          string         `config:"cluster_id"`
-	ClusterQuitWorkers bool           `config:"cluster_quit_workers"`
-	ClusterBatchFiles  int            `config:"cluster_batch_files"`
-	ClusterBatchSize   SizeSuffix     `config:"cluster_batch_size"`
-	ClusterCleanup     ClusterCleanup `config:"cluster_cleanup"`
 }
 
 func init() {
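
The ClusterCleanup machinery deleted from this file follows rclone's enum-of-choices pattern: iota constants whose display names come from a slice literal keyed by those constants. A standalone sketch of just that pattern, using local names and independent of rclone's Enum helper, is:

package main

import "fmt"

// cleanup mimics the deleted ClusterCleanup choices: none, completed, full.
type cleanup int

const (
	cleanupNone      cleanup = iota // don't remove any cluster files
	cleanupCompleted                // remove successfully completed jobs
	cleanupFull                     // remove everything on exit
)

// String indexes a slice literal keyed by the constants - the same shape
// the deleted Choices() method returned.
func (c cleanup) String() string {
	return []string{
		cleanupNone:      "none",
		cleanupCompleted: "completed",
		cleanupFull:      "full",
	}[c]
}

func main() {
	fmt.Println(cleanupCompleted) // completed
}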
324 fs/fs_test.go
@@ -2,9 +2,21 @@ package fs
 
 import (
 	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
 	"strings"
+	"sync"
 	"testing"
+	"time"
 
+	"github.com/spf13/pflag"
+	"github.com/stretchr/testify/require"
+
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/fserrors"
+	"github.com/rclone/rclone/lib/pacer"
 	"github.com/stretchr/testify/assert"
 )
 
@@ -78,3 +90,315 @@ func TestFeaturesDisableList(t *testing.T) {
 	assert.False(t, ft.CaseInsensitive)
 	assert.False(t, ft.DuplicateFiles)
 }
+
+// Check it satisfies the interface
+var _ pflag.Value = (*Option)(nil)
+
+func TestOption(t *testing.T) {
+	d := &Option{
+		Name:  "potato",
+		Value: SizeSuffix(17 << 20),
+	}
+	assert.Equal(t, "17Mi", d.String())
+	assert.Equal(t, "SizeSuffix", d.Type())
+	err := d.Set("18M")
+	assert.NoError(t, err)
+	assert.Equal(t, SizeSuffix(18<<20), d.Value)
+	err = d.Set("sdfsdf")
+	assert.Error(t, err)
+}
+
+var errFoo = errors.New("foo")
+
+type dummyPaced struct {
+	retry  bool
+	called int
+	wait   *sync.Cond
+}
+
+func (dp *dummyPaced) fn() (bool, error) {
+	if dp.wait != nil {
+		dp.wait.L.Lock()
+		dp.wait.Wait()
+		dp.wait.L.Unlock()
+	}
+	dp.called++
+	return dp.retry, errFoo
+}
+
+func TestPacerCall(t *testing.T) {
+	ctx := context.Background()
+	config := GetConfig(ctx)
+	expectedCalled := config.LowLevelRetries
+	if expectedCalled == 0 {
+		ctx, config = AddConfig(ctx)
+		expectedCalled = 20
+		config.LowLevelRetries = expectedCalled
+	}
+	p := NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))
+
+	dp := &dummyPaced{retry: true}
+	err := p.Call(dp.fn)
+	require.Equal(t, expectedCalled, dp.called)
+	require.Implements(t, (*fserrors.Retrier)(nil), err)
+}
+
+func TestPacerCallNoRetry(t *testing.T) {
+	p := NewPacer(context.Background(), pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))
+
+	dp := &dummyPaced{retry: true}
+	err := p.CallNoRetry(dp.fn)
+	require.Equal(t, 1, dp.called)
+	require.Implements(t, (*fserrors.Retrier)(nil), err)
+}
+
+// Test options
+var (
+	nouncOption = Option{
+		Name: "nounc",
+	}
+	copyLinksOption = Option{
+		Name:     "copy_links",
+		Default:  false,
+		NoPrefix: true,
+		ShortOpt: "L",
+		Advanced: true,
+	}
+	caseInsensitiveOption = Option{
+		Name:     "case_insensitive",
+		Default:  false,
+		Value:    true,
+		Advanced: true,
+	}
+	testOptions = Options{nouncOption, copyLinksOption, caseInsensitiveOption}
+)
+
+func TestOptionsSetValues(t *testing.T) {
+	assert.Nil(t, testOptions[0].Default)
+	assert.Equal(t, false, testOptions[1].Default)
+	assert.Equal(t, false, testOptions[2].Default)
+	testOptions.setValues()
+	assert.Equal(t, "", testOptions[0].Default)
+	assert.Equal(t, false, testOptions[1].Default)
+	assert.Equal(t, false, testOptions[2].Default)
+}
+
+func TestOptionsGet(t *testing.T) {
+	opt := testOptions.Get("copy_links")
+	assert.Equal(t, &copyLinksOption, opt)
+	opt = testOptions.Get("not_found")
+	assert.Nil(t, opt)
+}
+
+func TestOptionsOveridden(t *testing.T) {
+	m := configmap.New()
+	m1 := configmap.Simple{
+		"nounc":      "m1",
+		"copy_links": "m1",
+	}
+	m.AddGetter(m1, configmap.PriorityNormal)
+	m2 := configmap.Simple{
+		"nounc":            "m2",
+		"case_insensitive": "m2",
+	}
+	m.AddGetter(m2, configmap.PriorityConfig)
+	m3 := configmap.Simple{
+		"nounc": "m3",
+	}
+	m.AddGetter(m3, configmap.PriorityDefault)
+	got := testOptions.Overridden(m)
+	assert.Equal(t, configmap.Simple{
+		"copy_links": "m1",
+		"nounc":      "m1",
+	}, got)
+}
+
+func TestOptionsNonDefault(t *testing.T) {
+	m := configmap.Simple{}
+	got := testOptions.NonDefault(m)
+	assert.Equal(t, configmap.Simple{}, got)
+
+	m["case_insensitive"] = "false"
+	got = testOptions.NonDefault(m)
+	assert.Equal(t, configmap.Simple{}, got)
+
+	m["case_insensitive"] = "true"
+	got = testOptions.NonDefault(m)
+	assert.Equal(t, configmap.Simple{"case_insensitive": "true"}, got)
+}
+
+func TestOptionMarshalJSON(t *testing.T) {
+	out, err := json.MarshalIndent(&caseInsensitiveOption, "", "")
+	assert.NoError(t, err)
+	require.Equal(t, `{
+"Name": "case_insensitive",
+"FieldName": "",
+"Help": "",
+"Default": false,
+"Value": true,
+"Hide": 0,
+"Required": false,
+"IsPassword": false,
+"NoPrefix": false,
+"Advanced": true,
+"Exclusive": false,
+"Sensitive": false,
+"DefaultStr": "false",
+"ValueStr": "true",
+"Type": "bool"
+}`, string(out))
+}
+
+func TestOptionGetValue(t *testing.T) {
+	assert.Equal(t, "", nouncOption.GetValue())
+	assert.Equal(t, false, copyLinksOption.GetValue())
+	assert.Equal(t, true, caseInsensitiveOption.GetValue())
+}
+
+func TestOptionString(t *testing.T) {
+	assert.Equal(t, "", nouncOption.String())
+	assert.Equal(t, "false", copyLinksOption.String())
+	assert.Equal(t, "true", caseInsensitiveOption.String())
+}
+
+func TestOptionStringStringArray(t *testing.T) {
+	opt := Option{
+		Name:    "string_array",
+		Default: []string(nil),
+	}
+	assert.Equal(t, "", opt.String())
+	opt.Default = []string{}
+	assert.Equal(t, "", opt.String())
+	opt.Default = []string{"a", "b"}
+	assert.Equal(t, "a,b", opt.String())
+	opt.Default = []string{"hello, world!", "goodbye, world!"}
+	assert.Equal(t, `"hello, world!","goodbye, world!"`, opt.String())
+}
+
+func TestOptionStringSizeSuffix(t *testing.T) {
+	opt := Option{
+		Name:    "size_suffix",
+		Default: SizeSuffix(0),
+	}
+	assert.Equal(t, "0", opt.String())
+	opt.Default = SizeSuffix(-1)
+	assert.Equal(t, "off", opt.String())
+	opt.Default = SizeSuffix(100)
+	assert.Equal(t, "100B", opt.String())
+	opt.Default = SizeSuffix(1024)
+	assert.Equal(t, "1Ki", opt.String())
+}
+
+func TestOptionSet(t *testing.T) {
+	o := caseInsensitiveOption
+	assert.Equal(t, true, o.Value)
+	err := o.Set("FALSE")
+	assert.NoError(t, err)
+	assert.Equal(t, false, o.Value)
+
+	o = copyLinksOption
+	assert.Equal(t, nil, o.Value)
+	err = o.Set("True")
+	assert.NoError(t, err)
+	assert.Equal(t, true, o.Value)
+
+	err = o.Set("INVALID")
+	assert.Error(t, err)
+	assert.Equal(t, true, o.Value)
+}
+
+func TestOptionType(t *testing.T) {
+	assert.Equal(t, "string", nouncOption.Type())
+	assert.Equal(t, "bool", copyLinksOption.Type())
+	assert.Equal(t, "bool", caseInsensitiveOption.Type())
+}
+
+func TestOptionFlagName(t *testing.T) {
+	assert.Equal(t, "local-nounc", nouncOption.FlagName("local"))
+	assert.Equal(t, "copy-links", copyLinksOption.FlagName("local"))
+	assert.Equal(t, "local-case-insensitive", caseInsensitiveOption.FlagName("local"))
+}
+
+func TestOptionEnvVarName(t *testing.T) {
+	assert.Equal(t, "RCLONE_LOCAL_NOUNC", nouncOption.EnvVarName("local"))
+	assert.Equal(t, "RCLONE_LOCAL_COPY_LINKS", copyLinksOption.EnvVarName("local"))
+	assert.Equal(t, "RCLONE_LOCAL_CASE_INSENSITIVE", caseInsensitiveOption.EnvVarName("local"))
+}
+
+func TestOptionGetters(t *testing.T) {
+	// Set up env vars
+	envVars := [][2]string{
+		{"RCLONE_CONFIG_LOCAL_POTATO_PIE", "yes"},
+		{"RCLONE_COPY_LINKS", "TRUE"},
+		{"RCLONE_LOCAL_NOUNC", "NOUNC"},
+	}
+	for _, ev := range envVars {
+		assert.NoError(t, os.Setenv(ev[0], ev[1]))
+	}
+	defer func() {
+		for _, ev := range envVars {
+			assert.NoError(t, os.Unsetenv(ev[0]))
+		}
+	}()
+
+	oldConfigFileGet := ConfigFileGet
+	ConfigFileGet = func(section, key string) (string, bool) {
+		if section == "sausage" && key == "key1" {
+			return "value1", true
+		}
+		return "", false
+	}
+	defer func() {
+		ConfigFileGet = oldConfigFileGet
+	}()
+
+	// set up getters
+
+	// A configmap.Getter to read from the environment RCLONE_CONFIG_backend_option_name
+	configEnvVarsGetter := configEnvVars("local")
+
+	// A configmap.Getter to read from the environment RCLONE_option_name
+	optionEnvVarsGetter := optionEnvVars{"local", testOptions}
+
+	// A configmap.Getter to read either the default value or the set
+	// value from the RegInfo.Options
+	regInfoValuesGetterFalse := &regInfoValues{
+		options:    testOptions,
+		useDefault: false,
+	}
+	regInfoValuesGetterTrue := &regInfoValues{
+		options:    testOptions,
+		useDefault: true,
+	}
+
+	// A configmap.Setter to read from the config file
+	configFileGetter := getConfigFile("sausage")
+
+	for i, test := range []struct {
+		get       configmap.Getter
+		key       string
+		wantValue string
+		wantOk    bool
+	}{
+		{configEnvVarsGetter, "not_found", "", false},
+		{configEnvVarsGetter, "potato_pie", "yes", true},
+		{optionEnvVarsGetter, "not_found", "", false},
+		{optionEnvVarsGetter, "copy_links", "TRUE", true},
+		{optionEnvVarsGetter, "nounc", "NOUNC", true},
+		{optionEnvVarsGetter, "case_insensitive", "", false},
+		{regInfoValuesGetterFalse, "not_found", "", false},
+		{regInfoValuesGetterFalse, "case_insensitive", "true", true},
+		{regInfoValuesGetterFalse, "copy_links", "", false},
+		{regInfoValuesGetterTrue, "not_found", "", false},
+		{regInfoValuesGetterTrue, "case_insensitive", "true", true},
|
||||||
|
{regInfoValuesGetterTrue, "copy_links", "false", true},
|
||||||
|
{configFileGetter, "not_found", "", false},
|
||||||
|
{configFileGetter, "key1", "value1", true},
|
||||||
|
} {
|
||||||
|
what := fmt.Sprintf("%d: %+v: %q", i, test.get, test.key)
|
||||||
|
gotValue, gotOk := test.get.Get(test.key)
|
||||||
|
assert.Equal(t, test.wantValue, gotValue, what)
|
||||||
|
assert.Equal(t, test.wantOk, gotOk, what)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
@@ -1,57 +0,0 @@
-package fs
-
-import (
-	"context"
-	"errors"
-	"sync"
-	"testing"
-	"time"
-
-	"github.com/rclone/rclone/fs/fserrors"
-	"github.com/rclone/rclone/lib/pacer"
-	"github.com/stretchr/testify/require"
-)
-
-var errFoo = errors.New("foo")
-
-type dummyPaced struct {
-	retry  bool
-	called int
-	wait   *sync.Cond
-}
-
-func (dp *dummyPaced) fn() (bool, error) {
-	if dp.wait != nil {
-		dp.wait.L.Lock()
-		dp.wait.Wait()
-		dp.wait.L.Unlock()
-	}
-	dp.called++
-	return dp.retry, errFoo
-}
-
-func TestPacerCall(t *testing.T) {
-	ctx := context.Background()
-	config := GetConfig(ctx)
-	expectedCalled := config.LowLevelRetries
-	if expectedCalled == 0 {
-		ctx, config = AddConfig(ctx)
-		expectedCalled = 20
-		config.LowLevelRetries = expectedCalled
-	}
-	p := NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))
-
-	dp := &dummyPaced{retry: true}
-	err := p.Call(dp.fn)
-	require.Equal(t, expectedCalled, dp.called)
-	require.Implements(t, (*fserrors.Retrier)(nil), err)
-}
-
-func TestPacerCallNoRetry(t *testing.T) {
-	p := NewPacer(context.Background(), pacer.NewDefault(pacer.MinSleep(1*time.Millisecond), pacer.MaxSleep(2*time.Millisecond)))
-
-	dp := &dummyPaced{retry: true}
-	err := p.CallNoRetry(dp.fn)
-	require.Equal(t, 1, dp.called)
-	require.Implements(t, (*fserrors.Retrier)(nil), err)
-}
@@ -2,15 +2,11 @@
 package jobs
 
 import (
-	"bytes"
 	"context"
-	"encoding/json"
 	"errors"
 	"fmt"
-	"net/http"
 	"runtime/debug"
 	"slices"
-	"strings"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -21,7 +17,6 @@ import (
 	"github.com/rclone/rclone/fs/cache"
 	"github.com/rclone/rclone/fs/filter"
 	"github.com/rclone/rclone/fs/rc"
-	"golang.org/x/sync/errgroup"
 )
 
 // Fill in these to avoid circular dependencies
@@ -480,249 +475,3 @@ func rcGroupStop(ctx context.Context, in rc.Params) (out rc.Params, err error) {
 	out = make(rc.Params)
 	return out, nil
 }
-
-// NewJobFromParams creates an rc job rc.Params.
-//
-// The JSON blob should contain a _path entry.
-//
-// It returns a rc.Params as output which may be an error.
-func NewJobFromParams(ctx context.Context, in rc.Params) (out rc.Params) {
-	path := "unknown"
-
-	// Return an rc error blob
-	rcError := func(err error, status int) rc.Params {
-		fs.Errorf(nil, "rc: %q: error: %v", path, err)
-		out, _ = rc.Error(path, in, err, status)
-		return out
-	}
-
-	// Find the call
-	path, err := in.GetString("_path")
-	if err != nil {
-		return rcError(err, http.StatusNotFound)
-	}
-	delete(in, "_path")
-	call := rc.Calls.Get(path)
-	if call == nil {
-		return rcError(fmt.Errorf("couldn't find path %q", path), http.StatusNotFound)
-	}
-	if call.NeedsRequest {
-		return rcError(fmt.Errorf("can't run path %q as it needs the request", path), http.StatusBadRequest)
-	}
-	if call.NeedsResponse {
-		return rcError(fmt.Errorf("can't run path %q as it needs the response", path), http.StatusBadRequest)
-	}
-
-	// Pass on the group if one is set in the context and it isn't set in the input.
-	if _, found := in["_group"]; !found {
-		group, ok := accounting.StatsGroupFromContext(ctx)
-		if ok {
-			in["_group"] = group
-		}
-	}
-
-	fs.Debugf(nil, "rc: %q: with parameters %+v", path, in)
-	_, out, err = NewJob(ctx, call.Fn, in)
-	if err != nil {
-		return rcError(err, http.StatusInternalServerError)
-	}
-	if out == nil {
-		out = make(rc.Params)
-	}
-
-	fs.Debugf(nil, "rc: %q: reply %+v: %v", path, out, err)
-	return out
-}
-
-// NewJobFromBytes creates an rc job from a JSON blob as bytes.
-//
-// The JSON blob should contain a _path entry.
-//
-// It returns a JSON blob as output which may be an error.
-func NewJobFromBytes(ctx context.Context, inBuf []byte) (outBuf []byte) {
-	var in rc.Params
-	var out rc.Params
-
-	// Parse a JSON blob from the input
-	err := json.Unmarshal(inBuf, &in)
-	if err != nil {
-		out, _ = rc.Error("unknown", in, err, http.StatusBadRequest)
-	} else {
-		out = NewJobFromParams(ctx, in)
-	}
-
-	var w bytes.Buffer
-	err = rc.WriteJSON(&w, out)
-	if err != nil {
-		fs.Errorf(nil, "rc: NewJobFromBytes: failed to write JSON output: %v", err)
-		return []byte(`{"error":"failed to write JSON output"}`)
-	}
-	return w.Bytes()
-}
-
-func init() {
-	rc.Add(rc.Call{
-		Path:         "job/batch",
-		AuthRequired: true, // require auth always since sub commands may require it
-		Fn:           rcBatch,
-		Title:        "Run a batch of rclone rc commands concurrently.",
-		Help: strings.ReplaceAll(`
-This takes the following parameters:
-
-- concurrency - int - do this many commands concurrently. Defaults to |--transfers| if not set.
-- inputs - an list of inputs to the commands with an extra |_path| parameter
-
-|||json
-{
-    "_path": "rc/path",
-    "param1": "parameter for the path as documented",
-    "param2": "parameter for the path as documented, etc",
-}
-|||json
-
-The inputs may use |_async|, |_group|, |_config| and |_filter| as normal when using the rc.
-
-Returns:
-
-- results - a list of results from the commands with one entry for each in inputs.
-
-For example:
-
-|||sh
-rclone rc job/batch --json '{
-    "inputs": [
-        {
-            "_path": "rc/noop",
-            "parameter": "OK"
-        },
-        {
-            "_path": "rc/error",
-            "parameter": "BAD"
-        }
-    ]
-}
-'
-|||
-
-Gives the result:
-
-|||json
-{
-    "results": [
-        {
-            "parameter": "OK"
-        },
-        {
-            "error": "arbitrary error on input map[parameter:BAD]",
-            "input": {
-                "parameter": "BAD"
-            },
-            "path": "rc/error",
-            "status": 500
-        }
-    ]
-}
-|||
-`, "|", "`"),
-	})
-}
-
-/*
-// Run a single batch job
-func runBatchJob(ctx context.Context, inputAny any) (out rc.Params, err error) {
-	var in rc.Params
-	path := "unknown"
-	defer func() {
-		if err != nil {
-			out, _ = rc.Error(path, in, err, http.StatusInternalServerError)
-		}
-	}()
-
-	// get the inputs to the job
-	input, ok := inputAny.(map[string]any)
-	if !ok {
-		return nil, rc.NewErrParamInvalid(fmt.Errorf("\"inputs\" items must be objects not %T", inputAny))
-	}
-	in = rc.Params(input)
-	path, err = in.GetString("_path")
-	if err != nil {
-		return nil, err
-	}
-	delete(in, "_path")
-	call := rc.Calls.Get(path)
-
-	// Check call
-	if call == nil {
-		return nil, rc.NewErrParamInvalid(fmt.Errorf("path %q does not exist", path))
-	}
-	path = call.Path
-	if call.NeedsRequest {
-		return nil, rc.NewErrParamInvalid(fmt.Errorf("can't run path %q as it needs the request", path))
-	}
-	if call.NeedsResponse {
-		return nil, rc.NewErrParamInvalid(fmt.Errorf("can't run path %q as it needs the response", path))
-	}
-
-	// Run the job
-	_, out, err = NewJob(ctx, call.Fn, in)
-	if err != nil {
-		return nil, err
-	}
-
-	// Reshape (serialize then deserialize) the data so it is in the form expected
-	err = rc.Reshape(&out, out)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-*/
-
-// Batch the registered commands
-func rcBatch(ctx context.Context, in rc.Params) (out rc.Params, err error) {
-	out = make(rc.Params)
-
-	// Read inputs
-	inputsAny, err := in.Get("inputs")
-	if err != nil {
-		return nil, err
-	}
-	inputs, ok := inputsAny.([]any)
-	if !ok {
-		return nil, rc.NewErrParamInvalid(fmt.Errorf("expecting list key %q (was %T)", "inputs", inputsAny))
-	}
-
-	// Read concurrency
-	concurrency, err := in.GetInt64("concurrency")
-	if rc.IsErrParamNotFound(err) {
-		ci := fs.GetConfig(ctx)
-		concurrency = int64(ci.Transfers)
-	} else if err != nil {
-		return nil, err
-	}
-
-	// Prepare outputs
-	results := make([]rc.Params, len(inputs))
-	out["results"] = results
-
-	g, gCtx := errgroup.WithContext(ctx)
-	g.SetLimit(int(concurrency))
-	for i, inputAny := range inputs {
-		input, ok := inputAny.(map[string]any)
-		if !ok {
-			results[i], _ = rc.Error("unknown", nil, fmt.Errorf("\"inputs\" items must be objects not %T", inputAny), http.StatusBadRequest)
-			continue
-		}
-		in := rc.Params(input)
-		if concurrency <= 1 {
-			results[i] = NewJobFromParams(ctx, in)
-		} else {
-			g.Go(func() error {
-				results[i] = NewJobFromParams(gCtx, in)
-				return nil
-			})
-		}
-	}
-	_ = g.Wait()
-	return out, nil
-}
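The removed rcBatch above runs one rc call per entry in "inputs", capped at "concurrency" goroutines by an errgroup limiter, and stores each result at its input's index so the output order matches the input order. For reference, a minimal standalone sketch of that fan-out pattern (illustrative names only, not part of the rclone API):

	package main

	import (
		"fmt"

		"golang.org/x/sync/errgroup"
	)

	func main() {
		inputs := []int{1, 2, 3, 4, 5}
		results := make([]int, len(inputs))

		var g errgroup.Group
		g.SetLimit(2) // at most 2 in flight, like the concurrency parameter
		for i, in := range inputs {
			i, in := i, in // copy loop variables (required before Go 1.22)
			g.Go(func() error {
				results[i] = in * in // stand-in for running the job
				return nil           // per-input errors go into results, not the group
			})
		}
		_ = g.Wait()
		fmt.Println(results) // [1 4 9 16 25]
	}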
@@ -2,7 +2,6 @@ package jobs
 
 import (
 	"context"
-	"encoding/json"
 	"errors"
 	"runtime"
 	"testing"
@@ -603,294 +602,3 @@ func TestOnFinishDataRace(t *testing.T) {
 		}
 	}
 }
-
-// Register some test rc calls
-func init() {
-	rc.Add(rc.Call{
-		Path:         "test/needs_request",
-		NeedsRequest: true,
-	})
-	rc.Add(rc.Call{
-		Path:          "test/needs_response",
-		NeedsResponse: true,
-	})
-
-}
-
-func TestNewJobFromParams(t *testing.T) {
-	ctx := context.Background()
-	for _, test := range []struct {
-		in   rc.Params
-		want rc.Params
-	}{{
-		in: rc.Params{
-			"_path": "rc/noop",
-			"a":     "potato",
-		},
-		want: rc.Params{
-			"a": "potato",
-		},
-	}, {
-		in: rc.Params{
-			"_path": "rc/noop",
-			"b":     "sausage",
-		},
-		want: rc.Params{
-			"b": "sausage",
-		},
-	}, {
-		in: rc.Params{
-			"_path": "rc/error",
-			"e":     "sausage",
-		},
-		want: rc.Params{
-			"error": "arbitrary error on input map[e:sausage]",
-			"input": rc.Params{
-				"e": "sausage",
-			},
-			"path":   "rc/error",
-			"status": 500,
-		},
-	}, {
-		in: rc.Params{
-			"_path": "bad/path",
-			"param": "sausage",
-		},
-		want: rc.Params{
-			"error": "couldn't find path \"bad/path\"",
-			"input": rc.Params{
-				"param": "sausage",
-			},
-			"path":   "bad/path",
-			"status": 404,
-		},
-	}, {
-		in: rc.Params{
-			"_path": "test/needs_request",
-		},
-		want: rc.Params{
-			"error":  "can't run path \"test/needs_request\" as it needs the request",
-			"input":  rc.Params{},
-			"path":   "test/needs_request",
-			"status": 400,
-		},
-	}, {
-		in: rc.Params{
-			"_path": "test/needs_response",
-		},
-		want: rc.Params{
-			"error":  "can't run path \"test/needs_response\" as it needs the response",
-			"input":  rc.Params{},
-			"path":   "test/needs_response",
-			"status": 400,
-		},
-	}, {
-		in: rc.Params{
-			"nopath": "BOOM",
-		},
-		want: rc.Params{
-			"error": "Didn't find key \"_path\" in input",
-			"input": rc.Params{
-				"nopath": "BOOM",
-			},
-			"path":   "",
-			"status": 400,
-		},
-	}} {
-		got := NewJobFromParams(ctx, test.in)
-		assert.Equal(t, test.want, got)
-	}
-}
-
-func TestNewJobFromBytes(t *testing.T) {
-	ctx := context.Background()
-	for _, test := range []struct {
-		in   string
-		want string
-	}{{
-		in: `{
-	"_path": "rc/noop",
-	"a": "potato"
-}`,
-		want: `{
-	"a": "potato"
-}
-`,
-	}, {
-		in: `{
-	"_path": "rc/error",
-	"e": "sausage"
-}`,
-		want: `{
-	"error": "arbitrary error on input map[e:sausage]",
-	"input": {
-		"e": "sausage"
-	},
-	"path": "rc/error",
-	"status": 500
-}
-`,
-	}, {
-		in: `parse error`,
-		want: `{
-	"error": "invalid character 'p' looking for beginning of value",
-	"input": null,
-	"path": "unknown",
-	"status": 400
-}
-`,
-	}, {
-		in: `"just a string"`,
-		want: `{
-	"error": "json: cannot unmarshal string into Go value of type rc.Params",
-	"input": null,
-	"path": "unknown",
-	"status": 400
-}
-`,
-	}} {
-		got := NewJobFromBytes(ctx, []byte(test.in))
-		assert.Equal(t, test.want, string(got))
-	}
-}
-
-func TestJobsBatch(t *testing.T) {
-	ctx := context.Background()
-
-	call := rc.Calls.Get("job/batch")
-	assert.NotNil(t, call)
-
-	inJSON := `{
-	"inputs": [
-		{
-			"_path": "rc/noop",
-			"a": "potato"
-		},
-		"bad string",
-		{
-			"_path": "rc/noop",
-			"b": "sausage"
-		},
-		{
-			"_path": "rc/error",
-			"e": "sausage"
-		},
-		{
-			"_path": "bad/path",
-			"param": "sausage"
-		},
-		{
-			"_path": "test/needs_request"
-		},
-		{
-			"_path": "test/needs_response"
-		},
-		{
-			"nopath": "BOOM"
-		}
-	]
-}
-`
-	var in rc.Params
-	require.NoError(t, json.Unmarshal([]byte(inJSON), &in))
-
-	wantJSON := `{
-	"results": [
-		{
-			"a": "potato"
-		},
-		{
-			"error": "\"inputs\" items must be objects not string",
-			"input": null,
-			"path": "unknown",
-			"status": 400
-		},
-		{
-			"b": "sausage"
-		},
-		{
-			"error": "arbitrary error on input map[e:sausage]",
-			"input": {
-				"e": "sausage"
-			},
-			"path": "rc/error",
-			"status": 500
-		},
-		{
-			"error": "couldn't find path \"bad/path\"",
-			"input": {
-				"param": "sausage"
-			},
-			"path": "bad/path",
-			"status": 404
-		},
-		{
-			"error": "can't run path \"test/needs_request\" as it needs the request",
-			"input": {},
-			"path": "test/needs_request",
-			"status": 400
-		},
-		{
-			"error": "can't run path \"test/needs_response\" as it needs the response",
-			"input": {},
-			"path": "test/needs_response",
-			"status": 400
-		},
-		{
-			"error": "Didn't find key \"_path\" in input",
-			"input": {
-				"nopath": "BOOM"
-			},
-			"path": "",
-			"status": 400
-		}
-	]
-}
-`
-
-	var want rc.Params
-	require.NoError(t, json.Unmarshal([]byte(wantJSON), &want))
-
-	out, err := call.Fn(ctx, in)
-	require.NoError(t, err)
-
-	var got rc.Params
-	require.NoError(t, rc.Reshape(&got, out))
-
-	assert.Equal(t, want, got)
-}
-
-func TestJobsBatchConcurrent(t *testing.T) {
-	ctx := context.Background()
-	for concurrency := range 10 {
-		in := rc.Params{}
-		var inputs []any
-		var results []rc.Params
-		for i := range 100 {
-			in := map[string]any{
-				"_path": "rc/noop",
-				"i":     i,
-			}
-			inputs = append(inputs, in)
-			results = append(results, rc.Params{
-				"i": i,
-			})
-		}
-		in["inputs"] = inputs
-		want := rc.Params{
-			"results": results,
-		}
-
-		if concurrency > 0 {
-			in["concurrency"] = concurrency
-		}
-		call := rc.Calls.Get("job/batch")
-		assert.NotNil(t, call)
-
-		got, err := call.Fn(ctx, in)
-		require.NoError(t, err)
-
-		assert.Equal(t, want, got)
-	}
-
-}
@@ -154,37 +154,6 @@ func (os Options) NonDefault(m configmap.Getter) configmap.Simple {
 	return nonDefault
 }
 
-// NonDefaultRC discovers which config values aren't at their default
-//
-// It expects a pointer to the current config struct in opts.
-//
-// It returns the overridden config in rc config format.
-func (os Options) NonDefaultRC(opts any) (map[string]any, error) {
-	items, err := configstruct.Items(opts)
-	if err != nil {
-		return nil, err
-	}
-	itemsByName := map[string]*configstruct.Item{}
-	for i := range items {
-		item := &items[i]
-		itemsByName[item.Name] = item
-	}
-	var nonDefault = map[string]any{}
-	for i := range os {
-		opt := &os[i]
-		item, found := itemsByName[opt.Name]
-		if !found {
-			return nil, fmt.Errorf("key %q in OptionsInfo not found in Options struct", opt.Name)
-		}
-		value := fmt.Sprint(item.Value)
-		defaultValue := fmt.Sprint(opt.Default)
-		if value != defaultValue {
-			nonDefault[item.Field] = item.Value
-		}
-	}
-	return nonDefault, nil
-}
-
 // HasAdvanced discovers if any options have an Advanced setting
 func (os Options) HasAdvanced() bool {
 	for i := range os {
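For reference, the NonDefaultRC removed above walks the Options against a config struct via configstruct.Items and returns only the fields whose printed value differs from the option's default, keyed by struct field name. A test-style sketch of the intended behaviour, assuming it runs inside package fs and mirroring the deleted TestOptionsNonDefaultRC further down:

	type cfg struct {
		X string `config:"x"`
		Y int    `config:"y"`
	}
	c := &cfg{X: "a", Y: 6}
	opts := Options{
		{Name: "x", Default: "a"}, // at default, omitted from the result
		{Name: "y", Default: 5},   // non-default, included
	}
	got, err := opts.NonDefaultRC(c)
	// err == nil, got == map[string]any{"Y": 6}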
@@ -1,308 +0,0 @@
-package fs
-
-import (
-	"encoding/json"
-	"fmt"
-	"os"
-	"testing"
-
-	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/spf13/pflag"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-// Check it satisfies the interface
-var _ pflag.Value = (*Option)(nil)
-
-func TestOption(t *testing.T) {
-	d := &Option{
-		Name:  "potato",
-		Value: SizeSuffix(17 << 20),
-	}
-	assert.Equal(t, "17Mi", d.String())
-	assert.Equal(t, "SizeSuffix", d.Type())
-	err := d.Set("18M")
-	assert.NoError(t, err)
-	assert.Equal(t, SizeSuffix(18<<20), d.Value)
-	err = d.Set("sdfsdf")
-	assert.Error(t, err)
-}
-
-// Test options
-var (
-	nouncOption = Option{
-		Name: "nounc",
-	}
-	copyLinksOption = Option{
-		Name:     "copy_links",
-		Default:  false,
-		NoPrefix: true,
-		ShortOpt: "L",
-		Advanced: true,
-	}
-	caseInsensitiveOption = Option{
-		Name:     "case_insensitive",
-		Default:  false,
-		Value:    true,
-		Advanced: true,
-	}
-	testOptions = Options{nouncOption, copyLinksOption, caseInsensitiveOption}
-)
-
-func TestOptionsSetValues(t *testing.T) {
-	assert.Nil(t, testOptions[0].Default)
-	assert.Equal(t, false, testOptions[1].Default)
-	assert.Equal(t, false, testOptions[2].Default)
-	testOptions.setValues()
-	assert.Equal(t, "", testOptions[0].Default)
-	assert.Equal(t, false, testOptions[1].Default)
-	assert.Equal(t, false, testOptions[2].Default)
-}
-
-func TestOptionsGet(t *testing.T) {
-	opt := testOptions.Get("copy_links")
-	assert.Equal(t, &copyLinksOption, opt)
-	opt = testOptions.Get("not_found")
-	assert.Nil(t, opt)
-}
-
-func TestOptionsOveridden(t *testing.T) {
-	m := configmap.New()
-	m1 := configmap.Simple{
-		"nounc":      "m1",
-		"copy_links": "m1",
-	}
-	m.AddGetter(m1, configmap.PriorityNormal)
-	m2 := configmap.Simple{
-		"nounc":            "m2",
-		"case_insensitive": "m2",
-	}
-	m.AddGetter(m2, configmap.PriorityConfig)
-	m3 := configmap.Simple{
-		"nounc": "m3",
-	}
-	m.AddGetter(m3, configmap.PriorityDefault)
-	got := testOptions.Overridden(m)
-	assert.Equal(t, configmap.Simple{
-		"copy_links": "m1",
-		"nounc":      "m1",
-	}, got)
-}
-
-func TestOptionsNonDefault(t *testing.T) {
-	m := configmap.Simple{}
-	got := testOptions.NonDefault(m)
-	assert.Equal(t, configmap.Simple{}, got)
-
-	m["case_insensitive"] = "false"
-	got = testOptions.NonDefault(m)
-	assert.Equal(t, configmap.Simple{}, got)
-
-	m["case_insensitive"] = "true"
-	got = testOptions.NonDefault(m)
-	assert.Equal(t, configmap.Simple{"case_insensitive": "true"}, got)
-}
-
-func TestOptionMarshalJSON(t *testing.T) {
-	out, err := json.MarshalIndent(&caseInsensitiveOption, "", "")
-	assert.NoError(t, err)
-	require.Equal(t, `{
-"Name": "case_insensitive",
-"FieldName": "",
-"Help": "",
-"Default": false,
-"Value": true,
-"Hide": 0,
-"Required": false,
-"IsPassword": false,
-"NoPrefix": false,
-"Advanced": true,
-"Exclusive": false,
-"Sensitive": false,
-"DefaultStr": "false",
-"ValueStr": "true",
-"Type": "bool"
-}`, string(out))
-}
-
-func TestOptionGetValue(t *testing.T) {
-	assert.Equal(t, "", nouncOption.GetValue())
-	assert.Equal(t, false, copyLinksOption.GetValue())
-	assert.Equal(t, true, caseInsensitiveOption.GetValue())
-}
-
-func TestOptionString(t *testing.T) {
-	assert.Equal(t, "", nouncOption.String())
-	assert.Equal(t, "false", copyLinksOption.String())
-	assert.Equal(t, "true", caseInsensitiveOption.String())
-}
-
-func TestOptionStringStringArray(t *testing.T) {
-	opt := Option{
-		Name:    "string_array",
-		Default: []string(nil),
-	}
-	assert.Equal(t, "", opt.String())
-	opt.Default = []string{}
-	assert.Equal(t, "", opt.String())
-	opt.Default = []string{"a", "b"}
-	assert.Equal(t, "a,b", opt.String())
-	opt.Default = []string{"hello, world!", "goodbye, world!"}
-	assert.Equal(t, `"hello, world!","goodbye, world!"`, opt.String())
-}
-
-func TestOptionStringSizeSuffix(t *testing.T) {
-	opt := Option{
-		Name:    "size_suffix",
-		Default: SizeSuffix(0),
-	}
-	assert.Equal(t, "0", opt.String())
-	opt.Default = SizeSuffix(-1)
-	assert.Equal(t, "off", opt.String())
-	opt.Default = SizeSuffix(100)
-	assert.Equal(t, "100B", opt.String())
-	opt.Default = SizeSuffix(1024)
-	assert.Equal(t, "1Ki", opt.String())
-}
-
-func TestOptionSet(t *testing.T) {
-	o := caseInsensitiveOption
-	assert.Equal(t, true, o.Value)
-	err := o.Set("FALSE")
-	assert.NoError(t, err)
-	assert.Equal(t, false, o.Value)
-
-	o = copyLinksOption
-	assert.Equal(t, nil, o.Value)
-	err = o.Set("True")
-	assert.NoError(t, err)
-	assert.Equal(t, true, o.Value)
-
-	err = o.Set("INVALID")
-	assert.Error(t, err)
-	assert.Equal(t, true, o.Value)
-}
-
-func TestOptionType(t *testing.T) {
-	assert.Equal(t, "string", nouncOption.Type())
-	assert.Equal(t, "bool", copyLinksOption.Type())
-	assert.Equal(t, "bool", caseInsensitiveOption.Type())
-}
-
-func TestOptionFlagName(t *testing.T) {
-	assert.Equal(t, "local-nounc", nouncOption.FlagName("local"))
-	assert.Equal(t, "copy-links", copyLinksOption.FlagName("local"))
-	assert.Equal(t, "local-case-insensitive", caseInsensitiveOption.FlagName("local"))
-}
-
-func TestOptionEnvVarName(t *testing.T) {
-	assert.Equal(t, "RCLONE_LOCAL_NOUNC", nouncOption.EnvVarName("local"))
-	assert.Equal(t, "RCLONE_LOCAL_COPY_LINKS", copyLinksOption.EnvVarName("local"))
-	assert.Equal(t, "RCLONE_LOCAL_CASE_INSENSITIVE", caseInsensitiveOption.EnvVarName("local"))
-}
-
-func TestOptionGetters(t *testing.T) {
-	// Set up env vars
-	envVars := [][2]string{
-		{"RCLONE_CONFIG_LOCAL_POTATO_PIE", "yes"},
-		{"RCLONE_COPY_LINKS", "TRUE"},
-		{"RCLONE_LOCAL_NOUNC", "NOUNC"},
-	}
-	for _, ev := range envVars {
-		assert.NoError(t, os.Setenv(ev[0], ev[1]))
-	}
-	defer func() {
-		for _, ev := range envVars {
-			assert.NoError(t, os.Unsetenv(ev[0]))
-		}
-	}()
-
-	oldConfigFileGet := ConfigFileGet
-	ConfigFileGet = func(section, key string) (string, bool) {
-		if section == "sausage" && key == "key1" {
-			return "value1", true
-		}
-		return "", false
-	}
-	defer func() {
-		ConfigFileGet = oldConfigFileGet
-	}()
-
-	// set up getters
-
-	// A configmap.Getter to read from the environment RCLONE_CONFIG_backend_option_name
-	configEnvVarsGetter := configEnvVars("local")
-
-	// A configmap.Getter to read from the environment RCLONE_option_name
-	optionEnvVarsGetter := optionEnvVars{"local", testOptions}
-
-	// A configmap.Getter to read either the default value or the set
-	// value from the RegInfo.Options
-	regInfoValuesGetterFalse := &regInfoValues{
-		options:    testOptions,
-		useDefault: false,
-	}
-	regInfoValuesGetterTrue := &regInfoValues{
-		options:    testOptions,
-		useDefault: true,
-	}
-
-	// A configmap.Setter to read from the config file
-	configFileGetter := getConfigFile("sausage")
-
-	for i, test := range []struct {
-		get       configmap.Getter
-		key       string
-		wantValue string
-		wantOk    bool
-	}{
-		{configEnvVarsGetter, "not_found", "", false},
-		{configEnvVarsGetter, "potato_pie", "yes", true},
-		{optionEnvVarsGetter, "not_found", "", false},
-		{optionEnvVarsGetter, "copy_links", "TRUE", true},
-		{optionEnvVarsGetter, "nounc", "NOUNC", true},
-		{optionEnvVarsGetter, "case_insensitive", "", false},
-		{regInfoValuesGetterFalse, "not_found", "", false},
-		{regInfoValuesGetterFalse, "case_insensitive", "true", true},
-		{regInfoValuesGetterFalse, "copy_links", "", false},
-		{regInfoValuesGetterTrue, "not_found", "", false},
-		{regInfoValuesGetterTrue, "case_insensitive", "true", true},
-		{regInfoValuesGetterTrue, "copy_links", "false", true},
-		{configFileGetter, "not_found", "", false},
-		{configFileGetter, "key1", "value1", true},
-	} {
-		what := fmt.Sprintf("%d: %+v: %q", i, test.get, test.key)
-		gotValue, gotOk := test.get.Get(test.key)
-		assert.Equal(t, test.wantValue, gotValue, what)
-		assert.Equal(t, test.wantOk, gotOk, what)
-	}
-
-}
-
-func TestOptionsNonDefaultRC(t *testing.T) {
-	type cfg struct {
-		X string `config:"x"`
-		Y int    `config:"y"`
-	}
-	c := &cfg{X: "a", Y: 6}
-	opts := Options{
-		{Name: "x", Default: "a"}, // at default, should be omitted
-		{Name: "y", Default: 5},   // non-default, should be included
-	}
-
-	got, err := opts.NonDefaultRC(c)
-	require.NoError(t, err)
-	require.Equal(t, map[string]any{"Y": 6}, got)
-}
-
-func TestOptionsNonDefaultRCMissingKey(t *testing.T) {
-	type cfg struct {
-		X string `config:"x"`
-	}
-	c := &cfg{X: "a"}
-	// Options refers to a key not present in the struct -> expect error
-	opts := Options{{Name: "missing", Default: ""}}
-	_, err := opts.NonDefaultRC(c)
-	assert.ErrorContains(t, err, "not found")
-}
@@ -14,7 +14,6 @@ import (
 
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
-	"github.com/rclone/rclone/fs/cluster"
 	"github.com/rclone/rclone/fs/filter"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
@@ -98,7 +97,6 @@ type syncCopyMove struct {
 	setDirModTimesMaxLevel int                 // max level of the directories to set
 	modifiedDirs           map[string]struct{} // dirs with changed contents (if s.setDirModTimeAfter)
 	allowOverlap           bool                // whether we allow src and dst to overlap (i.e. for convmv)
-	cluster                *cluster.Cluster    // non-nil to run sync via cluster
 }
 
 // For keeping track of delayed modtime sets
@@ -166,7 +164,6 @@ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
 		setDirModTimeAfter:     !ci.NoUpdateDirModTime && (!copyEmptySrcDirs || fsrc.Features().CanHaveEmptyDirectories && fdst.Features().DirModTimeUpdatesOnWrite),
 		modifiedDirs:           make(map[string]struct{}),
 		allowOverlap:           allowOverlap,
-		cluster:                cluster.GetCluster(ctx),
 	}
 
 	s.logger, s.usingLogger = operations.GetLogger(ctx)
@@ -499,25 +496,13 @@ func (s *syncCopyMove) pairCopyOrMove(ctx context.Context, in *pipe, fdst fs.Fs,
 		dst := pair.Dst
 		if s.DoMove {
 			if src != dst {
-				if s.cluster != nil {
-					err = s.cluster.Move(ctx, fdst, dst, src.Remote(), src)
-				} else {
-					_, err = operations.MoveTransfer(ctx, fdst, dst, src.Remote(), src)
-				}
+				_, err = operations.MoveTransfer(ctx, fdst, dst, src.Remote(), src)
 			} else {
 				// src == dst signals delete the src
-				if s.cluster != nil {
-					err = s.cluster.DeleteFile(ctx, src)
-				} else {
-					err = operations.DeleteFile(ctx, src)
-				}
+				err = operations.DeleteFile(ctx, src)
 			}
 		} else {
-			if s.cluster != nil {
-				err = s.cluster.Copy(ctx, fdst, dst, src.Remote(), src)
-			} else {
-				_, err = operations.Copy(ctx, fdst, dst, src.Remote(), src)
-			}
+			_, err = operations.Copy(ctx, fdst, dst, src.Remote(), src)
 		}
 		s.processError(err)
 		if err != nil {
@@ -554,13 +539,8 @@ func (s *syncCopyMove) startTransfers() {
 // This stops the background transfers
 func (s *syncCopyMove) stopTransfers() {
 	s.toBeUploaded.Close()
-	s.transfersWg.Wait()
 	fs.Debugf(s.fdst, "Waiting for transfers to finish")
-	if s.cluster != nil {
-		fs.Debugf(s.fdst, "Waiting for cluster transfers to finish")
-		s.processError(s.cluster.Sync(s.ctx))
-		fs.Debugf(s.fdst, "Cluster transfers finished")
-	}
+	s.transfersWg.Wait()
 }
 
 // This starts the background renamers.
@@ -8,12 +8,10 @@ PORT=28628
 . $(dirname "$0")/docker.bash
 
 start() {
-    # We need to replace the remakerings in the container to create Policy-1.
     docker run --rm -d --name ${NAME} \
         -p 127.0.0.1:${PORT}:8080 \
-        -v $(dirname "$0")/TestSwiftAIO.d/remakerings:/etc/swift/remakerings:ro \
-        openstackswift/saio
+        bouncestorage/swift-aio
 
     echo type=swift
     echo env_auth=false
    echo user=test:tester
@@ -1,46 +0,0 @@
-#!/bin/sh
-
-if ! grep -q "^\[storage-policy:1\]" swift.conf; then
-cat <<EOF >> swift.conf
-
-[storage-policy:1]
-name = Policy-1
-EOF
-fi
-
-rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
-
-swift-ring-builder object.builder create 10 1 1
-swift-ring-builder object.builder add r1z1-127.0.0.1:6200/swift-d0 1
-swift-ring-builder object.builder add r1z1-127.0.0.1:6200/swift-d1 1
-swift-ring-builder object.builder add r1z1-127.0.0.1:6200/swift-d2 1
-swift-ring-builder object.builder add r1z1-127.0.0.1:6200/swift-d3 1
-swift-ring-builder object.builder add r1z1-127.0.0.1:6200/swift-d4 1
-swift-ring-builder object.builder add r1z1-127.0.0.1:6200/swift-d5 1
-swift-ring-builder object.builder rebalance
-swift-ring-builder container.builder create 10 1 1
-swift-ring-builder container.builder add r1z1-127.0.0.1:6201/swift-d0 1
-swift-ring-builder container.builder add r1z1-127.0.0.1:6201/swift-d1 1
-swift-ring-builder container.builder add r1z1-127.0.0.1:6201/swift-d2 1
-swift-ring-builder container.builder add r1z1-127.0.0.1:6201/swift-d3 1
-swift-ring-builder container.builder add r1z1-127.0.0.1:6201/swift-d4 1
-swift-ring-builder container.builder add r1z1-127.0.0.1:6201/swift-d5 1
-swift-ring-builder container.builder rebalance
-swift-ring-builder account.builder create 10 1 1
-swift-ring-builder account.builder add r1z1-127.0.0.1:6202/swift-d0 1
-swift-ring-builder account.builder add r1z1-127.0.0.1:6202/swift-d1 1
-swift-ring-builder account.builder add r1z1-127.0.0.1:6202/swift-d2 1
-swift-ring-builder account.builder add r1z1-127.0.0.1:6202/swift-d3 1
-swift-ring-builder account.builder add r1z1-127.0.0.1:6202/swift-d4 1
-swift-ring-builder account.builder add r1z1-127.0.0.1:6202/swift-d5 1
-swift-ring-builder account.builder rebalance
-
-# For Policy-1:
-swift-ring-builder object-1.builder create 10 1 1
-swift-ring-builder object-1.builder add r1z1-127.0.0.1:6200/swift-d0 1
-swift-ring-builder object-1.builder add r1z1-127.0.0.1:6200/swift-d1 1
-swift-ring-builder object-1.builder add r1z1-127.0.0.1:6200/swift-d2 1
-swift-ring-builder object-1.builder add r1z1-127.0.0.1:6200/swift-d3 1
-swift-ring-builder object-1.builder add r1z1-127.0.0.1:6200/swift-d4 1
-swift-ring-builder object-1.builder add r1z1-127.0.0.1:6200/swift-d5 1
-swift-ring-builder object-1.builder rebalance
@@ -8,11 +8,9 @@ PORT=28632
 . $(dirname "$0")/docker.bash
 
 start() {
-    # We need to replace the remakerings in the container to create Policy-1.
    docker run --rm -d --name ${NAME} \
        -p 127.0.0.1:${PORT}:8080 \
-        -v $(dirname "$0")/TestSwiftAIO.d/remakerings:/etc/swift/remakerings:ro \
-        openstackswift/saio
+        bouncestorage/swift-aio
 
    echo type=swift
    echo env_auth=false
@@ -110,7 +110,7 @@ func start(name string) error {
 			return nil
 		}
 		// If we got a _connect value then try to connect to it
-		const maxTries = 100
+		const maxTries = 30
 		var rdBuf = make([]byte, 1)
 		for i := 1; i <= maxTries; i++ {
 			if i != 0 {
@@ -175,16 +175,7 @@ func Start(remoteName string) (fn func(), err error) {
 	if running[name] <= 0 {
 		// if server isn't running check to see if this server has
 		// been started already but not by us and stop it if so
-		const maxTries = 10
-		for i := 1; i <= maxTries; i++ {
-			if os.Getenv(envKey(name, "type")) == "" && !isRunning(name) {
-				fs.Logf(name, "Stopped server")
-				break
-			}
-			if i != 1 {
-				time.Sleep(time.Second)
-				fs.Logf(name, "Attempting to stop %s try %d/%d", name, i, maxTries)
-			}
+		if os.Getenv(envKey(name, "type")) == "" && isRunning(name) {
 			stop(name)
 		}
 		if !isRunning(name) {
@@ -220,6 +211,6 @@ func stop(name string) {
 			fs.Errorf(name, "Failed to stop server: %v", err)
 		}
 		running[name] = 0
-		fs.Logf(name, "Stopping server")
+		fs.Logf(name, "Stopped server")
 	}
 }

go.mod (4 changes)

@@ -71,7 +71,7 @@ require (
 	github.com/spf13/cobra v1.10.1
 	github.com/spf13/pflag v1.0.10
 	github.com/stretchr/testify v1.11.1
-	github.com/t3rm1n4l/go-mega v0.0.0-20250926104142-ccb8d3498e6c
+	github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5
 	github.com/unknwon/goconfig v1.0.0
 	github.com/willscott/go-nfs v0.0.3
 	github.com/winfsp/cgofuse v1.6.1-0.20250813110601-7d90b0992471
@@ -127,7 +127,6 @@ require (
 	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.0 // indirect
 	github.com/aws/aws-sdk-go-v2/service/sts v1.38.5 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
 	github.com/bradenaw/juniper v0.15.3 // indirect
 	github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
 	github.com/calebcase/tmpfile v1.0.3 // indirect
@@ -247,7 +246,6 @@ require (
 	github.com/ProtonMail/go-crypto v1.3.0
 	github.com/golang-jwt/jwt/v4 v4.5.2
 	github.com/pkg/xattr v0.4.12
-	github.com/pquerna/otp v1.5.0
 	golang.org/x/mobile v0.0.0-20250911085028-6912353760cf
 	golang.org/x/term v0.35.0
 )

go.sum (6 changes)

@@ -150,8 +150,6 @@ github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE=
 github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
-github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
 github.com/bradenaw/juniper v0.15.3 h1:RHIAMEDTpvmzV1wg1jMAHGOoI2oJUSPx3lxRldXnFGo=
 github.com/bradenaw/juniper v0.15.3/go.mod h1:UX4FX57kVSaDp4TPqvSjkAAewmRFAfXf27BOs5z9dq8=
 github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaqKnf+7Qs6GbEPfd4iMOitWzXJx8=
@@ -514,8 +512,6 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
 github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
-github.com/pquerna/otp v1.5.0 h1:NMMR+WrmaqXU4EzdGJEE1aUUI0AMRzsp96fFFWNPwxs=
-github.com/pquerna/otp v1.5.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg=
 github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
 github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -600,8 +596,6 @@ github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu
 github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
 github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5 h1:Sa+sR8aaAMFwxhXWENEnE6ZpqhZ9d7u1RT2722Rw6hc=
 github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5/go.mod h1:UdZiFUFu6e2WjjtjxivwXWcwc1N/8zgbkBR9QNucUOY=
-github.com/t3rm1n4l/go-mega v0.0.0-20250926104142-ccb8d3498e6c h1:BLopNCyqewbE8+BtlIp/Juzu8AJGxz0gHdGADnsblVc=
-github.com/t3rm1n4l/go-mega v0.0.0-20250926104142-ccb8d3498e6c/go.mod h1:ykucQyiE9Q2qx1wLlEtZkkNn1IURib/2O+Mvd25i1Fo=
 github.com/tailscale/depaware v0.0.0-20210622194025-720c4b409502/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8=
 github.com/tinylib/msgp v1.4.0 h1:SYOeDRiydzOw9kSiwdYp9UcBgPFtLU2WDHaJXyHruf8=
 github.com/tinylib/msgp v1.4.0/go.mod h1:cvjFkb4RiC8qSBOPMGPSzSAx47nAsfhLVTCZZNuHv5o=
@@ -250,7 +250,9 @@ func (ts *TokenSource) reReadToken() (changed bool) {
 		return false
 	}
 
-	if newToken.Valid() {
+	if !newToken.Valid() {
+		fs.Debugf(ts.name, "Loaded invalid token from config file - ignoring")
+	} else {
 		fs.Debugf(ts.name, "Loaded fresh token from config file")
 		changed = true
 	}
@@ -262,8 +264,6 @@ func (ts *TokenSource) reReadToken() (changed bool) {
 	if changed {
 		ts.token = newToken
 		ts.tokenSource = nil // invalidate since we changed the token
-	} else {
-		fs.Debugf(ts.name, "No updated token found in the config file")
 	}
 	return changed
 }
@@ -319,8 +319,6 @@ func (ts *TokenSource) Token() (*oauth2.Token, error) {
 		return ts.token, nil
 	}
 
-	fs.Debug(ts.name, "Token expired")
-
 	// Try getting the token a few times
 	for i := 1; i <= maxTries; i++ {
 		// Try reading the token from the config file in case it has
@@ -346,7 +344,6 @@ func (ts *TokenSource) Token() (*oauth2.Token, error) {
 
 		token, err = ts.tokenSource.Token()
 		if err == nil {
-			fs.Debug(ts.name, "Token refresh successful")
 			break
 		}
 		if newErr := maybeWrapOAuthError(err, ts.name); newErr != err {
@@ -47,14 +47,16 @@ func (r *Renew) renewOnExpiry() {
 		}
 		uploads := r.uploads.Load()
 		if uploads != 0 {
-			fs.Debugf(r.name, "Background refresher detected expired token - %d uploads in progress - refreshing", uploads)
+			fs.Debugf(r.name, "Token expired - %d uploads in progress - refreshing", uploads)
 			// Do a transaction
 			err := r.run()
-			if err != nil {
-				fs.Errorf(r.name, "Background token refresher failed: %v", err)
+			if err == nil {
+				fs.Debugf(r.name, "Token refresh successful")
+			} else {
+				fs.Errorf(r.name, "Token refresh failed: %v", err)
 			}
 		} else {
-			fs.Debugf(r.name, "Background refresher detected expired token but no uploads in progress - doing nothing")
+			fs.Debugf(r.name, "Token expired but no uploads in progress - doing nothing")
 		}
 	}
 }