mirror of https://github.com/rclone/rclone.git (synced 2026-01-28 15:23:26 +00:00)

Compare commits: feat-webda... ... fix-log-fa...

3 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 661fa5786d | |
| | 2fe4fe2766 | |
| | 40ac32f2a6 | |
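The diff below repeats one mechanical change across many backends and commands: configuration callbacks and helpers that used to call log.Fatalf on failure now return an error (wrapped with github.com/pkg/errors in the actual changes), leaving it to the caller to decide how to report the failure. The following is a minimal, self-contained sketch of that before/after shape; the function names and simplified signatures are hypothetical, not rclone's real interfaces, and it uses fmt.Errorf with %w in place of errors.Wrap.

```go
package main

import (
	"context"
	"fmt"
	"log"
)

// configure stands in for a helper such as oauthutil.Config that may fail.
func configure(ctx context.Context, name string) error {
	if name == "" {
		return fmt.Errorf("empty remote name")
	}
	return nil
}

// Before: the callback aborts the whole process on error.
func configCallbackOld(ctx context.Context, name string) {
	if err := configure(ctx, name); err != nil {
		log.Fatalf("Failed to configure token: %v", err) // kills the process, skips deferred cleanup
	}
}

// After: the callback returns the error, wrapped with context for the caller.
func configCallbackNew(ctx context.Context, name string) error {
	if err := configure(ctx, name); err != nil {
		return fmt.Errorf("failed to configure token: %w", err)
	}
	return nil
}

func main() {
	if err := configCallbackNew(context.Background(), ""); err != nil {
		fmt.Println("config failed:", err) // the caller now chooses how to handle it
	}
}
```

With the error-returning form, callers such as the reconnect and authorize commands (changed near the end of this diff) can surface the failure through their normal error paths instead of exiting mid-flow.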
@@ -20,7 +20,7 @@ var (
 )

 func prepare(t *testing.T, root string) {
-configfile.LoadConfig(context.Background())
+require.NoError(t, configfile.LoadConfig(context.Background()))

 // Configure the remote
 config.FileSet(remoteName, "type", "alias")
@@ -16,7 +16,6 @@ import (
 "encoding/json"
 "fmt"
 "io"
-"log"
 "net/http"
 "path"
 "strings"
@@ -70,11 +69,12 @@ func init() {
 Prefix: "acd",
 Description: "Amazon Drive",
 NewFs: NewFs,
-Config: func(ctx context.Context, name string, m configmap.Mapper) {
+Config: func(ctx context.Context, name string, m configmap.Mapper) error {
 err := oauthutil.Config(ctx, "amazon cloud drive", name, m, acdConfig, nil)
 if err != nil {
-log.Fatalf("Failed to configure token: %v", err)
+return errors.Wrap(err, "failed to configure token")
 }
+return nil
 },
 Options: append(oauthutil.SharedOptions, []fs.Option{{
 Name: "checkpoint",
@@ -17,7 +17,6 @@ import (
 "fmt"
 "io"
 "io/ioutil"
-"log"
 "net/http"
 "net/url"
 "path"
@@ -84,7 +83,7 @@ func init() {
 Name: "box",
 Description: "Box",
 NewFs: NewFs,
-Config: func(ctx context.Context, name string, m configmap.Mapper) {
+Config: func(ctx context.Context, name string, m configmap.Mapper) error {
 jsonFile, ok := m.Get("box_config_file")
 boxSubType, boxSubTypeOk := m.Get("box_sub_type")
 boxAccessToken, boxAccessTokenOk := m.Get("access_token")
@@ -93,15 +92,16 @@ func init() {
 if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
 err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
 if err != nil {
-log.Fatalf("Failed to configure token with jwt authentication: %v", err)
+return errors.Wrap(err, "failed to configure token with jwt authentication")
 }
 // Else, if not using an access token, use oauth2
 } else if boxAccessToken == "" || !boxAccessTokenOk {
 err = oauthutil.Config(ctx, "box", name, m, oauthConfig, nil)
 if err != nil {
-log.Fatalf("Failed to configure token with oauth authentication: %v", err)
+return errors.Wrap(err, "failed to configure token with oauth authentication")
 }
 }
+return nil
 },
 Options: append(oauthutil.SharedOptions, []fs.Option{{
 Name: "root_folder_id",
@@ -157,15 +157,15 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
 jsonFile = env.ShellExpand(jsonFile)
 boxConfig, err := getBoxConfig(jsonFile)
 if err != nil {
-log.Fatalf("Failed to configure token: %v", err)
+return errors.Wrap(err, "get box config")
 }
 privateKey, err := getDecryptedPrivateKey(boxConfig)
 if err != nil {
-log.Fatalf("Failed to configure token: %v", err)
+return errors.Wrap(err, "get decrypted private key")
 }
 claims, err := getClaims(boxConfig, boxSubType)
 if err != nil {
-log.Fatalf("Failed to configure token: %v", err)
+return errors.Wrap(err, "get claims")
 }
 signingHeaders := getSigningHeaders(boxConfig)
 queryParams := getQueryParams(boxConfig)
backend/cache/cache_internal_test.go (vendored, 2 changes)
@@ -836,7 +836,7 @@ func newRun() *run {
 if uploadDir == "" {
 r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
 if err != nil {
-log.Fatalf("Failed to create temp dir: %v", err)
+panic(fmt.Sprintf("Failed to create temp dir: %v", err))
 }
 } else {
 r.tmpUploadDir = uploadDir
@@ -14,7 +14,6 @@ import (
 "fmt"
 "io"
 "io/ioutil"
-"log"
 "mime"
 "net/http"
 "path"
@@ -183,13 +182,12 @@ func init() {
 Description: "Google Drive",
 NewFs: NewFs,
 CommandHelp: commandHelp,
-Config: func(ctx context.Context, name string, m configmap.Mapper) {
+Config: func(ctx context.Context, name string, m configmap.Mapper) error {
 // Parse config into Options struct
 opt := new(Options)
 err := configstruct.Set(m, opt)
 if err != nil {
-fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
-return
+return errors.Wrap(err, "couldn't parse config into struct")
 }

 // Fill in the scopes
@@ -202,13 +200,14 @@ func init() {
 if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
 err = oauthutil.Config(ctx, "drive", name, m, driveConfig, nil)
 if err != nil {
-log.Fatalf("Failed to configure token: %v", err)
+return errors.Wrap(err, "failed to configure token")
 }
 }
 err = configTeamDrive(ctx, opt, m, name)
 if err != nil {
-log.Fatalf("Failed to configure Shared Drive: %v", err)
+return errors.Wrap(err, "failed to configure Shared Drive")
 }
+return nil
 },
 Options: append(driveOAuthOptions(), []fs.Option{{
 Name: "scope",
@@ -522,7 +521,7 @@ If this flag is set then rclone will ignore shortcut files completely.
 } {
 for mimeType, extension := range m {
 if err := mime.AddExtensionType(extension, mimeType); err != nil {
-log.Fatalf("Failed to register MIME type %q: %v", mimeType, err)
+fs.Errorf("Failed to register MIME type %q: %v", mimeType, err)
 }
 }
 }
@@ -25,7 +25,6 @@ import (
 "context"
 "fmt"
 "io"
-"log"
 "path"
 "regexp"
 "strings"
@@ -144,7 +143,7 @@ func init() {
 Name: "dropbox",
 Description: "Dropbox",
 NewFs: NewFs,
-Config: func(ctx context.Context, name string, m configmap.Mapper) {
+Config: func(ctx context.Context, name string, m configmap.Mapper) error {
 opt := oauthutil.Options{
 NoOffline: true,
 OAuth2Opts: []oauth2.AuthCodeOption{
@@ -153,8 +152,9 @@ func init() {
 }
 err := oauthutil.Config(ctx, "dropbox", name, m, getOauthConfig(m), &opt)
 if err != nil {
-log.Fatalf("Failed to configure token: %v", err)
+return errors.Wrap(err, "failed to configure token")
 }
+return nil
 },
 Options: append(oauthutil.SharedOptions, []fs.Option{{
 Name: "chunk_size",
@@ -35,9 +35,7 @@ func init() {
 fs.Register(&fs.RegInfo{
 Name: "fichier",
 Description: "1Fichier",
-Config: func(ctx context.Context, name string, config configmap.Mapper) {
-},
-NewFs: NewFs,
+NewFs: NewFs,
 Options: []fs.Option{{
 Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
 Name: "api_key",
@@ -19,7 +19,6 @@ import (
 "fmt"
 "io"
 "io/ioutil"
-"log"
 "net/http"
 "path"
 "strings"
@@ -76,17 +75,18 @@ func init() {
 Prefix: "gcs",
 Description: "Google Cloud Storage (this is not Google Drive)",
 NewFs: NewFs,
-Config: func(ctx context.Context, name string, m configmap.Mapper) {
+Config: func(ctx context.Context, name string, m configmap.Mapper) error {
 saFile, _ := m.Get("service_account_file")
 saCreds, _ := m.Get("service_account_credentials")
 anonymous, _ := m.Get("anonymous")
 if saFile != "" || saCreds != "" || anonymous == "true" {
-return
+return nil
 }
 err := oauthutil.Config(ctx, "google cloud storage", name, m, storageConfig, nil)
 if err != nil {
-log.Fatalf("Failed to configure token: %v", err)
+return errors.Wrap(err, "failed to configure token")
 }
+return nil
 },
 Options: append(oauthutil.SharedOptions, []fs.Option{{
 Name: "project_number",
@@ -8,7 +8,6 @@ import (
 "encoding/json"
 "fmt"
 "io"
-golog "log"
 "net/http"
 "net/url"
 "path"
@@ -78,13 +77,12 @@ func init() {
 Prefix: "gphotos",
 Description: "Google Photos",
 NewFs: NewFs,
-Config: func(ctx context.Context, name string, m configmap.Mapper) {
+Config: func(ctx context.Context, name string, m configmap.Mapper) error {
 // Parse config into Options struct
 opt := new(Options)
 err := configstruct.Set(m, opt)
 if err != nil {
-fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
-return
+return errors.Wrap(err, "couldn't parse config into struct")
 }

 // Fill in the scopes
@@ -97,7 +95,7 @@ func init() {
 // Do the oauth
 err = oauthutil.Config(ctx, "google photos", name, m, oauthConfig, nil)
 if err != nil {
-golog.Fatalf("Failed to configure token: %v", err)
+return errors.Wrap(err, "failed to configure token")
 }

 // Warn the user
@@ -108,6 +106,7 @@

 `)

+return nil
 },
 Options: append(oauthutil.SharedOptions, []fs.Option{{
 Name: "read_only",
@@ -47,7 +47,7 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
 ts := httptest.NewServer(handler)

 // Configure the remote
-configfile.LoadConfig(context.Background())
+require.NoError(t, configfile.LoadConfig(context.Background()))
 // fs.Config.LogLevel = fs.LogLevelDebug
 // fs.Config.DumpHeaders = true
 // fs.Config.DumpBodies = true
@@ -11,7 +11,6 @@ import (
 "encoding/json"
 "fmt"
 "io/ioutil"
-"log"
 "net/http"
 "strings"
 "time"
@@ -56,11 +55,12 @@ func init() {
 Name: "hubic",
 Description: "Hubic",
 NewFs: NewFs,
-Config: func(ctx context.Context, name string, m configmap.Mapper) {
+Config: func(ctx context.Context, name string, m configmap.Mapper) error {
 err := oauthutil.Config(ctx, "hubic", name, m, oauthConfig, nil)
 if err != nil {
-log.Fatalf("Failed to configure token: %v", err)
+return errors.Wrap(err, "failed to configure token")
 }
+return nil
 },
 Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
 })
@@ -10,7 +10,6 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -87,12 +86,12 @@ func init() {
|
||||
Name: "jottacloud",
|
||||
Description: "Jottacloud",
|
||||
NewFs: NewFs,
|
||||
Config: func(ctx context.Context, name string, m configmap.Mapper) {
|
||||
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
|
||||
refresh := false
|
||||
if version, ok := m.Get("configVersion"); ok {
|
||||
ver, err := strconv.Atoi(version)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to parse config version - corrupted config")
|
||||
return errors.Wrap(err, "failed to parse config version - corrupted config")
|
||||
}
|
||||
refresh = (ver != configVersion) && (ver != v1configVersion)
|
||||
}
|
||||
@@ -104,7 +103,7 @@ func init() {
|
||||
if ok && tokenString != "" {
|
||||
fmt.Printf("Already have a token - refresh?\n")
|
||||
if !config.Confirm(false) {
|
||||
return
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -116,11 +115,13 @@ func init() {
|
||||
|
||||
switch config.ChooseNumber("Your choice", 1, 3) {
|
||||
case 1:
|
||||
v2config(ctx, name, m)
|
||||
return v2config(ctx, name, m)
|
||||
case 2:
|
||||
v1config(ctx, name, m)
|
||||
return v1config(ctx, name, m)
|
||||
case 3:
|
||||
teliaCloudConfig(ctx, name, m)
|
||||
return teliaCloudConfig(ctx, name, m)
|
||||
default:
|
||||
return errors.New("unknown config choice")
|
||||
}
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
@@ -242,7 +243,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
|
||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
func teliaCloudConfig(ctx context.Context, name string, m configmap.Mapper) {
|
||||
func teliaCloudConfig(ctx context.Context, name string, m configmap.Mapper) error {
|
||||
teliaCloudOauthConfig := &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: teliaCloudAuthURL,
|
||||
@@ -255,15 +256,14 @@ func teliaCloudConfig(ctx context.Context, name string, m configmap.Mapper) {
|
||||
|
||||
err := oauthutil.Config(ctx, "jottacloud", name, m, teliaCloudOauthConfig, nil)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
return
|
||||
return errors.Wrap(err, "failed to configure token")
|
||||
}
|
||||
|
||||
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
|
||||
if config.Confirm(false) {
|
||||
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, teliaCloudOauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to load oAuthClient: %s", err)
|
||||
return errors.Wrap(err, "failed to load oAuthClient")
|
||||
}
|
||||
|
||||
srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
|
||||
@@ -271,7 +271,7 @@ func teliaCloudConfig(ctx context.Context, name string, m configmap.Mapper) {
|
||||
|
||||
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to setup mountpoint: %s", err)
|
||||
return errors.Wrap(err, "failed to setup mountpoint")
|
||||
}
|
||||
m.Set(configDevice, device)
|
||||
m.Set(configMountpoint, mountpoint)
|
||||
@@ -280,17 +280,18 @@ func teliaCloudConfig(ctx context.Context, name string, m configmap.Mapper) {
|
||||
m.Set("configVersion", strconv.Itoa(configVersion))
|
||||
m.Set(configClientID, teliaCloudClientID)
|
||||
m.Set(configTokenURL, teliaCloudTokenURL)
|
||||
return nil
|
||||
}
|
||||
|
||||
// v1config configure a jottacloud backend using legacy authentication
|
||||
func v1config(ctx context.Context, name string, m configmap.Mapper) {
|
||||
func v1config(ctx context.Context, name string, m configmap.Mapper) error {
|
||||
srv := rest.NewClient(fshttp.NewClient(ctx))
|
||||
|
||||
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
|
||||
if config.Confirm(false) {
|
||||
deviceRegistration, err := registerDevice(ctx, srv)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to register device: %v", err)
|
||||
return errors.Wrap(err, "failed to register device")
|
||||
}
|
||||
|
||||
m.Set(configClientID, deviceRegistration.ClientID)
|
||||
@@ -318,18 +319,18 @@ func v1config(ctx context.Context, name string, m configmap.Mapper) {
|
||||
|
||||
token, err := doAuthV1(ctx, srv, username, password)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to get oauth token: %s", err)
|
||||
return errors.Wrap(err, "failed to get oauth token")
|
||||
}
|
||||
err = oauthutil.PutToken(name, m, &token, true)
|
||||
if err != nil {
|
||||
log.Fatalf("Error while saving token: %s", err)
|
||||
return errors.Wrap(err, "error while saving token")
|
||||
}
|
||||
|
||||
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
|
||||
if config.Confirm(false) {
|
||||
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to load oAuthClient: %s", err)
|
||||
return errors.Wrap(err, "failed to load oAuthClient")
|
||||
}
|
||||
|
||||
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
|
||||
@@ -337,13 +338,14 @@ func v1config(ctx context.Context, name string, m configmap.Mapper) {
|
||||
|
||||
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to setup mountpoint: %s", err)
|
||||
return errors.Wrap(err, "failed to setup mountpoint")
|
||||
}
|
||||
m.Set(configDevice, device)
|
||||
m.Set(configMountpoint, mountpoint)
|
||||
}
|
||||
|
||||
m.Set("configVersion", strconv.Itoa(v1configVersion))
|
||||
return nil
|
||||
}
|
||||
|
||||
// registerDevice register a new device for use with the jottacloud API
|
||||
@@ -418,7 +420,7 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
|
||||
}
|
||||
|
||||
// v2config configure a jottacloud backend using the modern JottaCli token based authentication
|
||||
func v2config(ctx context.Context, name string, m configmap.Mapper) {
|
||||
func v2config(ctx context.Context, name string, m configmap.Mapper) error {
|
||||
srv := rest.NewClient(fshttp.NewClient(ctx))
|
||||
|
||||
fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
|
||||
@@ -430,31 +432,32 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
|
||||
|
||||
token, err := doAuthV2(ctx, srv, loginToken, m)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to get oauth token: %s", err)
|
||||
return errors.Wrap(err, "failed to get oauth token")
|
||||
}
|
||||
err = oauthutil.PutToken(name, m, &token, true)
|
||||
if err != nil {
|
||||
log.Fatalf("Error while saving token: %s", err)
|
||||
return errors.Wrap(err, "error while saving token")
|
||||
}
|
||||
|
||||
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
|
||||
if config.Confirm(false) {
|
||||
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to load oAuthClient: %s", err)
|
||||
return errors.Wrap(err, "failed to load oAuthClient")
|
||||
}
|
||||
|
||||
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
|
||||
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
|
||||
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to setup mountpoint: %s", err)
|
||||
return errors.Wrap(err, "failed to setup mountpoint")
|
||||
}
|
||||
m.Set(configDevice, device)
|
||||
m.Set(configMountpoint, mountpoint)
|
||||
}
|
||||
|
||||
m.Set("configVersion", strconv.Itoa(configVersion))
|
||||
return nil
|
||||
}
|
||||
|
||||
// doAuthV2 runs the actual token request for V2 authentication
|
||||
|
||||
@@ -6,8 +6,8 @@ import (
 "bufio"
 "bytes"
 "encoding/binary"
+"fmt"
 "io"
-"log"
 "time"

 "github.com/pkg/errors"
@@ -48,7 +48,7 @@ func (w *BinWriter) Reader() io.Reader {
 // WritePu16 writes a short as unsigned varint
 func (w *BinWriter) WritePu16(val int) {
 if val < 0 || val > 65535 {
-log.Fatalf("Invalid UInt16 %v", val)
+panic(fmt.Sprintf("Invalid UInt16 %v", val))
 }
 w.WritePu64(int64(val))
 }
@@ -56,7 +56,7 @@ func (w *BinWriter) WritePu16(val int) {
 // WritePu32 writes a signed long as unsigned varint
 func (w *BinWriter) WritePu32(val int64) {
 if val < 0 || val > 4294967295 {
-log.Fatalf("Invalid UInt32 %v", val)
+panic(fmt.Sprintf("Invalid UInt32 %v", val))
 }
 w.WritePu64(val)
 }
@@ -64,7 +64,7 @@ func (w *BinWriter) WritePu32(val int64) {
 // WritePu64 writes an unsigned (actually, signed) long as unsigned varint
 func (w *BinWriter) WritePu64(val int64) {
 if val < 0 {
-log.Fatalf("Invalid UInt64 %v", val)
+panic(fmt.Sprintf("Invalid UInt64 %v", val))
 }
 w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
 }
@@ -123,7 +123,7 @@ func (r *BinReader) check(err error) bool {
 r.err = err
 }
 if err != io.EOF {
-log.Fatalf("Error parsing response: %v", err)
+panic(fmt.Sprintf("Error parsing response: %v", err))
 }
 return false
 }
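In the varint-writer hunks above, functions such as WritePu16 have no error return value at all, so the fix replaces log.Fatalf with panic rather than with a returned error. Below is a minimal sketch of that trade-off, with hypothetical names (this is not the actual BinWriter): unlike log.Fatalf, a panic still runs deferred cleanup and can be recovered by a caller.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

type binWriter struct {
	buf []byte
}

// writeUint16 panics on out-of-range input instead of killing the process.
func (w *binWriter) writeUint16(val int) {
	if val < 0 || val > 65535 {
		panic(fmt.Sprintf("invalid UInt16 %v", val))
	}
	var tmp [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(tmp[:], uint64(val))
	w.buf = append(w.buf, tmp[:n]...)
}

func main() {
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered:", r) // caller-level handling is still possible
		}
	}()
	w := &binWriter{}
	w.writeUint16(70000) // out of range: panics, recovered above
}
```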
@@ -9,7 +9,6 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
@@ -99,7 +98,7 @@ func init() {
|
||||
Name: "onedrive",
|
||||
Description: "Microsoft OneDrive",
|
||||
NewFs: NewFs,
|
||||
Config: func(ctx context.Context, name string, m configmap.Mapper) {
|
||||
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
|
||||
region, _ := m.Get("region")
|
||||
graphURL := graphAPIEndpoint[region] + "/v1.0"
|
||||
oauthConfig.Endpoint = oauth2.Endpoint{
|
||||
@@ -109,13 +108,12 @@ func init() {
|
||||
ci := fs.GetConfig(ctx)
|
||||
err := oauthutil.Config(ctx, "onedrive", name, m, oauthConfig, nil)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
return
|
||||
return errors.Wrap(err, "failed to configure token")
|
||||
}
|
||||
|
||||
// Stop if we are running non-interactive config
|
||||
if ci.AutoConfirm {
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
||||
type driveResource struct {
|
||||
@@ -138,7 +136,7 @@ func init() {
|
||||
|
||||
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure OneDrive: %v", err)
|
||||
return errors.Wrap(err, "failed to configure OneDrive")
|
||||
}
|
||||
srv := rest.NewClient(oAuthClient)
|
||||
|
||||
@@ -203,18 +201,17 @@ func init() {
|
||||
sites := siteResponse{}
|
||||
_, err := srv.CallJSON(ctx, &opts, nil, &sites)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to query available sites: %v", err)
|
||||
return errors.Wrap(err, "failed to query available sites")
|
||||
}
|
||||
|
||||
if len(sites.Sites) == 0 {
|
||||
log.Fatalf("Search for '%s' returned no results", searchTerm)
|
||||
} else {
|
||||
fmt.Printf("Found %d sites, please select the one you want to use:\n", len(sites.Sites))
|
||||
for index, site := range sites.Sites {
|
||||
fmt.Printf("%d: %s (%s) id=%s\n", index, site.SiteName, site.SiteURL, site.SiteID)
|
||||
}
|
||||
siteID = sites.Sites[config.ChooseNumber("Chose drive to use:", 0, len(sites.Sites)-1)].SiteID
|
||||
return errors.Errorf("search for %q returned no results", searchTerm)
|
||||
}
|
||||
fmt.Printf("Found %d sites, please select the one you want to use:\n", len(sites.Sites))
|
||||
for index, site := range sites.Sites {
|
||||
fmt.Printf("%d: %s (%s) id=%s\n", index, site.SiteName, site.SiteURL, site.SiteID)
|
||||
}
|
||||
siteID = sites.Sites[config.ChooseNumber("Chose drive to use:", 0, len(sites.Sites)-1)].SiteID
|
||||
}
|
||||
|
||||
// if we use server-relative URL for finding the drive
|
||||
@@ -227,7 +224,7 @@ func init() {
|
||||
site := siteResource{}
|
||||
_, err := srv.CallJSON(ctx, &opts, nil, &site)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to query available site by relative path: %v", err)
|
||||
return errors.Wrap(err, "failed to query available site by relative path")
|
||||
}
|
||||
siteID = site.SiteID
|
||||
}
|
||||
@@ -247,7 +244,7 @@ func init() {
|
||||
drives := drivesResponse{}
|
||||
_, err := srv.CallJSON(ctx, &opts, nil, &drives)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to query available drives: %v", err)
|
||||
return errors.Wrap(err, "failed to query available drives")
|
||||
}
|
||||
|
||||
// Also call /me/drive as sometimes /me/drives doesn't return it #4068
|
||||
@@ -256,7 +253,7 @@ func init() {
|
||||
meDrive := driveResource{}
|
||||
_, err := srv.CallJSON(ctx, &opts, nil, &meDrive)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to query available drives: %v", err)
|
||||
return errors.Wrap(err, "failed to query available drives")
|
||||
}
|
||||
found := false
|
||||
for _, drive := range drives.Drives {
|
||||
@@ -273,14 +270,13 @@ func init() {
|
||||
}
|
||||
|
||||
if len(drives.Drives) == 0 {
|
||||
log.Fatalf("No drives found")
|
||||
} else {
|
||||
fmt.Printf("Found %d drives, please select the one you want to use:\n", len(drives.Drives))
|
||||
for index, drive := range drives.Drives {
|
||||
fmt.Printf("%d: %s (%s) id=%s\n", index, drive.DriveName, drive.DriveType, drive.DriveID)
|
||||
}
|
||||
finalDriveID = drives.Drives[config.ChooseNumber("Chose drive to use:", 0, len(drives.Drives)-1)].DriveID
|
||||
return errors.New("no drives found")
|
||||
}
|
||||
fmt.Printf("Found %d drives, please select the one you want to use:\n", len(drives.Drives))
|
||||
for index, drive := range drives.Drives {
|
||||
fmt.Printf("%d: %s (%s) id=%s\n", index, drive.DriveName, drive.DriveType, drive.DriveID)
|
||||
}
|
||||
finalDriveID = drives.Drives[config.ChooseNumber("Chose drive to use:", 0, len(drives.Drives)-1)].DriveID
|
||||
}
|
||||
|
||||
// Test the driveID and get drive type
|
||||
@@ -291,17 +287,18 @@ func init() {
|
||||
var rootItem api.Item
|
||||
_, err = srv.CallJSON(ctx, &opts, nil, &rootItem)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to query root for drive %s: %v", finalDriveID, err)
|
||||
return errors.Wrapf(err, "failed to query root for drive %s", finalDriveID)
|
||||
}
|
||||
|
||||
fmt.Printf("Found drive '%s' of type '%s', URL: %s\nIs that okay?\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL)
|
||||
// This does not work, YET :)
|
||||
if !config.ConfirmWithConfig(ctx, m, "config_drive_ok", true) {
|
||||
log.Fatalf("Cancelled by user")
|
||||
return errors.New("cancelled by user")
|
||||
}
|
||||
|
||||
m.Set(configDriveID, finalDriveID)
|
||||
m.Set(configDriveType, rootItem.ParentReference.DriveType)
|
||||
return nil
|
||||
},
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "region",
|
||||
|
||||
@@ -12,7 +12,6 @@ import (
 "context"
 "fmt"
 "io"
-"log"
 "net/http"
 "net/url"
 "path"
@@ -72,7 +71,7 @@ func init() {
 Name: "pcloud",
 Description: "Pcloud",
 NewFs: NewFs,
-Config: func(ctx context.Context, name string, m configmap.Mapper) {
+Config: func(ctx context.Context, name string, m configmap.Mapper) error {
 optc := new(Options)
 err := configstruct.Set(m, optc)
 if err != nil {
@@ -100,8 +99,9 @@ func init() {
 }
 err = oauthutil.Config(ctx, "pcloud", name, m, oauthConfig, &opt)
 if err != nil {
-log.Fatalf("Failed to configure token: %v", err)
+return errors.Wrap(err, "failed to configure token")
 }
+return nil
 },
 Options: append(oauthutil.SharedOptions, []fs.Option{{
 Name: config.ConfigEncoding,
@@ -20,7 +20,6 @@ import (
 "encoding/json"
 "fmt"
 "io"
-"log"
 "net"
 "net/http"
 "net/url"
@@ -78,11 +77,12 @@ func init() {
 Name: "premiumizeme",
 Description: "premiumize.me",
 NewFs: NewFs,
-Config: func(ctx context.Context, name string, m configmap.Mapper) {
+Config: func(ctx context.Context, name string, m configmap.Mapper) error {
 err := oauthutil.Config(ctx, "premiumizeme", name, m, oauthConfig, nil)
 if err != nil {
-log.Fatalf("Failed to configure token: %v", err)
+return errors.Wrap(err, "failed to configure token")
 }
+return nil
 },
 Options: []fs.Option{{
 Name: "api_key",
@@ -2,10 +2,10 @@ package putio

 import (
 "context"
-"log"
 "regexp"
 "time"

+"github.com/pkg/errors"
 "github.com/rclone/rclone/fs"
 "github.com/rclone/rclone/fs/config"
 "github.com/rclone/rclone/fs/config/configmap"
@@ -60,14 +60,15 @@ func init() {
 Name: "putio",
 Description: "Put.io",
 NewFs: NewFs,
-Config: func(ctx context.Context, name string, m configmap.Mapper) {
+Config: func(ctx context.Context, name string, m configmap.Mapper) error {
 opt := oauthutil.Options{
 NoOffline: true,
 }
 err := oauthutil.Config(ctx, "putio", name, m, putioConfig, &opt)
 if err != nil {
-log.Fatalf("Failed to configure token: %v", err)
+return errors.Wrap(err, "failed to configure token")
 }
+return nil
 },
 Options: []fs.Option{{
 Name: config.ConfigEncoding,
@@ -296,36 +296,32 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 }

 // Config callback for 2FA
-func Config(ctx context.Context, name string, m configmap.Mapper) {
+func Config(ctx context.Context, name string, m configmap.Mapper) error {
 ci := fs.GetConfig(ctx)
 serverURL, ok := m.Get(configURL)
 if !ok || serverURL == "" {
 // If there's no server URL, it means we're trying an operation at the backend level, like a "rclone authorize seafile"
-fmt.Print("\nOperation not supported on this remote.\nIf you need a 2FA code on your account, use the command:\n\nrclone config reconnect <remote name>:\n\n")
-return
+return errors.New("operation not supported on this remote. If you need a 2FA code on your account, use the command: nrclone config reconnect <remote name>: ")
 }

 // Stop if we are running non-interactive config
 if ci.AutoConfirm {
-return
+return nil
 }

 u, err := url.Parse(serverURL)
 if err != nil {
-fs.Errorf(nil, "Invalid server URL %s", serverURL)
-return
+return errors.Errorf("invalid server URL %s", serverURL)
 }

 is2faEnabled, _ := m.Get(config2FA)
 if is2faEnabled != "true" {
-fmt.Println("Two-factor authentication is not enabled on this account.")
-return
+return errors.New("two-factor authentication is not enabled on this account")
 }

 username, _ := m.Get(configUser)
 if username == "" {
-fs.Errorf(nil, "A username is required")
-return
+return errors.New("a username is required")
 }

 password, _ := m.Get(configPassword)
@@ -376,6 +372,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper) {
 break
 }
 }
+return nil
 }

 // sets the AuthorizationToken up
@@ -77,7 +77,6 @@ import (
 "fmt"
 "io"
 "io/ioutil"
-"log"
 "net/http"
 "net/url"
 "path"
@@ -136,7 +135,7 @@ func init() {
 Name: "sharefile",
 Description: "Citrix Sharefile",
 NewFs: NewFs,
-Config: func(ctx context.Context, name string, m configmap.Mapper) {
+Config: func(ctx context.Context, name string, m configmap.Mapper) error {
 oauthConfig := newOauthConfig("")
 checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
 if auth == nil || auth.Form == nil {
@@ -157,8 +156,9 @@ func init() {
 }
 err := oauthutil.Config(ctx, "sharefile", name, m, oauthConfig, &opt)
 if err != nil {
-log.Fatalf("Failed to configure token: %v", err)
+return errors.Wrap(err, "failed to configure token")
 }
+return nil
 },
 Options: []fs.Option{{
 Name: "upload_cutoff",
@@ -16,7 +16,6 @@ import (
 "context"
 "fmt"
 "io"
-"log"
 "net/http"
 "net/url"
 "path"
@@ -76,17 +75,17 @@ func init() {
 Name: "sugarsync",
 Description: "Sugarsync",
 NewFs: NewFs,
-Config: func(ctx context.Context, name string, m configmap.Mapper) {
+Config: func(ctx context.Context, name string, m configmap.Mapper) error {
 opt := new(Options)
 err := configstruct.Set(m, opt)
 if err != nil {
-log.Fatalf("Failed to read options: %v", err)
+return errors.Wrap(err, "failed to read options")
 }

 if opt.RefreshToken != "" {
 fmt.Printf("Already have a token - refresh?\n")
 if !config.ConfirmWithConfig(ctx, m, "config_refresh_token", true) {
-return
+return nil
 }
 }
 fmt.Printf("Username (email address)> ")
@@ -114,10 +113,11 @@ func init() {
 // return shouldRetry(ctx, resp, err)
 //})
 if err != nil {
-log.Fatalf("Failed to get token: %v", err)
+return errors.Wrap(err, "failed to get token")
 }
 opt.RefreshToken = resp.Header.Get("Location")
 m.Set("refresh_token", opt.RefreshToken)
+return nil
 },
 Options: []fs.Option{{
 Name: "app_id",
@@ -7,7 +7,6 @@ import (
 "context"
 "fmt"
 "io"
-"log"
 "path"
 "strings"
 "time"
@@ -42,7 +41,7 @@ func init() {
 Name: "tardigrade",
 Description: "Tardigrade Decentralized Cloud Storage",
 NewFs: NewFs,
-Config: func(ctx context.Context, name string, configMapper configmap.Mapper) {
+Config: func(ctx context.Context, name string, configMapper configmap.Mapper) error {
 provider, _ := configMapper.Get(fs.ConfigProvider)

 config.FileDeleteKey(name, fs.ConfigProvider)
@@ -54,7 +53,7 @@ func init() {

 // satelliteString contains always default and passphrase can be empty
 if apiKey == "" {
-return
+return nil
 }

 satellite, found := satMap[satelliteString]
@@ -64,12 +63,12 @@ func init() {

 access, err := uplink.RequestAccessWithPassphrase(context.TODO(), satellite, apiKey, passphrase)
 if err != nil {
-log.Fatalf("Couldn't create access grant: %v", err)
+return errors.Wrap(err, "couldn't create access grant")
 }

 serializedAccess, err := access.Serialize()
 if err != nil {
-log.Fatalf("Couldn't serialize access grant: %v", err)
+return errors.Wrap(err, "couldn't serialize access grant")
 }
 configMapper.Set("satellite_address", satellite)
 configMapper.Set("access_grant", serializedAccess)
@@ -78,8 +77,9 @@ func init() {
 config.FileDeleteKey(name, "api_key")
 config.FileDeleteKey(name, "passphrase")
 } else {
-log.Fatalf("Invalid provider type: %s", provider)
+return errors.Errorf("invalid provider type: %s", provider)
 }
+return nil
 },
 Options: []fs.Option{
 {
@@ -10,9 +10,7 @@ package webdav
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"crypto/tls"
|
||||
"encoding/hex"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -20,7 +18,6 @@ import (
|
||||
"net/url"
|
||||
"os/exec"
|
||||
"path"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -37,7 +34,6 @@ import (
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
@@ -117,14 +113,6 @@ func init() {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: configEncodingHelp,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "chunk_size",
|
||||
Help: `Chunk size to use for uploading (Nextcloud only)
|
||||
|
||||
Set to 0 to disable chunked uploading.
|
||||
`,
|
||||
Advanced: true,
|
||||
Default: fs.SizeSuffix(0), // off by default
|
||||
}},
|
||||
})
|
||||
}
|
||||
@@ -138,7 +126,6 @@ type Options struct {
|
||||
BearerToken string `config:"bearer_token"`
|
||||
BearerTokenCommand string `config:"bearer_token_command"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
}
|
||||
|
||||
// Fs represents a remote webdav
|
||||
@@ -149,7 +136,6 @@ type Fs struct {
|
||||
features *fs.Features // optional features
|
||||
endpoint *url.URL // URL of the host
|
||||
endpointURL string // endpoint as a string
|
||||
uploadURL string // upload URL for nextcloud chunked
|
||||
srv *rest.Client // the connection to the one drive server
|
||||
pacer *fs.Pacer // pacer for API calls
|
||||
precision time.Duration // mod time precision
|
||||
@@ -160,7 +146,6 @@ type Fs struct {
|
||||
hasMD5 bool // set if can use owncloud style checksums for MD5
|
||||
hasSHA1 bool // set if can use owncloud style checksums for SHA1
|
||||
ntlmAuthMu sync.Mutex // mutex to serialize NTLM auth roundtrips
|
||||
canChunk bool // set if nextcloud and chunk_size is set
|
||||
}
|
||||
|
||||
// Object describes a webdav object
|
||||
@@ -472,12 +457,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// set the chunk size for testing
|
||||
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
|
||||
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
|
||||
return
|
||||
}
|
||||
|
||||
// sets the BearerToken up
|
||||
func (f *Fs) setBearerToken(token string) {
|
||||
f.opt.BearerToken = token
|
||||
@@ -521,8 +500,6 @@ func (f *Fs) fetchAndSetBearerToken() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
var matchNextcloudURL = regexp.MustCompile(`^.*/dav/files/[^/]+/?$`)
|
||||
|
||||
// setQuirks adjusts the Fs for the vendor passed in
|
||||
func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
|
||||
switch vendor {
|
||||
@@ -536,12 +513,6 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
|
||||
f.precision = time.Second
|
||||
f.useOCMtime = true
|
||||
f.hasSHA1 = true
|
||||
f.canChunk = true
|
||||
if f.opt.ChunkSize != 0 && !matchNextcloudURL.MatchString(f.endpointURL) {
|
||||
return errors.New("chunked upload with nextcloud must use /dav/files/USER endpoint not /webdav")
|
||||
}
|
||||
f.uploadURL = strings.Replace(f.endpointURL, "/dav/files/", "/dav/uploads/", 1)
|
||||
fs.Logf(nil, f.uploadURL)
|
||||
case "sharepoint":
|
||||
// To mount sharepoint, two Cookies are required
|
||||
// They have to be set instead of BasicAuth
|
||||
@@ -985,7 +956,7 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
|
||||
dstPath := f.filePath(remote)
|
||||
err := f.mkParentDir(ctx, dstPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "copy mkParentDir failed")
|
||||
return nil, errors.Wrap(err, "Copy mkParentDir failed")
|
||||
}
|
||||
destinationURL, err := rest.URLJoin(f.endpoint, dstPath)
|
||||
if err != nil {
|
||||
@@ -1009,11 +980,11 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "copy call failed")
|
||||
return nil, errors.Wrap(err, "Copy call failed")
|
||||
}
|
||||
dstObj, err := f.NewObject(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "copy NewObject failed")
|
||||
return nil, errors.Wrap(err, "Copy NewObject failed")
|
||||
}
|
||||
return dstObj, nil
|
||||
}
|
||||
@@ -1076,18 +1047,18 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
return fs.ErrorDirExists
|
||||
}
|
||||
if err != fs.ErrorDirNotFound {
|
||||
return errors.Wrap(err, "dirMove dirExists dst failed")
|
||||
return errors.Wrap(err, "DirMove dirExists dst failed")
|
||||
}
|
||||
|
||||
// Make sure the parent directory exists
|
||||
err = f.mkParentDir(ctx, dstPath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "dirMove mkParentDir dst failed")
|
||||
return errors.Wrap(err, "DirMove mkParentDir dst failed")
|
||||
}
|
||||
|
||||
destinationURL, err := rest.URLJoin(f.endpoint, dstPath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "dirMove couldn't join URL")
|
||||
return errors.Wrap(err, "DirMove couldn't join URL")
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
@@ -1105,7 +1076,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "dirMove MOVE call failed")
|
||||
return errors.Wrap(err, "DirMove MOVE call failed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1288,67 +1259,39 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
err = o.fs.mkParentDir(ctx, o.filePath())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "update mkParentDir failed")
|
||||
return errors.Wrap(err, "Update mkParentDir failed")
|
||||
}
|
||||
|
||||
size := src.Size()
|
||||
if o.fs.canChunk && o.fs.opt.ChunkSize > 0 && size > int64(o.fs.opt.ChunkSize) {
|
||||
err = o.updateChunked(ctx, in, src, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
contentType := fs.MimeType(ctx, src)
|
||||
filePath := o.filePath()
|
||||
extraHeaders := o.extraHeaders(ctx, src)
|
||||
err = o.updateSimple(ctx, in, filePath, size, contentType, extraHeaders, o.fs.endpointURL, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var resp *http.Response
|
||||
opts := rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: o.filePath(),
|
||||
Body: in,
|
||||
NoResponse: true,
|
||||
ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
|
||||
ContentType: fs.MimeType(ctx, src),
|
||||
Options: options,
|
||||
}
|
||||
|
||||
// read metadata from remote
|
||||
o.hasMetaData = false
|
||||
return o.readMetaData(ctx)
|
||||
}
|
||||
|
||||
func (o *Object) extraHeaders(ctx context.Context, src fs.ObjectInfo) map[string]string {
|
||||
extraHeaders := map[string]string{}
|
||||
if o.fs.useOCMtime || o.fs.hasMD5 || o.fs.hasSHA1 {
|
||||
opts.ExtraHeaders = map[string]string{}
|
||||
if o.fs.useOCMtime {
|
||||
extraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
|
||||
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
|
||||
}
|
||||
// Set one upload checksum
|
||||
// Owncloud uses one checksum only to check the upload and stores its own SHA1 and MD5
|
||||
// Nextcloud stores the checksum you supply (SHA1 or MD5) but only stores one
|
||||
if o.fs.hasSHA1 {
|
||||
if sha1, _ := src.Hash(ctx, hash.SHA1); sha1 != "" {
|
||||
extraHeaders["OC-Checksum"] = "SHA1:" + sha1
|
||||
opts.ExtraHeaders["OC-Checksum"] = "SHA1:" + sha1
|
||||
}
|
||||
}
|
||||
if o.fs.hasMD5 && extraHeaders["OC-Checksum"] == "" {
|
||||
if o.fs.hasMD5 && opts.ExtraHeaders["OC-Checksum"] == "" {
|
||||
if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" {
|
||||
extraHeaders["OC-Checksum"] = "MD5:" + md5
|
||||
opts.ExtraHeaders["OC-Checksum"] = "MD5:" + md5
|
||||
}
|
||||
}
|
||||
}
|
||||
return extraHeaders
|
||||
}
|
||||
|
||||
// Standard update
|
||||
func (o *Object) updateSimple(ctx context.Context, in io.Reader, filePath string, size int64, contentType string, extraHeaders map[string]string, rootURL string, options ...fs.OpenOption) (err error) {
|
||||
var resp *http.Response
|
||||
opts := rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: filePath,
|
||||
Body: in,
|
||||
NoResponse: true,
|
||||
ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
|
||||
ContentType: contentType,
|
||||
Options: options,
|
||||
ExtraHeaders: extraHeaders,
|
||||
RootURL: rootURL,
|
||||
}
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
return o.fs.shouldRetry(ctx, resp, err)
|
||||
@@ -1364,85 +1307,9 @@ func (o *Object) updateSimple(ctx context.Context, in io.Reader, filePath string
|
||||
_ = o.Remove(ctx)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
// Chunked update for Nextcloud (see
|
||||
// https://docs.nextcloud.com/server/20/developer_manual/client_apis/WebDAV/chunking.html)
|
||||
func (o *Object) updateChunked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
hasher := md5.New()
|
||||
_, err = hasher.Write([]byte(o.filePath()))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "chunked upload couldn't hash URL")
|
||||
}
|
||||
uploadDir := "rclone-chunked-upload-" + hex.EncodeToString(hasher.Sum(nil))
|
||||
fs.Debugf(src, "Starting multipart upload to temp dir %q", uploadDir)
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "MKCOL",
|
||||
Path: uploadDir + "/",
|
||||
NoResponse: true,
|
||||
RootURL: o.fs.uploadURL,
|
||||
}
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err := o.fs.srv.Call(ctx, &opts)
|
||||
return o.fs.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "making upload directory failed")
|
||||
}
|
||||
defer atexit.OnError(&err, func() {
|
||||
// Try to abort the upload, but ignore the error.
|
||||
fs.Debugf(src, "Cancelling chunked upload")
|
||||
_ = o.fs.Purge(ctx, uploadDir)
|
||||
})()
|
||||
|
||||
var (
|
||||
size = src.Size()
|
||||
uploadedSize = int64(0)
|
||||
partObj = &Object{
|
||||
fs: o.fs,
|
||||
}
|
||||
)
|
||||
for uploadedSize < size {
|
||||
// Upload chunk
|
||||
contentLength := int64(partObj.fs.opt.ChunkSize)
|
||||
if size-uploadedSize < contentLength {
|
||||
contentLength = size - uploadedSize
|
||||
}
|
||||
partObj.remote = fmt.Sprintf("%s/%015d-%015d", uploadDir, uploadedSize, uploadedSize+contentLength)
|
||||
extraHeaders := map[string]string{}
|
||||
err = partObj.updateSimple(ctx, io.LimitReader(in, int64(partObj.fs.opt.ChunkSize)), partObj.remote, contentLength, "", extraHeaders, o.fs.uploadURL, options...)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "uploading chunk failed")
|
||||
}
|
||||
uploadedSize += contentLength
|
||||
}
|
||||
|
||||
// Finish
|
||||
var resp *http.Response
|
||||
opts = rest.Opts{
|
||||
Method: "MOVE",
|
||||
Path: o.fs.filePath(path.Join(uploadDir, ".file")),
|
||||
NoResponse: true,
|
||||
Options: options,
|
||||
RootURL: o.fs.uploadURL,
|
||||
}
|
||||
destinationURL, err := rest.URLJoin(o.fs.endpoint, o.filePath())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "finalize chunked upload couldn't join URL")
|
||||
}
|
||||
opts.ExtraHeaders = o.extraHeaders(ctx, src)
|
||||
opts.ExtraHeaders["Destination"] = destinationURL.String()
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
return o.fs.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "finalize chunked upload failed")
|
||||
}
|
||||
return nil
|
||||
// read metadata from remote
|
||||
o.hasMetaData = false
|
||||
return o.readMetaData(ctx)
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
|
||||
@@ -1,10 +1,10 @@
 // Test Webdav filesystem interface
-package webdav
+package webdav_test

 import (
 "testing"

-"github.com/rclone/rclone/fs"
+"github.com/rclone/rclone/backend/webdav"
 "github.com/rclone/rclone/fstest"
 "github.com/rclone/rclone/fstest/fstests"
 )
@@ -13,10 +13,7 @@ import (
 func TestIntegration(t *testing.T) {
 fstests.Run(t, &fstests.Opt{
 RemoteName: "TestWebdavNextcloud:",
-NilObject: (*Object)(nil),
-ChunkedUpload: fstests.ChunkedUploadConfig{
-MinChunkSize: 1 * fs.MebiByte,
-},
+NilObject: (*webdav.Object)(nil),
 })
 }

@@ -27,10 +24,7 @@ func TestIntegration2(t *testing.T) {
 }
 fstests.Run(t, &fstests.Opt{
 RemoteName: "TestWebdavOwncloud:",
-NilObject: (*Object)(nil),
-ChunkedUpload: fstests.ChunkedUploadConfig{
-Skip: true,
-},
+NilObject: (*webdav.Object)(nil),
 })
 }

@@ -41,10 +35,7 @@ func TestIntegration3(t *testing.T) {
 }
 fstests.Run(t, &fstests.Opt{
 RemoteName: "TestWebdavRclone:",
-NilObject: (*Object)(nil),
-ChunkedUpload: fstests.ChunkedUploadConfig{
-Skip: true,
-},
+NilObject: (*webdav.Object)(nil),
 })
 }

@@ -55,10 +46,6 @@ func TestIntegration4(t *testing.T) {
 }
 fstests.Run(t, &fstests.Opt{
 RemoteName: "TestWebdavNTLM:",
-NilObject: (*Object)(nil),
+NilObject: (*webdav.Object)(nil),
 })
 }

-func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
-return f.setUploadChunkSize(cs)
-}
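The integration tests above also move between the in-package and external test package forms, which is why the typed nil that tells the test harness which object type the backend uses becomes (*webdav.Object)(nil) instead of (*Object)(nil), and why the in-package SetUploadChunkSize shim drops out of the test file. Below is a small, self-contained sketch of the typed-nil idiom with a hypothetical type; this is not the fstests API.

```go
package main

import (
	"fmt"
	"reflect"
)

type Object struct{}

// describeType accepts any value and reports its dynamic element type.
// A nil pointer works here because the interface value still carries the
// pointer's type information even though it points at nothing.
func describeType(v interface{}) string {
	return reflect.TypeOf(v).Elem().Name()
}

func main() {
	fmt.Println(describeType((*Object)(nil))) // prints "Object"
}
```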
@@ -60,12 +60,12 @@ func init() {
 Name: "yandex",
 Description: "Yandex Disk",
 NewFs: NewFs,
-Config: func(ctx context.Context, name string, m configmap.Mapper) {
+Config: func(ctx context.Context, name string, m configmap.Mapper) error {
 err := oauthutil.Config(ctx, "yandex", name, m, oauthConfig, nil)
 if err != nil {
-log.Fatalf("Failed to configure token: %v", err)
-return
+return errors.Wrap(err, "failed to configure token")
 }
+return nil
 },
 Options: append(oauthutil.SharedOptions, []fs.Option{{
 Name: config.ConfigEncoding,
@@ -251,22 +251,22 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

 token, err := oauthutil.GetToken(name, m)
 if err != nil {
-log.Fatalf("Couldn't read OAuth token (this should never happen).")
+return nil, errors.Wrap(err, "couldn't read OAuth token")
 }
 if token.RefreshToken == "" {
-log.Fatalf("Unable to get RefreshToken. If you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend.")
+return nil, errors.New("unable to get RefreshToken. If you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
 }
 if token.TokenType != "OAuth" {
 token.TokenType = "OAuth"
 err = oauthutil.PutToken(name, m, token, false)
 if err != nil {
-log.Fatalf("Couldn't save OAuth token (this should never happen).")
+return nil, errors.Wrap(err, "couldn't save OAuth token")
 }
 log.Printf("Automatically upgraded OAuth config.")
 }
 oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
 if err != nil {
-log.Fatalf("Failed to configure Yandex: %v", err)
+return nil, errors.Wrap(err, "failed to configure Yandex")
 }

 ci := fs.GetConfig(ctx)
@@ -7,7 +7,6 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
@@ -73,37 +72,41 @@ func init() {
|
||||
Name: "zoho",
|
||||
Description: "Zoho",
|
||||
NewFs: NewFs,
|
||||
Config: func(ctx context.Context, name string, m configmap.Mapper) {
|
||||
Config: func(ctx context.Context, name string, m configmap.Mapper) error {
|
||||
// Need to setup region before configuring oauth
|
||||
setupRegion(m)
|
||||
err := setupRegion(m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
opt := oauthutil.Options{
|
||||
// No refresh token unless ApprovalForce is set
|
||||
OAuth2Opts: []oauth2.AuthCodeOption{oauth2.ApprovalForce},
|
||||
}
|
||||
if err := oauthutil.Config(ctx, "zoho", name, m, oauthConfig, &opt); err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
return errors.Wrap(err, "failed to configure token")
|
||||
}
|
||||
// We need to rewrite the token type to "Zoho-oauthtoken" because Zoho wants
|
||||
// it's own custom type
|
||||
token, err := oauthutil.GetToken(name, m)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to read token: %v", err)
|
||||
return errors.Wrap(err, "failed to read token")
|
||||
}
|
||||
if token.TokenType != "Zoho-oauthtoken" {
|
||||
token.TokenType = "Zoho-oauthtoken"
|
||||
err = oauthutil.PutToken(name, m, token, false)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
return errors.Wrap(err, "failed to configure token")
|
||||
}
|
||||
}
|
||||
|
||||
if fs.GetConfig(ctx).AutoConfirm {
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
||||
if err = setupRoot(ctx, name, m); err != nil {
|
||||
log.Fatalf("Failed to configure root directory: %v", err)
|
||||
return errors.Wrap(err, "failed to configure root directory")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "region",
|
||||
@@ -164,15 +167,16 @@ type Object struct {
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
func setupRegion(m configmap.Mapper) {
|
||||
func setupRegion(m configmap.Mapper) error {
|
||||
region, ok := m.Get("region")
|
||||
if !ok || region == "" {
|
||||
log.Fatalf("No region set\n")
|
||||
return errors.New("no region set")
|
||||
}
|
||||
rootURL = fmt.Sprintf("https://workdrive.zoho.%s/api/v1", region)
|
||||
accountsURL = fmt.Sprintf("https://accounts.zoho.%s", region)
|
||||
oauthConfig.Endpoint.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
|
||||
oauthConfig.Endpoint.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -208,7 +212,7 @@ func listWorkspaces(ctx context.Context, teamID string, srv *rest.Client) ([]api
|
||||
func setupRoot(ctx context.Context, name string, m configmap.Mapper) error {
|
||||
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to load oAuthClient: %s", err)
|
||||
return errors.Wrap(err, "failed to load oAuthClient")
|
||||
}
|
||||
authSrv := rest.NewClient(oAuthClient).SetRoot(accountsURL)
|
||||
opts := rest.Opts{
|
||||
@@ -377,7 +381,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if err := configstruct.Set(m, opt); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
setupRegion(m)
|
||||
err := setupRegion(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
root = parsePath(root)
|
||||
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
|
||||
|
||||
@@ -400,7 +400,10 @@ func initConfig() {
 configflags.SetFlags(ci)

 // Load the config
-configfile.LoadConfig(ctx)
+err := configfile.LoadConfig(ctx)
+if err != nil {
+log.Fatalf("Failed to load config: %v", err)
+}

 // Start accounting
 accounting.Start(ctx)
@@ -411,7 +414,7 @@ func initConfig() {
 }

 // Load filters
-err := filterflags.Reload(ctx)
+err = filterflags.Reload(ctx)
 if err != nil {
 log.Fatalf("Failed to load filters: %v", err)
 }
@@ -42,9 +42,9 @@ var configCommand = &cobra.Command{
 remotes and manage existing ones. You may also set or remove a
 password to protect your configuration.
 `,
-Run: func(command *cobra.Command, args []string) {
+RunE: func(command *cobra.Command, args []string) error {
 cmd.CheckArgs(0, 0, command, args)
-config.EditConfig(context.Background())
+return config.EditConfig(context.Background())
 },
 }
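The hunk above switches the command from cobra's Run to RunE so that the error from config.EditConfig propagates out through cobra instead of being dropped. Below is a minimal sketch of that pattern, assuming the standard github.com/spf13/cobra API; the command name and body are illustrative only.

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "config",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Return the error to the framework rather than calling log.Fatalf here;
			// cobra prints it and Execute passes it back to main.
			return fmt.Errorf("editing the config is not implemented in this sketch")
		},
	}
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```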
@@ -272,8 +272,7 @@ This normally means going through the interactive oauth flow again.
 if fsInfo.Config == nil {
 return errors.Errorf("%s: doesn't support Reconnect", configName)
 }
-fsInfo.Config(ctx, configName, config)
-return nil
+return fsInfo.Config(ctx, configName, config)
 },
 }
@@ -21,7 +21,7 @@ import (

 func TestRc(t *testing.T) {
 ctx := context.Background()
-configfile.LoadConfig(ctx)
+require.NoError(t, configfile.LoadConfig(ctx))
 mount := rc.Calls.Get("mount/mount")
 assert.NotNil(t, mount)
 unmount := rc.Calls.Get("mount/unmount")
@@ -41,7 +41,7 @@ func startServer(t *testing.T, f fs.Fs) {
 }

 func TestInit(t *testing.T) {
-configfile.LoadConfig(context.Background())
+require.NoError(t, configfile.LoadConfig(context.Background()))

 f, err := fs.NewFs(context.Background(), "testdata/files")
 l, _ := f.List(context.Background(), "")
@@ -61,7 +61,7 @@ var (
|
||||
func TestInit(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
// Configure the remote
|
||||
configfile.LoadConfig(context.Background())
|
||||
require.NoError(t, configfile.LoadConfig(context.Background()))
|
||||
// fs.Config.LogLevel = fs.LogLevelDebug
|
||||
// fs.Config.DumpHeaders = true
|
||||
// fs.Config.DumpBodies = true
|
||||
|
||||
@@ -66,7 +66,7 @@ func createOverwriteDeleteSeq(t testing.TB, path string) []TestRequest {
|
||||
// TestResticHandler runs tests on the restic handler code, especially in append-only mode.
|
||||
func TestResticHandler(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
configfile.LoadConfig(ctx)
|
||||
require.NoError(t, configfile.LoadConfig(ctx))
|
||||
buf := make([]byte, 32)
|
||||
_, err := io.ReadFull(rand.Reader, buf)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -60,7 +60,10 @@ func Authorize(ctx context.Context, args []string, noAutoBrowser bool) error {
|
||||
m.AddSetter(outM)
|
||||
m.AddGetter(outM, configmap.PriorityNormal)
|
||||
|
||||
ri.Config(ctx, name, m)
|
||||
err = ri.Config(ctx, name, m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Print the code for the user to paste
|
||||
out := outM["token"]
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
mathrand "math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -328,7 +327,7 @@ func SetConfigPath(path string) (err error) {
|
||||
}
|
||||
|
||||
// LoadConfig loads the config file
|
||||
func LoadConfig(ctx context.Context) {
|
||||
func LoadConfig(ctx context.Context) error {
|
||||
// Set RCLONE_CONFIG_DIR for backend config and subprocesses
|
||||
// If empty configPath (in-memory only) the value will be "."
|
||||
_ = os.Setenv("RCLONE_CONFIG_DIR", filepath.Dir(configPath))
|
||||
@@ -340,10 +339,12 @@ func LoadConfig(ctx context.Context) {
|
||||
fs.Logf(nil, "Config file %q not found - using defaults", configPath)
|
||||
}
|
||||
} else if err != nil {
|
||||
log.Fatalf("Failed to load config file %q: %v", configPath, err)
|
||||
fs.Errorf(nil, "Failed to load config file %q: %v", configPath, err)
|
||||
return errors.Wrap(err, "failed to load config file")
|
||||
} else {
|
||||
fs.Debugf(nil, "Using config file from %q", configPath)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ErrorConfigFileNotFound is returned when the config file is not found
|
||||
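
With `LoadConfig` now returning an error instead of calling `log.Fatalf`, each caller decides how fatal a broken config file is: the tests in this patch assert with `require.NoError`, `initConfig` still exits, and a library caller can log and fall back to defaults. A minimal sketch of such a caller against the post-patch `configfile.LoadConfig` signature (the log message is illustrative):

```go
package main

import (
	"context"
	"log"

	"github.com/rclone/rclone/fs/config/configfile"
)

func main() {
	// After this patch LoadConfig reports failure instead of exiting,
	// so a non-fatal caller can fall back to default configuration.
	if err := configfile.LoadConfig(context.Background()); err != nil {
		log.Printf("config not loaded, using defaults: %v", err)
	}
}
```
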
@@ -437,7 +438,10 @@ func UpdateRemote(ctx context.Context, name string, keyValues rc.Params, doObscu
        }
        Data.SetValue(name, k, vStr)
    }
    RemoteConfig(ctx, name)
    err = RemoteConfig(ctx, name)
    if err != nil {
        return err
    }
    SaveConfig()
    cache.ClearConfig(name) // remove any remotes based on this config from the cache
    return nil

@@ -9,6 +9,7 @@ import (
    "github.com/rclone/rclone/fs/config"
    "github.com/rclone/rclone/fs/config/configfile"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestConfigLoad(t *testing.T) {
@@ -18,7 +19,7 @@ func TestConfigLoad(t *testing.T) {
        assert.NoError(t, config.SetConfigPath(oldConfigPath))
    }()
    config.ClearConfigPassword()
    configfile.LoadConfig(context.Background())
    require.NoError(t, configfile.LoadConfig(context.Background()))
    sections := config.Data.GetSectionList()
    var expect = []string{"RCLONE_ENCRYPT_V0", "nounc", "unc"}
    assert.Equal(t, expect, sections)

@@ -16,9 +16,9 @@ import (
)

// LoadConfig installs the config file handler and calls config.LoadConfig
func LoadConfig(ctx context.Context) {
func LoadConfig(ctx context.Context) error {
    config.Data = &Storage{}
    config.LoadConfig(ctx)
    return config.LoadConfig(ctx)
}

// Storage implements config.Storage for saving and loading config

@@ -18,7 +18,7 @@ const testName = "configTestNameForRc"

func TestRc(t *testing.T) {
    ctx := context.Background()
    configfile.LoadConfig(ctx)
    require.NoError(t, configfile.LoadConfig(ctx))
    // Create the test remote
    call := rc.Calls.Get("config/create")
    assert.NotNil(t, call)

@@ -270,13 +270,14 @@ func OkRemote(name string) bool {
}

// RemoteConfig runs the config helper for the remote if needed
func RemoteConfig(ctx context.Context, name string) {
func RemoteConfig(ctx context.Context, name string) error {
    fmt.Printf("Remote config\n")
    f := mustFindByName(name)
    if f.Config != nil {
        m := fs.ConfigMap(f, name, nil)
        f.Config(ctx, name, m)
        return f.Config(ctx, name, m)
    }
    return nil
}

// matchProvider returns true if provider matches the providerConfig string.
@@ -456,7 +457,7 @@ func editOptions(ri *fs.RegInfo, name string, isNew bool) {
}

// NewRemote make a new remote from its name
func NewRemote(ctx context.Context, name string) {
func NewRemote(ctx context.Context, name string) error {
    var (
        newType string
        ri *fs.RegInfo
@@ -476,16 +477,19 @@ func NewRemote(ctx context.Context, name string) {
    Data.SetValue(name, "type", newType)

    editOptions(ri, name, true)
    RemoteConfig(ctx, name)
    err = RemoteConfig(ctx, name)
    if err != nil {
        return err
    }
    if OkRemote(name) {
        SaveConfig()
        return
        return nil
    }
    EditRemote(ctx, ri, name)
    return EditRemote(ctx, ri, name)
}

// EditRemote gets the user to edit a remote
func EditRemote(ctx context.Context, ri *fs.RegInfo, name string) {
func EditRemote(ctx context.Context, ri *fs.RegInfo, name string) error {
    ShowRemote(name)
    fmt.Printf("Edit remote\n")
    for {
@@ -495,7 +499,7 @@ func EditRemote(ctx context.Context, ri *fs.RegInfo, name string) {
        }
    }
    SaveConfig()
    RemoteConfig(ctx, name)
    return RemoteConfig(ctx, name)
}

// DeleteRemote gets the user to delete a remote
@@ -560,7 +564,7 @@ func ShowConfig() {
}

// EditConfig edits the config file interactively
func EditConfig(ctx context.Context) {
func EditConfig(ctx context.Context) (err error) {
    for {
        haveRemotes := len(Data.GetSectionList()) != 0
        what := []string{"eEdit existing remote", "nNew remote", "dDelete remote", "rRename remote", "cCopy remote", "sSet configuration password", "qQuit config"}
@@ -577,9 +581,15 @@ func EditConfig(ctx context.Context) {
        case 'e':
            name := ChooseRemote()
            fs := mustFindByName(name)
            EditRemote(ctx, fs, name)
            err = EditRemote(ctx, fs, name)
            if err != nil {
                return err
            }
        case 'n':
            NewRemote(ctx, NewRemoteName())
            err = NewRemote(ctx, NewRemoteName())
            if err != nil {
                return err
            }
        case 'd':
            name := ChooseRemote()
            DeleteRemote(name)
@@ -590,8 +600,7 @@ func EditConfig(ctx context.Context) {
        case 's':
            SetPassword()
        case 'q':
            return

            return nil
        }
    }
}

@@ -43,7 +43,7 @@ func testConfigFile(t *testing.T, configFileName string) func() {
    assert.NoError(t, config.SetConfigPath(path))
    ci = &fs.ConfigInfo{}

    configfile.LoadConfig(ctx)
    require.NoError(t, configfile.LoadConfig(ctx))
    assert.Equal(t, []string{}, config.Data.GetSectionList())

    // Fake a remote
@@ -103,7 +103,7 @@ func TestCRUD(t *testing.T) {
        "secret", // repeat
        "y", // looks good, save
    })
    config.NewRemote(ctx, "test")
    require.NoError(t, config.NewRemote(ctx, "test"))

    assert.Equal(t, []string{"test"}, config.Data.GetSectionList())
    assert.Equal(t, "config_test_remote", config.FileGet("test", "type"))
@@ -146,7 +146,7 @@ func TestChooseOption(t *testing.T) {
        assert.Equal(t, 1024, bits)
        return "not very random password", nil
    }
    config.NewRemote(ctx, "test")
    require.NoError(t, config.NewRemote(ctx, "test"))

    assert.Equal(t, "false", config.FileGet("test", "bool"))
    assert.Equal(t, "not very random password", obscure.MustReveal(config.FileGet("test", "pass")))
@@ -158,7 +158,7 @@ func TestChooseOption(t *testing.T) {
        "n", // not required
        "y", // looks good, save
    })
    config.NewRemote(ctx, "test")
    require.NoError(t, config.NewRemote(ctx, "test"))

    assert.Equal(t, "true", config.FileGet("test", "bool"))
    assert.Equal(t, "", config.FileGet("test", "pass"))
@@ -175,7 +175,7 @@ func TestNewRemoteName(t *testing.T) {
        "n", // not required
        "y", // looks good, save
    })
    config.NewRemote(ctx, "test")
    require.NoError(t, config.NewRemote(ctx, "test"))

    config.ReadLine = makeReadLine([]string{
        "test", // already exists

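
The interactive helpers (`RemoteConfig`, `NewRemote`, `EditRemote`, `EditConfig`) now surface errors to their callers, which is what the `require.NoError` assertions in the test hunks just above rely on. A minimal sketch of a non-test caller, assuming the post-patch `config.NewRemote` signature; the remote name is illustrative and the call is still interactive (it prompts on stdin):

```go
package main

import (
	"context"
	"log"

	"github.com/rclone/rclone/fs/config"
)

func main() {
	ctx := context.Background()
	// NewRemote now returns any error raised by the remote's config
	// helper instead of aborting deep inside the config package.
	if err := config.NewRemote(ctx, "myremote"); err != nil {
		log.Printf("remote not created: %v", err)
	}
}
```
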
@@ -89,7 +89,7 @@ type RegInfo struct {
    // the parent of that object and ErrorIsFile.
    NewFs func(ctx context.Context, name string, root string, config configmap.Mapper) (Fs, error) `json:"-"`
    // Function to call to help with config
    Config func(ctx context.Context, name string, config configmap.Mapper) `json:"-"`
    Config func(ctx context.Context, name string, config configmap.Mapper) error `json:"-"`
    // Options for the Fs configuration
    Options Options
    // The command help, if any

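
This `RegInfo` change is what drives the backend edits earlier in the patch (acd, box, zoho): a backend's `Config` callback now returns an error, so its config helper propagates failures instead of calling `log.Fatalf`. A hedged sketch of a backend registering under the new signature; the backend name, the `token` option, and the nil `NewFs` are invented for illustration only:

```go
package example

import (
	"context"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

func init() {
	fs.Register(&fs.RegInfo{
		Name:        "example",
		Description: "Illustrative backend",
		NewFs:       nil, // a real backend supplies its constructor here
		// Config now reports problems to the caller rather than exiting.
		Config: func(ctx context.Context, name string, m configmap.Mapper) error {
			token, ok := m.Get("token")
			if !ok || token == "" {
				return errors.New("token not set")
			}
			return nil
		},
	})
}
```
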
@@ -103,7 +103,7 @@ type testRun struct {
// Run a suite of tests
func testServer(t *testing.T, tests []testRun, opt *rc.Options) {
    ctx := context.Background()
    configfile.LoadConfig(ctx)
    require.NoError(t, configfile.LoadConfig(ctx))
    mux := http.NewServeMux()
    opt.HTTPOptions.Template = testTemplate
    rcServer := newServer(ctx, opt, mux)

@@ -71,7 +71,10 @@ func Initialise() {
    if envConfig := os.Getenv("RCLONE_CONFIG"); envConfig != "" {
        _ = config.SetConfigPath(envConfig)
    }
    configfile.LoadConfig(ctx)
    err := configfile.LoadConfig(ctx)
    if err != nil {
        log.Fatalf("Initialise failed to load config: %v", err)
    }
    accounting.Start(ctx)
    if *Verbose {
        ci.LogLevel = fs.LogLevelDebug

@@ -61,8 +61,6 @@ type ChunkedUploadConfig struct {
    CeilChunkSize func(fs.SizeSuffix) fs.SizeSuffix
    // More than one chunk is required on upload
    NeedMultipleChunks bool
    // Skip this particular remote
    Skip bool
}

// SetUploadChunkSizer is a test only interface to change the upload chunk size at runtime
@@ -1779,10 +1777,6 @@ func Run(t *testing.T, opt *Opt) {
            t.Skip("not running with -short")
        }

        if opt.ChunkedUpload.Skip {
            t.Skip("skipping as ChunkedUpload.Skip is set")
        }

        setUploadChunkSizer, _ := f.(SetUploadChunkSizer)
        if setUploadChunkSizer == nil {
            t.Skipf("%T does not implement SetUploadChunkSizer", f)

@@ -72,7 +72,10 @@ func main() {
        log.Println("test_all should be run from the root of the rclone source code")
        log.Fatal(err)
    }
    configfile.LoadConfig(context.Background())
    err = configfile.LoadConfig(context.Background())
    if err != nil {
        log.Fatalf("Failed to load config: %v", err)
    }

    // Seed the random number generator
    rand.Seed(time.Now().UTC().UnixNano())

@@ -17,10 +17,11 @@ start() {
        nextcloud:latest

    echo type=webdav
    echo url=http://$(docker_ip)/remote.php/dav/files/$USER/
    echo url=http://$(docker_ip)/remote.php/webdav/
    echo user=$USER
    echo pass=$(rclone obscure $PASS)
    echo vendor=nextcloud
    # the tests don't pass if we use the nextcloud features
    # echo vendor=nextcloud
    echo _connect=$(docker_ip):80
}
