mirror of https://github.com/rclone/rclone.git synced 2025-12-11 13:53:15 +00:00

Compare commits


1 commit

244 changed files with 4260 additions and 11599 deletions


@@ -37,6 +37,7 @@ The Rclone Developers
-->
#### The associated forum post URL from `https://forum.rclone.org`
@@ -64,11 +65,3 @@ The Rclone Developers
#### A log from the command with the `-vv` flag (e.g. output from `rclone -vv copy /tmp remote:tmp`)
<!--- Please keep the note below for others who read your bug report. -->
#### How to use GitHub
* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
* Subscribe to receive notifications on status change and new comments.


@@ -26,6 +26,7 @@ The Rclone Developers
-->
#### The associated forum post URL from `https://forum.rclone.org`
@@ -41,11 +42,3 @@ The Rclone Developers
#### How do you think rclone should be changed to solve that?
<!--- Please keep the note below for others who read your feature request. -->
#### How to use GitHub
* Please use the 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to show that you are affected by the same issue.
* Please don't comment if you have no relevant information to add. It's just extra noise for everyone subscribed to this issue.
* Subscribe to receive notifications on status change and new comments.


@@ -12,15 +12,9 @@ on:
tags:
- '*'
pull_request:
workflow_dispatch:
inputs:
manual:
required: true
default: true
jobs:
build:
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 60
strategy:
fail-fast: false
@@ -36,7 +30,6 @@ jobs:
check: true
quicktest: true
racequicktest: true
librclonetest: true
deploy: true
- job_name: mac_amd64
@@ -194,14 +187,6 @@ jobs:
make racequicktest
if: matrix.racequicktest
- name: Run librclone tests
shell: bash
run: |
make -C librclone/ctest test
make -C librclone/ctest clean
librclone/python/test_rclone.py
if: matrix.librclonetest
- name: Code quality test
shell: bash
run: |
@@ -229,7 +214,6 @@ jobs:
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
android:
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 30
name: "android-all"
runs-on: ubuntu-latest
@@ -237,8 +221,6 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
# Upgrade together with NDK version
- name: Set up Go 1.14
@@ -267,15 +249,6 @@ jobs:
run: |
make
- name: install gomobile
run: |
go get golang.org/x/mobile/cmd/gobind
go get golang.org/x/mobile/cmd/gomobile
env PATH=$PATH:~/go/bin gomobile init
- name: arm-v7a gomobile build
run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
- name: arm-v7a Set environment variables
shell: bash
run: |


@@ -7,7 +7,6 @@ on:
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:


@@ -6,7 +6,6 @@ on:
jobs:
build:
if: github.repository == 'rclone/rclone'
runs-on: ubuntu-latest
name: Build image job
steps:

.gitignore

@@ -11,5 +11,3 @@ rclone.iml
*.log
*.iml
fuzz-build.zip
*.orig
*.rej


@@ -62,7 +62,6 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
@@ -88,6 +87,7 @@ Please see [the full list of all storage providers and their features](https://r
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
* Optional transparent compression ([Compress](https://rclone.org/compress/))
* Optional encryption ([Crypt](https://rclone.org/crypt/))
* Optional cache ([Cache](https://rclone.org/cache/))
* Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
* Multi-threaded downloads to local disk
* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna


@@ -20,7 +20,7 @@ var (
)
func prepare(t *testing.T, root string) {
configfile.Install()
configfile.LoadConfig(context.Background())
// Configure the remote
config.FileSet(remoteName, "type", "alias")


@@ -41,7 +41,6 @@ import (
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/tardigrade"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/uptobox"
_ "github.com/rclone/rclone/backend/webdav"
_ "github.com/rclone/rclone/backend/yandex"
_ "github.com/rclone/rclone/backend/zoho"


@@ -16,6 +16,7 @@ import (
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"path"
"strings"
@@ -69,10 +70,11 @@ func init() {
Prefix: "acd",
Description: "Amazon Drive",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: acdConfig,
})
Config: func(ctx context.Context, name string, m configmap.Mapper) {
err := oauthutil.Config(ctx, "amazon cloud drive", name, m, acdConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "checkpoint",
@@ -81,16 +83,16 @@ func init() {
Advanced: true,
}, {
Name: "upload_wait_per_gb",
Help: `Additional time per GiB to wait after a failed complete upload to see if it appears.
Help: `Additional time per GB to wait after a failed complete upload to see if it appears.
Sometimes Amazon Drive gives an error when a file has been fully
uploaded but the file appears anyway after a little while. This
happens sometimes for files over 1 GiB in size and nearly every time for
files bigger than 10 GiB. This parameter controls the time rclone waits
happens sometimes for files over 1GB in size and nearly every time for
files bigger than 10GB. This parameter controls the time rclone waits
for the file to appear.
The default value for this parameter is 3 minutes per GiB, so by
default it will wait 3 minutes for every GiB uploaded to see if the
The default value for this parameter is 3 minutes per GB, so by
default it will wait 3 minutes for every GB uploaded to see if the
file appears.
You can disable this feature by setting it to 0. This may cause
@@ -110,7 +112,7 @@ in this situation.`,
Files this size or more will be downloaded via their "tempLink". This
is to work around a problem with Amazon Drive which blocks downloads
of files bigger than about 10 GiB. The default for this is 9 GiB which
of files bigger than about 10GB. The default for this is 9GB which
shouldn't need to be changed.
To download files above this threshold, rclone requests a "tempLink"


@@ -47,8 +47,8 @@ const (
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
storageDefaultBaseURL = "blob.core.windows.net"
defaultChunkSize = 4 * fs.Mebi
maxChunkSize = 100 * fs.Mebi
defaultChunkSize = 4 * fs.MebiByte
maxChunkSize = 100 * fs.MebiByte
uploadConcurrency = 4
defaultAccessTier = azblob.AccessTierNone
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
@@ -129,11 +129,11 @@ msi_client_id, or msi_mi_res_id parameters.`,
Advanced: true,
}, {
Name: "upload_cutoff",
Help: "Cutoff for switching to chunked upload (<= 256 MiB). (Deprecated)",
Help: "Cutoff for switching to chunked upload (<= 256MB). (Deprecated)",
Advanced: true,
}, {
Name: "chunk_size",
Help: `Upload chunk size (<= 100 MiB).
Help: `Upload chunk size (<= 100MB).
Note that this is stored in memory and there may be up to
"--transfers" chunks stored at once in memory.`,
@@ -404,7 +404,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.SizeSuffixBase
const minChunkSize = fs.Byte
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}


@@ -2,11 +2,12 @@ package api
import (
"fmt"
"path"
"strconv"
"strings"
"time"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/version"
)
// Error describes a B2 error response
@@ -62,17 +63,16 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
return nil
}
// HasVersion returns true if it looks like the passed filename has a timestamp on it.
//
// Note that the passed filename's timestamp may still be invalid even if this
// function returns true.
func HasVersion(remote string) bool {
return version.Match(remote)
}
const versionFormat = "-v2006-01-02-150405.000"
// AddVersion adds the timestamp as a version string into the filename passed in.
func (t Timestamp) AddVersion(remote string) string {
return version.Add(remote, time.Time(t))
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
s := time.Time(t).Format(versionFormat)
// Replace the '.' with a '-'
s = strings.Replace(s, ".", "-", -1)
return base + s + ext
}
// RemoveVersion removes the timestamp from a filename as a version string.
@@ -80,9 +80,24 @@ func (t Timestamp) AddVersion(remote string) string {
// It returns the new file name and a timestamp, or the old filename
// and a zero timestamp.
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
time, newRemote := version.Remove(remote)
t = Timestamp(time)
return
newRemote = remote
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
if len(base) < len(versionFormat) {
return
}
versionStart := len(base) - len(versionFormat)
// Check it ends in -xxx
if base[len(base)-4] != '-' {
return
}
// Replace with .xxx for parsing
base = base[:len(base)-4] + "." + base[len(base)-3:]
newT, err := time.Parse(versionFormat, base[versionStart:])
if err != nil {
return
}
return Timestamp(newT), base[:versionStart] + ext
}
// IsZero returns true if the timestamp is uninitialized
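The restored AddVersion/RemoveVersion above splice the timestamp straight into the file name using the "-v2006-01-02-150405.000" layout, swapping the "." for a "-" so the suffix contains no dot besides the extension's. A minimal, self-contained sketch of that round trip (hypothetical main package for illustration, not part of this change set):

package main

import (
	"fmt"
	"path"
	"strings"
	"time"
)

const versionFormat = "-v2006-01-02-150405.000" // same layout as the backend code above

func main() {
	t := time.Date(2001, 2, 3, 4, 5, 6, 123000000, time.UTC)

	// Add a version: "potato.txt" -> "potato-v2001-02-03-040506-123.txt"
	remote := "potato.txt"
	ext := path.Ext(remote)
	base := strings.TrimSuffix(remote, ext)
	suffix := strings.Replace(t.Format(versionFormat), ".", "-", -1)
	versioned := base + suffix + ext
	fmt.Println(versioned)

	// Remove it again: put the "." back, then parse with the same layout.
	vbase := strings.TrimSuffix(versioned, ext)
	raw := vbase[:len(vbase)-4] + "." + vbase[len(vbase)-3:]
	parsed, err := time.Parse(versionFormat, raw[len(raw)-len(versionFormat):])
	fmt.Println(parsed, err) // 2001-02-03 04:05:06.123 +0000 UTC <nil>
}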


@@ -13,6 +13,7 @@ import (
var (
emptyT api.Timestamp
t0 = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
t0r = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123000000Z"))
t1 = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
)
@@ -35,6 +36,40 @@ func TestTimestampUnmarshalJSON(t *testing.T) {
assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
}
func TestTimestampAddVersion(t *testing.T) {
for _, test := range []struct {
t api.Timestamp
in string
expected string
}{
{t0, "potato.txt", "potato-v1970-01-01-010101-123.txt"},
{t1, "potato", "potato-v2001-02-03-040506-123"},
{t1, "", "-v2001-02-03-040506-123"},
} {
actual := test.t.AddVersion(test.in)
assert.Equal(t, test.expected, actual, test.in)
}
}
func TestTimestampRemoveVersion(t *testing.T) {
for _, test := range []struct {
in string
expectedT api.Timestamp
expectedRemote string
}{
{"potato.txt", emptyT, "potato.txt"},
{"potato-v1970-01-01-010101-123.txt", t0r, "potato.txt"},
{"potato-v2001-02-03-040506-123", t1, "potato"},
{"-v2001-02-03-040506-123", t1, ""},
{"potato-v2A01-02-03-040506-123", emptyT, "potato-v2A01-02-03-040506-123"},
{"potato-v2001-02-03-040506=123", emptyT, "potato-v2001-02-03-040506=123"},
} {
actualT, actualRemote := api.RemoveVersion(test.in)
assert.Equal(t, test.expectedT, actualT, test.in)
assert.Equal(t, test.expectedRemote, actualRemote, test.in)
}
}
func TestTimestampIsZero(t *testing.T) {
assert.True(t, emptyT.IsZero())
assert.False(t, t0.IsZero())


@@ -54,10 +54,10 @@ const (
decayConstant = 1 // bigger for slower decay, exponential
maxParts = 10000
maxVersions = 100 // maximum number of versions we search in --b2-versions mode
minChunkSize = 5 * fs.Mebi
defaultChunkSize = 96 * fs.Mebi
defaultUploadCutoff = 200 * fs.Mebi
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
minChunkSize = 5 * fs.MebiByte
defaultChunkSize = 96 * fs.MebiByte
defaultUploadCutoff = 200 * fs.MebiByte
largeFileCopyCutoff = 4 * fs.GibiByte // 5E9 is the max
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
)
@@ -116,7 +116,7 @@ in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration
Files above this size will be uploaded in chunks of "--b2-chunk-size".
This value should be set no larger than 4.657 GiB (== 5 GB).`,
This value should be set no larger than 4.657GiB (== 5GB).`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
@@ -126,7 +126,7 @@ This value should be set no larger than 4.657 GiB (== 5 GB).`,
Any files larger than this that need to be server-side copied will be
copied in chunks of this size.
The minimum is 0 and the maximum is 4.6 GiB.`,
The minimum is 0 and the maximum is 4.6GB.`,
Default: largeFileCopyCutoff,
Advanced: true,
}, {
@@ -1353,7 +1353,7 @@ func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string
}
var request = api.GetDownloadAuthorizationRequest{
BucketID: bucketID,
FileNamePrefix: f.opt.Enc.FromStandardPath(path.Join(f.rootDirectory, remote)),
FileNamePrefix: f.opt.Enc.FromStandardPath(path.Join(f.root, remote)),
ValidDurationInSeconds: validDurationInSeconds,
}
var response api.GetDownloadAuthorizationResponse


@@ -230,14 +230,14 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
//
// The number of bytes in the file being uploaded. Note that
// this header is required; you cannot leave it out and just
// use chunked encoding. The minimum size of every part but
// the last one is 100 MB (100,000,000 bytes)
// use chunked encoding. The minimum size of every part but
// the last one is 100MB.
//
// X-Bz-Content-Sha1
//
// The SHA1 checksum of the this part of the file. B2 will
// check this when the part is uploaded, to make sure that the
// data arrived correctly. The same SHA1 checksum must be
// data arrived correctly. The same SHA1 checksum must be
// passed to b2_finish_large_file.
opts := rest.Opts{
Method: "POST",


@@ -36,13 +36,13 @@ func (t *Time) UnmarshalJSON(data []byte) error {
// Error is returned from box when things go wrong
type Error struct {
Type string `json:"type"`
Status int `json:"status"`
Code string `json:"code"`
ContextInfo json.RawMessage `json:"context_info"`
HelpURL string `json:"help_url"`
Message string `json:"message"`
RequestID string `json:"request_id"`
Type string `json:"type"`
Status int `json:"status"`
Code string `json:"code"`
ContextInfo json.RawMessage
HelpURL string `json:"help_url"`
Message string `json:"message"`
RequestID string `json:"request_id"`
}
// Error returns a string for the error and satisfies the error interface
@@ -132,38 +132,6 @@ type UploadFile struct {
ContentModifiedAt Time `json:"content_modified_at"`
}
// PreUploadCheck is the request for upload preflight check
type PreUploadCheck struct {
Name string `json:"name"`
Parent Parent `json:"parent"`
Size *int64 `json:"size,omitempty"`
}
// PreUploadCheckResponse is the response from upload preflight check
// if successful
type PreUploadCheckResponse struct {
UploadToken string `json:"upload_token"`
UploadURL string `json:"upload_url"`
}
// PreUploadCheckConflict is returned in the ContextInfo error field
// from PreUploadCheck when the error code is "item_name_in_use"
type PreUploadCheckConflict struct {
Conflicts struct {
Type string `json:"type"`
ID string `json:"id"`
FileVersion struct {
Type string `json:"type"`
ID string `json:"id"`
Sha1 string `json:"sha1"`
} `json:"file_version"`
SequenceID string `json:"sequence_id"`
Etag string `json:"etag"`
Sha1 string `json:"sha1"`
Name string `json:"name"`
} `json:"conflicts"`
}
// UpdateFileModTime is used in Update File Info
type UpdateFileModTime struct {
ContentModifiedAt Time `json:"content_modified_at"`


@@ -17,6 +17,7 @@ import (
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"path"
@@ -83,7 +84,7 @@ func init() {
Name: "box",
Description: "Box",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
boxAccessToken, boxAccessTokenOk := m.Get("access_token")
@@ -92,15 +93,15 @@ func init() {
if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
if err != nil {
return nil, errors.Wrap(err, "failed to configure token with jwt authentication")
log.Fatalf("Failed to configure token with jwt authentication: %v", err)
}
// Else, if not using an access token, use oauth2
} else if boxAccessToken == "" || !boxAccessTokenOk {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
err = oauthutil.Config(ctx, "box", name, m, oauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token with oauth authentication: %v", err)
}
}
return nil, nil
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "root_folder_id",
@@ -125,7 +126,7 @@ func init() {
}},
}, {
Name: "upload_cutoff",
Help: "Cutoff for switching to multipart upload (>= 50 MiB).",
Help: "Cutoff for switching to multipart upload (>= 50MB).",
Default: fs.SizeSuffix(defaultUploadCutoff),
Advanced: true,
}, {
@@ -156,15 +157,15 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
jsonFile = env.ShellExpand(jsonFile)
boxConfig, err := getBoxConfig(jsonFile)
if err != nil {
return errors.Wrap(err, "get box config")
log.Fatalf("Failed to configure token: %v", err)
}
privateKey, err := getDecryptedPrivateKey(boxConfig)
if err != nil {
return errors.Wrap(err, "get decrypted private key")
log.Fatalf("Failed to configure token: %v", err)
}
claims, err := getClaims(boxConfig, boxSubType)
if err != nil {
return errors.Wrap(err, "get claims")
log.Fatalf("Failed to configure token: %v", err)
}
signingHeaders := getSigningHeaders(boxConfig)
queryParams := getQueryParams(boxConfig)
@@ -685,80 +686,22 @@ func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time,
return o, leaf, directoryID, nil
}
// preUploadCheck checks to see if a file can be uploaded
//
// It returns "", nil if the file is good to go
// It returns "ID", nil if the file must be updated
func (f *Fs) preUploadCheck(ctx context.Context, leaf, directoryID string, size int64) (ID string, err error) {
check := api.PreUploadCheck{
Name: f.opt.Enc.FromStandardName(leaf),
Parent: api.Parent{
ID: directoryID,
},
}
if size >= 0 {
check.Size = &size
}
opts := rest.Opts{
Method: "OPTIONS",
Path: "/files/content/",
}
var result api.PreUploadCheckResponse
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, &check, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
if apiErr, ok := err.(*api.Error); ok && apiErr.Code == "item_name_in_use" {
var conflict api.PreUploadCheckConflict
err = json.Unmarshal(apiErr.ContextInfo, &conflict)
if err != nil {
return "", errors.Wrap(err, "pre-upload check: JSON decode failed")
}
if conflict.Conflicts.Type != api.ItemTypeFile {
return "", errors.Wrap(err, "pre-upload check: can't overwrite non file with file")
}
return conflict.Conflicts.ID, nil
}
return "", errors.Wrap(err, "pre-upload check")
}
return "", nil
}
// Put the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// If directory doesn't exist, file doesn't exist so can upload
remote := src.Remote()
leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return f.PutUnchecked(ctx, in, src, options...)
}
existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
switch err {
case nil:
return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src)
default:
return nil, err
}
// Preflight check the upload, which returns the ID if the
// object already exists
ID, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
if err != nil {
return nil, err
}
if ID == "" {
return f.PutUnchecked(ctx, in, src, options...)
}
// If object exists then create a skeleton one with just id
o := &Object{
fs: f,
remote: remote,
id: ID,
}
return o, o.Update(ctx, in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
@@ -1285,7 +1228,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// upload does a single non-multipart upload
//
// This is recommended for less than 50 MiB of content
// This is recommended for less than 50 MB of content
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
upload := api.UploadFile{
Name: o.fs.opt.Enc.FromStandardName(leaf),


@@ -98,14 +98,14 @@ changed, any downloaded chunks will be invalid and cache-chunk-path
will need to be cleared or unexpected EOF errors will occur.`,
Default: DefCacheChunkSize,
Examples: []fs.OptionExample{{
Value: "1M",
Help: "1 MiB",
Value: "1m",
Help: "1MB",
}, {
Value: "5M",
Help: "5 MiB",
Help: "5 MB",
}, {
Value: "10M",
Help: "10 MiB",
Help: "10 MB",
}},
}, {
Name: "info_age",
@@ -132,13 +132,13 @@ oldest chunks until it goes under this value.`,
Default: DefCacheTotalChunkSize,
Examples: []fs.OptionExample{{
Value: "500M",
Help: "500 MiB",
Help: "500 MB",
}, {
Value: "1G",
Help: "1 GiB",
Help: "1 GB",
}, {
Value: "10G",
Help: "10 GiB",
Help: "10 GB",
}},
}, {
Name: "db_path",
@@ -339,14 +339,8 @@ func parseRootPath(path string) (string, error) {
return strings.Trim(path, "/"), nil
}
var warnDeprecated sync.Once
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
warnDeprecated.Do(func() {
fs.Logf(nil, "WARNING: Cache backend is deprecated and may be removed in future. Please use VFS instead.")
})
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)


@@ -836,7 +836,7 @@ func newRun() *run {
if uploadDir == "" {
r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
if err != nil {
panic(fmt.Sprintf("Failed to create temp dir: %v", err))
log.Fatalf("Failed to create temp dir: %v", err)
}
} else {
r.tmpUploadDir = uploadDir


@@ -155,7 +155,7 @@ Normally should contain a ':' and a path, e.g. "myremote:path/to/dir",
}, {
Name: "chunk_size",
Advanced: false,
Default: fs.SizeSuffix(2147483648), // 2 GiB
Default: fs.SizeSuffix(2147483648), // 2GB
Help: `Files larger than chunk size will be split in chunks.`,
}, {
Name: "name_format",
@@ -1448,7 +1448,7 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error {
c.accountBytes(size)
return nil
}
const bufLen = 1048576 // 1 MiB
const bufLen = 1048576 // 1MB
buf := make([]byte, bufLen)
for size > 0 {
n := size


@@ -33,7 +33,7 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
Path: fmt.Sprintf("chunker-upload-%dk", kilobytes),
Size: int64(kilobytes) * int64(fs.Kibi),
Size: int64(kilobytes) * int64(fs.KibiByte),
})
})
}


@@ -36,7 +36,7 @@ import (
// Globals
const (
initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently
maxChunkSize = 8388608 // at 256 KiB and 8 MiB.
maxChunkSize = 8388608 // at 256KB and 8 MB.
bufferSize = 8388608
heuristicBytes = 1048576
@@ -53,7 +53,7 @@ const (
Gzip = 2
)
var nameRegexp = regexp.MustCompile("^(.+?)\\.([A-Za-z0-9-_]{11})$")
var nameRegexp = regexp.MustCompile("^(.+?)\\.([A-Za-z0-9+_]{11})$")
// Register with Fs
func init() {


@@ -12,14 +12,12 @@ import (
"strconv"
"strings"
"sync"
"time"
"unicode/utf8"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/lib/version"
"github.com/rfjakob/eme"
"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/scrypt"
@@ -444,32 +442,11 @@ func (c *Cipher) encryptFileName(in string) string {
if !c.dirNameEncrypt && i != (len(segments)-1) {
continue
}
// Strip version string so that only the non-versioned part
// of the file name gets encrypted/obfuscated
hasVersion := false
var t time.Time
if i == (len(segments)-1) && version.Match(segments[i]) {
var s string
t, s = version.Remove(segments[i])
// version.Remove can fail, in which case it returns segments[i]
if s != segments[i] {
segments[i] = s
hasVersion = true
}
}
if c.mode == NameEncryptionStandard {
segments[i] = c.encryptSegment(segments[i])
} else {
segments[i] = c.obfuscateSegment(segments[i])
}
// Add back a version to the encrypted/obfuscated
// file name, if we stripped it off earlier
if hasVersion {
segments[i] = version.Add(segments[i], t)
}
}
return strings.Join(segments, "/")
}
@@ -500,21 +477,6 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
if !c.dirNameEncrypt && i != (len(segments)-1) {
continue
}
// Strip version string so that only the non-versioned part
// of the file name gets decrypted/deobfuscated
hasVersion := false
var t time.Time
if i == (len(segments)-1) && version.Match(segments[i]) {
var s string
t, s = version.Remove(segments[i])
// version.Remove can fail, in which case it returns segments[i]
if s != segments[i] {
segments[i] = s
hasVersion = true
}
}
if c.mode == NameEncryptionStandard {
segments[i], err = c.decryptSegment(segments[i])
} else {
@@ -524,12 +486,6 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
if err != nil {
return "", err
}
// Add back a version to the decrypted/deobfuscated
// file name, if we stripped it off earlier
if hasVersion {
segments[i] = version.Add(segments[i], t)
}
}
return strings.Join(segments, "/"), nil
}
@@ -538,18 +494,10 @@ func (c *Cipher) decryptFileName(in string) (string, error) {
func (c *Cipher) DecryptFileName(in string) (string, error) {
if c.mode == NameEncryptionOff {
remainingLength := len(in) - len(encryptedSuffix)
if remainingLength == 0 || !strings.HasSuffix(in, encryptedSuffix) {
return "", ErrorNotAnEncryptedFile
if remainingLength > 0 && strings.HasSuffix(in, encryptedSuffix) {
return in[:remainingLength], nil
}
decrypted := in[:remainingLength]
if version.Match(decrypted) {
_, unversioned := version.Remove(decrypted)
if unversioned == "" {
return "", ErrorNotAnEncryptedFile
}
}
// Leave the version string on, if it was there
return decrypted, nil
return "", ErrorNotAnEncryptedFile
}
return c.decryptFileName(in)
}


@@ -160,29 +160,22 @@ func TestEncryptFileName(t *testing.T) {
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
// Standard mode with directory name encryption off
c, _ = newCipher(NameEncryptionStandard, "", "", false)
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
assert.Equal(t, "1/12/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
// Now off mode
c, _ = newCipher(NameEncryptionOff, "", "", true)
assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
// Obfuscation mode
c, _ = newCipher(NameEncryptionObfuscated, "", "", true)
assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
assert.Equal(t, "49.6/99.23/150.890/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
assert.Equal(t, "49.6/99.23/150.890/162.uryyB-v2001-02-03-040506-123.GKG", c.EncryptFileName("1/12/123/hello-v2001-02-03-040506-123.txt"))
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
// Obfuscation mode with directory name encryption off
c, _ = newCipher(NameEncryptionObfuscated, "", "", false)
assert.Equal(t, "1/12/123/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
assert.Equal(t, "1/12/123/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
}
@@ -201,19 +194,14 @@ func TestDecryptFileName(t *testing.T) {
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
{NameEncryptionStandard, false, "1/12/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", "1-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
} {
c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
actual, actualErr := c.DecryptFileName(test.in)


@@ -14,6 +14,7 @@ import (
"fmt"
"io"
"io/ioutil"
"log"
"mime"
"net/http"
"path"
@@ -67,8 +68,8 @@ const (
defaultScope = "drive"
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
minChunkSize = 256 * fs.Kibi
defaultChunkSize = 8 * fs.Mebi
minChunkSize = 256 * fs.KibiByte
defaultChunkSize = 8 * fs.MebiByte
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks"
listRGrouping = 50 // number of IDs to search at once when using ListR
listRInputBuffer = 1000 // size of input buffer when using ListR
@@ -182,64 +183,32 @@ func init() {
Description: "Google Drive",
NewFs: NewFs,
CommandHelp: commandHelp,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, errors.Wrap(err, "couldn't parse config into struct")
fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
return
}
switch config.State {
case "":
// Fill in the scopes
driveConfig.Scopes = driveScopes(opt.Scope)
// Set the root_folder_id if using drive.appfolder
if driveScopesContainsAppFolder(driveConfig.Scopes) {
m.Set("root_folder_id", "appDataFolder")
}
if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{
OAuth2Config: driveConfig,
})
}
return fs.ConfigGoto("teamdrive")
case "teamdrive":
if opt.TeamDriveID == "" {
return fs.ConfigConfirm("teamdrive_ok", false, "config_change_team_drive", "Configure this as a Shared Drive (Team Drive)?\n")
}
return fs.ConfigConfirm("teamdrive_ok", false, "config_change_team_drive", fmt.Sprintf("Change current Shared Drive (Team Drive) ID %q?\n", opt.TeamDriveID))
case "teamdrive_ok":
if config.Result == "false" {
m.Set("team_drive", "")
return nil, nil
}
f, err := newFs(ctx, name, "", m)
if err != nil {
return nil, errors.Wrap(err, "failed to make Fs to list Shared Drives")
}
teamDrives, err := f.listTeamDrives(ctx)
if err != nil {
return nil, err
}
if len(teamDrives) == 0 {
return fs.ConfigError("", "No Shared Drives found in your account")
}
return fs.ConfigChoose("teamdrive_final", "config_team_drive", "Shared Drive", len(teamDrives), func(i int) (string, string) {
teamDrive := teamDrives[i]
return teamDrive.Id, teamDrive.Name
})
case "teamdrive_final":
driveID := config.Result
m.Set("team_drive", driveID)
m.Set("root_folder_id", "")
opt.TeamDriveID = driveID
opt.RootFolderID = ""
return nil, nil
// Fill in the scopes
driveConfig.Scopes = driveScopes(opt.Scope)
// Set the root_folder_id if using drive.appfolder
if driveScopesContainsAppFolder(driveConfig.Scopes) {
m.Set("root_folder_id", "appDataFolder")
}
if opt.ServiceAccountFile == "" {
err = oauthutil.Config(ctx, "drive", name, m, driveConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
}
err = configTeamDrive(ctx, opt, m, name)
if err != nil {
log.Fatalf("Failed to configure Shared Drive: %v", err)
}
return nil, fmt.Errorf("unknown state %q", config.State)
},
Options: append(driveOAuthOptions(), []fs.Option{{
Name: "scope",
@@ -498,7 +467,7 @@ See: https://github.com/rclone/rclone/issues/3631
Default: false,
Help: `Make upload limit errors be fatal
At the time of writing it is only possible to upload 750 GiB of data to
At the time of writing it is only possible to upload 750GB of data to
Google Drive a day (this is an undocumented limit). When this limit is
reached Google Drive produces a slightly different error message. When
this flag is set it causes these errors to be fatal. These will stop
@@ -515,7 +484,7 @@ See: https://github.com/rclone/rclone/issues/3857
Default: false,
Help: `Make download limit errors be fatal
At the time of writing it is only possible to download 10 TiB of data from
At the time of writing it is only possible to download 10TB of data from
Google Drive a day (this is an undocumented limit). When this limit is
reached Google Drive produces a slightly different error message. When
this flag is set it causes these errors to be fatal. These will stop
@@ -553,7 +522,7 @@ If this flag is set then rclone will ignore shortcut files completely.
} {
for mimeType, extension := range m {
if err := mime.AddExtensionType(extension, mimeType); err != nil {
fs.Errorf("Failed to register MIME type %q: %v", mimeType, err)
log.Fatalf("Failed to register MIME type %q: %v", mimeType, err)
}
}
}
@@ -980,6 +949,48 @@ func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, er
return
}
// Figure out if the user wants to use a team drive
func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name string) error {
ci := fs.GetConfig(ctx)
// Stop if we are running non-interactive config
if ci.AutoConfirm {
return nil
}
if opt.TeamDriveID == "" {
fmt.Printf("Configure this as a Shared Drive (Team Drive)?\n")
} else {
fmt.Printf("Change current Shared Drive (Team Drive) ID %q?\n", opt.TeamDriveID)
}
if !config.Confirm(false) {
return nil
}
f, err := newFs(ctx, name, "", m)
if err != nil {
return errors.Wrap(err, "failed to make Fs to list Shared Drives")
}
fmt.Printf("Fetching Shared Drive list...\n")
teamDrives, err := f.listTeamDrives(ctx)
if err != nil {
return err
}
if len(teamDrives) == 0 {
fmt.Printf("No Shared Drives found in your account")
return nil
}
var driveIDs, driveNames []string
for _, teamDrive := range teamDrives {
driveIDs = append(driveIDs, teamDrive.Id)
driveNames = append(driveNames, teamDrive.Name)
}
driveID := config.Choose("Enter a Shared Drive ID", driveIDs, driveNames, true)
m.Set("team_drive", driveID)
m.Set("root_folder_id", "")
opt.TeamDriveID = driveID
opt.RootFolderID = ""
return nil
}
// getClient makes an http client according to the options
func getClient(ctx context.Context, opt *Options) *http.Client {
t := fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
@@ -1158,7 +1169,7 @@ func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, e
}
}
f.rootFolderID = rootID
fs.Debugf(f, "'root_folder_id = %s' - save this in the config to speed up startup", rootID)
fs.Debugf(f, "root_folder_id = %q - save this in the config to speed up startup", rootID)
}
f.dirCache = dircache.New(f.root, f.rootFolderID, f)


@@ -1,350 +0,0 @@
// This file contains the implementation of the sync batcher for uploads
//
// Dropbox rules say you can start as many batches as you want, but
// you may only have one batch being committed and must wait for the
// batch to be finished before committing another.
package dropbox
import (
"context"
"fmt"
"sync"
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/atexit"
)
const (
maxBatchSize = 1000 // max size the batch can be
defaultTimeoutSync = 500 * time.Millisecond // kick off the batch if nothing added for this long (sync)
defaultTimeoutAsync = 10 * time.Second // kick off the batch if nothing added for this long (ssync)
defaultBatchSizeAsync = 100 // default batch size if async
)
// batcher holds info about the current items waiting for upload
type batcher struct {
f *Fs // Fs this batch is part of
mode string // configured batch mode
size int // maximum size for batch
timeout time.Duration // idle timeout for batch
async bool // whether we are using async batching
in chan batcherRequest // incoming items to batch
closed chan struct{} // close to indicate batcher shut down
atexit atexit.FnHandle // atexit handle
shutOnce sync.Once // make sure we shutdown once only
wg sync.WaitGroup // wait for shutdown
}
// batcherRequest holds an incoming request with a place for a reply
type batcherRequest struct {
commitInfo *files.UploadSessionFinishArg
result chan<- batcherResponse
}
// Return true if batcherRequest is the quit request
func (br *batcherRequest) isQuit() bool {
return br.commitInfo == nil
}
// Send this to get the engine to quit
var quitRequest = batcherRequest{}
// batcherResponse holds a response to be delivered to clients waiting
// for a batch to complete.
type batcherResponse struct {
err error
entry *files.FileMetadata
}
// newBatcher creates a new batcher structure
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
if size > maxBatchSize || size < 0 {
return nil, errors.Errorf("dropbox: batch size must be < %d and >= 0 - it is currently %d", maxBatchSize, size)
}
async := false
switch mode {
case "sync":
if size <= 0 {
ci := fs.GetConfig(ctx)
size = ci.Transfers
}
if timeout <= 0 {
timeout = defaultTimeoutSync
}
case "async":
if size <= 0 {
size = defaultBatchSizeAsync
}
if timeout <= 0 {
timeout = defaultTimeoutAsync
}
async = true
case "off":
size = 0
default:
return nil, errors.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
}
b := &batcher{
f: f,
mode: mode,
size: size,
timeout: timeout,
async: async,
in: make(chan batcherRequest, size),
closed: make(chan struct{}),
}
if b.Batching() {
b.atexit = atexit.Register(b.Shutdown)
b.wg.Add(1)
go b.commitLoop(context.Background())
}
return b, nil
}
// Batching returns true if batching is active
func (b *batcher) Batching() bool {
return b.size > 0
}
// finishBatch commits the batch, returning a batch status to poll or maybe complete
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (batchStatus *files.UploadSessionFinishBatchLaunch, err error) {
var arg = &files.UploadSessionFinishBatchArg{
Entries: items,
}
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatch(arg)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
}
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
if err != nil {
return nil, errors.Wrap(err, "batch commit failed")
}
return batchStatus, nil
}
// finishBatchJobStatus waits for the batch to complete returning completed entries
func (b *batcher) finishBatchJobStatus(ctx context.Context, launchBatchStatus *files.UploadSessionFinishBatchLaunch) (complete *files.UploadSessionFinishBatchResult, err error) {
if launchBatchStatus.AsyncJobId == "" {
return nil, errors.New("wait for batch completion: empty job ID")
}
var batchStatus *files.UploadSessionFinishBatchJobStatus
sleepTime := 100 * time.Millisecond
const maxTries = 120
for try := 1; try <= maxTries; try++ {
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
AsyncJobId: launchBatchStatus.AsyncJobId,
})
return shouldRetry(ctx, err)
})
if err != nil {
fs.Debugf(b.f, "Wait for batch: sleeping for %v after error: %v: try %d/%d", sleepTime, err, try, maxTries)
} else {
if batchStatus.Tag == "complete" {
return batchStatus.Complete, nil
}
fs.Debugf(b.f, "Wait for batch: sleeping for %v after status: %q: try %d/%d", sleepTime, batchStatus.Tag, try, maxTries)
}
time.Sleep(sleepTime)
sleepTime *= 2
if sleepTime > time.Second {
sleepTime = time.Second
}
}
if err == nil {
err = errors.New("batch didn't complete")
}
return nil, errors.Wrapf(err, "wait for batch failed after %d tries", maxTries)
}
// commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync
var signalled = b.async
defer func() {
if err != nil && signalled {
// Signal to clients that there was an error
for _, result := range results {
result <- batcherResponse{err: err}
}
}
}()
desc := fmt.Sprintf("%s batch length %d starting with: %s", b.mode, len(items), items[0].Commit.Path)
fs.Debugf(b.f, "Committing %s", desc)
// finalise the batch getting either a result or a job id to poll
batchStatus, err := b.finishBatch(ctx, items)
if err != nil {
return err
}
// check whether batch is complete
var complete *files.UploadSessionFinishBatchResult
switch batchStatus.Tag {
case "async_job_id":
// wait for batch to complete
complete, err = b.finishBatchJobStatus(ctx, batchStatus)
if err != nil {
return err
}
case "complete":
complete = batchStatus.Complete
default:
return errors.Errorf("batch returned unknown status %q", batchStatus.Tag)
}
// Check we got the right number of entries
entries := complete.Entries
if len(entries) != len(results) {
return errors.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
}
// Report results to clients
var (
errorTag = ""
errorCount = 0
)
for i := range results {
item := entries[i]
resp := batcherResponse{}
if item.Tag == "success" {
resp.entry = item.Success
} else {
errorCount++
errorTag = item.Tag
if item.Failure != nil {
errorTag = item.Failure.Tag
if item.Failure.LookupFailed != nil {
errorTag += "/" + item.Failure.LookupFailed.Tag
}
if item.Failure.Path != nil {
errorTag += "/" + item.Failure.Path.Tag
}
if item.Failure.PropertiesError != nil {
errorTag += "/" + item.Failure.PropertiesError.Tag
}
}
resp.err = errors.Errorf("batch upload failed: %s", errorTag)
}
if !b.async {
results[i] <- resp
}
}
// Show signalled so no need to report error to clients from now on
signalled = true
// Report an error if any failed in the batch
if errorTag != "" {
return errors.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
}
fs.Debugf(b.f, "Committed %s", desc)
return nil
}
// commitLoop runs the commit engine in the background
func (b *batcher) commitLoop(ctx context.Context) {
var (
items []*files.UploadSessionFinishArg // current batch of uncommitted files
results []chan<- batcherResponse // current batch of clients awaiting results
idleTimer = time.NewTimer(b.timeout)
commit = func() {
err := b.commitBatch(ctx, items, results)
if err != nil {
fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.mode, len(items), err)
}
items, results = nil, nil
}
)
defer b.wg.Done()
defer idleTimer.Stop()
idleTimer.Stop()
outer:
for {
select {
case req := <-b.in:
if req.isQuit() {
break outer
}
items = append(items, req.commitInfo)
results = append(results, req.result)
idleTimer.Stop()
if len(items) >= b.size {
commit()
} else {
idleTimer.Reset(b.timeout)
}
case <-idleTimer.C:
if len(items) > 0 {
fs.Debugf(b.f, "Batch idle for %v so committing", b.timeout)
commit()
}
}
}
// commit any remaining items
if len(items) > 0 {
commit()
}
}
// Shutdown finishes any pending batches then shuts everything down
//
// Can be called from atexit handler
func (b *batcher) Shutdown() {
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Commiting uploads - please wait...")
// show that batcher is shutting down
close(b.closed)
// quit the commitLoop by sending a quitRequest message
//
// Note that we don't close b.in because that will
// cause write to closed channel in Commit when we are
// exiting due to a signal.
b.in <- quitRequest
b.wg.Wait()
})
}
// Commit commits the file using a batch call, first adding it to the
// batch and then waiting for the batch to complete in a synchronous
// way if async is not set.
func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {
select {
case <-b.closed:
return nil, fserrors.FatalError(errors.New("batcher is shutting down"))
default:
}
fs.Debugf(b.f, "Adding %q to batch", commitInfo.Commit.Path)
resp := make(chan batcherResponse, 1)
b.in <- batcherRequest{
commitInfo: commitInfo,
result: resp,
}
// If running async then don't wait for the result
if b.async {
return nil, nil
}
result := <-resp
return result.entry, result.err
}
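The commitLoop in the removed batcher above is the usual flush-on-size-or-idle-timeout pattern: collect requests from a channel, commit when the batch is full, commit when nothing new has arrived for the idle timeout, and commit once more on shutdown. A stripped-down, self-contained sketch of that loop (hypothetical names, plain strings instead of Dropbox commit arguments, not part of this change set):

package main

import (
	"fmt"
	"time"
)

func batchLoop(in <-chan string, size int, timeout time.Duration) {
	var items []string
	idle := time.NewTimer(timeout)
	idle.Stop()
	flush := func() {
		if len(items) > 0 {
			fmt.Printf("committing batch of %d\n", len(items))
			items = nil
		}
	}
	for {
		select {
		case item, ok := <-in:
			if !ok {
				flush() // commit whatever is left on shutdown
				return
			}
			items = append(items, item)
			idle.Stop()
			if len(items) >= size {
				flush()
			} else {
				idle.Reset(timeout) // restart the idle clock
			}
		case <-idle.C:
			flush() // nothing new arrived for `timeout`, commit early
		}
	}
}

func main() {
	in := make(chan string)
	done := make(chan struct{})
	go func() { batchLoop(in, 3, 200*time.Millisecond); close(done) }()
	for i := 0; i < 5; i++ {
		in <- fmt.Sprintf("file-%d", i)
	}
	close(in)
	<-done
}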


@@ -25,6 +25,7 @@ import (
"context"
"fmt"
"io"
"log"
"path"
"regexp"
"strings"
@@ -64,9 +65,9 @@ const (
// Upload chunk size - setting too small makes uploads slow.
// Chunks are buffered into memory for retries.
//
// Speed vs chunk size uploading a 1 GiB file on 2017-11-22
// Speed vs chunk size uploading a 1 GB file on 2017-11-22
//
// Chunk Size MiB, Speed MiByte/s, % of max
// Chunk Size MB, Speed Mbyte/s, % of max
// 1 1.364 11%
// 2 2.443 19%
// 4 4.288 33%
@@ -81,11 +82,11 @@ const (
// 96 12.302 95%
// 128 12.945 100%
//
// Choose 48 MiB which is 91% of Maximum speed. rclone by
// default does 4 transfers so this should use 4*48 MiB = 192 MiB
// Choose 48MB which is 91% of Maximum speed. rclone by
// default does 4 transfers so this should use 4*48MB = 192MB
// by default.
defaultChunkSize = 48 * fs.Mebi
maxChunkSize = 150 * fs.Mebi
defaultChunkSize = 48 * fs.MebiByte
maxChunkSize = 150 * fs.MebiByte
// Max length of filename parts: https://help.dropbox.com/installs-integrations/sync-uploads/files-not-syncing
maxFileNameLength = 255
)
@@ -98,10 +99,8 @@ var (
"files.content.write",
"files.content.read",
"sharing.write",
"account_info.read", // needed for About
// "file_requests.write",
// "members.read", // needed for impersonate - but causes app to need to be approved by Dropbox Team Admin during the flow
// "team_data.member"
},
// Endpoint: oauth2.Endpoint{
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
@@ -131,26 +130,29 @@ func getOauthConfig(m configmap.Mapper) *oauth2.Config {
}
// Make a copy of the config
config := *dropboxConfig
// Make a copy of the scopes with extra scopes requires appended
config.Scopes = append(config.Scopes, "members.read", "team_data.member")
// Make a copy of the scopes with "members.read" appended
config.Scopes = append(config.Scopes, "members.read")
return &config
}
// Register with Fs
func init() {
DbHashType = hash.RegisterHash("dropbox", "DropboxHash", 64, dbhash.New)
DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
fs.Register(&fs.RegInfo{
Name: "dropbox",
Description: "Dropbox",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: getOauthConfig(m),
NoOffline: true,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
opt := oauthutil.Options{
NoOffline: true,
OAuth2Opts: []oauth2.AuthCodeOption{
oauth2.SetAuthURLParam("token_access_type", "offline"),
},
})
}
err := oauthutil.Config(ctx, "dropbox", name, m, getOauthConfig(m), &opt)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "chunk_size",
@@ -160,7 +162,7 @@ Any files larger than this will be uploaded in chunks of this size.
Note that chunks are buffered in memory (one at a time) so rclone can
deal with retries. Setting this larger will increase the speed
slightly (at most 10%% for 128 MiB in tests) at the cost of using more
slightly (at most 10%% for 128MB in tests) at the cost of using more
memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
Default: defaultChunkSize,
Advanced: true,
@@ -209,63 +211,6 @@ Note that we don't unmount the shared folder afterwards so the
shared folder.`,
Default: false,
Advanced: true,
}, {
Name: "batch_mode",
Help: `Upload file batching sync|async|off.
This sets the batch mode used by rclone.
For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)
This has 3 possible values
- off - no batching
- sync - batch uploads and check completion (default)
- async - batch upload and don't check completion
Rclone will close any outstanding batches when it exits which may make
a delay on quit.
`,
Default: "sync",
Advanced: true,
}, {
Name: "batch_size",
Help: `Max number of files in upload batch.
This sets the batch size of files to upload. It has to be less than 1000.
By default this is 0 which means rclone which calculate the batch size
depending on the setting of batch_mode.
- batch_mode: async - default batch_size is 100
- batch_mode: sync - default batch_size is the same as --transfers
- batch_mode: off - not in use
Rclone will close any outstanding batches when it exits which may make
a delay on quit.
Setting this is a great idea if you are uploading lots of small files
as it will make them a lot quicker. You can use --transfers 32 to
maximise throughput.
`,
Default: 0,
Advanced: true,
}, {
Name: "batch_timeout",
Help: `Max time to allow an idle upload batch before uploading
If an upload batch is idle for more than this long then it will be
uploaded.
The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.
- batch_mode: async - default batch_timeout is 500ms
- batch_mode: sync - default batch_timeout is 10s
- batch_mode: off - not in use
`,
Default: fs.Duration(0),
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -289,10 +234,6 @@ type Options struct {
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
AsyncBatch bool `config:"async_batch"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -312,7 +253,6 @@ type Fs struct {
slashRootSlash string // root with "/" prefix and postfix, lowercase
pacer *fs.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none
batcher *batcher // batch builder
}
// Object describes a dropbox object
@@ -328,6 +268,8 @@ type Object struct {
hash string // content_hash of the object
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
@@ -381,7 +323,7 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.SizeSuffixBase
const minChunkSize = fs.Byte
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
@@ -438,10 +380,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
if err != nil {
return nil, err
}
cfg := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
Client: oAuthClient, // maybe???
@@ -1413,13 +1351,13 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
switch info := entry.(type) {
case *files.FolderMetadata:
entryType = fs.EntryDirectory
entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
case *files.FileMetadata:
entryType = fs.EntryObject
entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
case *files.DeletedMetadata:
entryType = fs.EntryObject
entryPath = strings.TrimPrefix(info.PathDisplay, f.slashRootSlash)
entryPath = strings.TrimLeft(info.PathDisplay, f.slashRootSlash)
default:
fs.Errorf(entry, "dropbox ChangeNotify: ignoring unknown EntryType %T", entry)
continue
@@ -1441,13 +1379,6 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(DbHashType)
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
f.batcher.Shutdown()
return nil
}
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -1607,83 +1538,97 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// uploadChunked uploads the object in parts
//
// Will introduce two additional network requests to start and finish the session.
// If the size is unknown (i.e. -1) the method incurs one additional
// request to the Dropbox API that does not carry a payload to close the append session.
// Will work optimally if size is >= uploadChunkSize. If the size is either
// unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
// avoidable request to the Dropbox API that does not carry payload.
func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
// start upload
chunkSize := int64(o.fs.opt.ChunkSize)
chunks := 0
if size != -1 {
chunks = int(size/chunkSize) + 1
}
in := readers.NewCountingReader(in0)
buf := make([]byte, int(chunkSize))
fmtChunk := func(cur int, last bool) {
if chunks == 0 && last {
fs.Debugf(o, "Streaming chunk %d/%d", cur, cur)
} else if chunks == 0 {
fs.Debugf(o, "Streaming chunk %d/unknown", cur)
} else {
fs.Debugf(o, "Uploading chunk %d/%d", cur, chunks)
}
}
// write the first chunk
fmtChunk(1, false)
var res *files.UploadSessionStartResult
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, nil)
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, chunk)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, err
}
chunkSize := int64(o.fs.opt.ChunkSize)
chunks, remainder := size/chunkSize, size%chunkSize
if remainder > 0 {
chunks++
}
// write chunks
in := readers.NewCountingReader(in0)
buf := make([]byte, int(chunkSize))
cursor := files.UploadSessionCursor{
SessionId: res.SessionId,
Offset: 0,
}
appendArg := files.UploadSessionAppendArg{Cursor: &cursor}
for currentChunk := 1; ; currentChunk++ {
cursor.Offset = in.BytesRead()
appendArg := files.UploadSessionAppendArg{
Cursor: &cursor,
Close: false,
}
if chunks < 0 {
fs.Debugf(o, "Streaming chunk %d/unknown", currentChunk)
} else {
fs.Debugf(o, "Uploading chunk %d/%d", currentChunk, chunks)
// write more whole chunks (if any)
currentChunk := 2
for {
if chunks > 0 && currentChunk >= chunks {
// if the size is known, only upload full chunks. Remaining bytes are uploaded with
// the UploadSessionFinish request.
break
} else if chunks == 0 && in.BytesRead()-cursor.Offset < uint64(chunkSize) {
// if the size is unknown, upload as long as we can read full chunks from the reader.
// The UploadSessionFinish request will not contain any payload.
break
}
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
cursor.Offset = in.BytesRead()
fmtChunk(currentChunk, false)
chunk = readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
err = o.fs.pacer.Call(func() (bool, error) {
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
// after session is started, we retry everything
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
if err != nil {
return nil, err
}
if appendArg.Close {
break
}
if size > 0 {
// if size is known, check if next chunk is final
appendArg.Close = uint64(size)-in.BytesRead() <= uint64(chunkSize)
} else {
// if size is unknown, upload as long as we can read full chunks from the reader
appendArg.Close = in.BytesRead()-cursor.Offset < uint64(chunkSize)
}
currentChunk++
}
// finish upload
// write the remains
cursor.Offset = in.BytesRead()
args := &files.UploadSessionFinishArg{
Cursor: &cursor,
Commit: commitInfo,
}
// If we are batching then we should have written all the data now
// store the commit info now for a batch commit
if o.fs.batcher.Batching() {
return o.fs.batcher.Commit(ctx, args)
}
fmtChunk(currentChunk, true)
chunk = readers.NewRepeatableReaderBuffer(in, buf)
err = o.fs.pacer.Call(func() (bool, error) {
entry, err = o.fs.srv.UploadSessionFinish(args, nil)
// seek to the start in case this is a retry
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
entry, err = o.fs.srv.UploadSessionFinish(args, chunk)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
@@ -1750,7 +1695,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
size := src.Size()
var err error
var entry *files.FileMetadata
if size > int64(o.fs.opt.ChunkSize) || size < 0 || o.fs.batcher.Batching() {
if size > int64(o.fs.opt.ChunkSize) || size == -1 {
entry, err = o.uploadChunked(ctx, in, commitInfo, size)
} else {
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
@@ -1761,15 +1706,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return errors.Wrap(err, "upload failed")
}
// If we haven't received data back from batch upload then fake it
//
// This will only happen if we are uploading async batches
if entry == nil {
o.bytes = size
o.modTime = commitInfo.ClientModified
o.hash = "" // we don't have this
return nil
}
return o.setMetadataFromEntry(entry)
}
@@ -1797,7 +1733,6 @@ var (
_ fs.PublicLinker = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Shutdowner = &Fs{}
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)
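
For context, the chunked upload path above drives a Dropbox upload session in three phases: one UploadSessionStart, zero or more UploadSessionAppendV2 calls, and a final UploadSessionFinish carrying the commit info. The sketch below shows that flow in isolation; it assumes `srv` is an authenticated `files.Client` from the Dropbox Go SDK (import path depends on the SDK version pinned in go.mod) and is an illustration only, not rclone code, so the pacer, retries, hashing and the batcher are all omitted.

```go
package sketch

import (
	"bytes"
	"io"

	"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
)

// uploadInChunks streams r to dstPath in chunkSize pieces using an upload session.
func uploadInChunks(srv files.Client, r io.Reader, dstPath string, chunkSize int64) (*files.FileMetadata, error) {
	buf := make([]byte, chunkSize)

	// Phase 1: start the session with the first chunk.
	n, err := io.ReadFull(r, buf)
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		return nil, err
	}
	res, err := srv.UploadSessionStart(&files.UploadSessionStartArg{}, bytes.NewReader(buf[:n]))
	if err != nil {
		return nil, err
	}
	cursor := files.UploadSessionCursor{SessionId: res.SessionId, Offset: uint64(n)}

	// Phase 2: append further chunks, advancing the cursor by the bytes sent so far.
	for {
		n, err = io.ReadFull(r, buf)
		if n == 0 {
			break
		}
		arg := files.UploadSessionAppendArg{Cursor: &cursor}
		if appendErr := srv.UploadSessionAppendV2(&arg, bytes.NewReader(buf[:n])); appendErr != nil {
			return nil, appendErr
		}
		cursor.Offset += uint64(n)
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			break
		}
	}

	// Phase 3: finish the session, committing the file at dstPath.
	// Real code would use files.NewCommitInfo(dstPath) so SDK defaults are filled in.
	commit := &files.CommitInfo{Path: dstPath}
	return srv.UploadSessionFinish(&files.UploadSessionFinishArg{Cursor: &cursor, Commit: commit}, nil)
}
```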

View File

@@ -4,7 +4,6 @@ import (
"context"
"io"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
@@ -91,7 +90,6 @@ func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenRespons
request := DownloadRequest{
URL: url,
Single: 1,
Pass: f.opt.FilePassword,
}
opts := rest.Opts{
Method: "POST",
@@ -120,16 +118,10 @@ func fileFromSharedFile(file *SharedFile) File {
func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
opts := rest.Opts{
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
ContentType: "application/x-www-form-urlencoded",
}
if f.opt.FolderPassword != "" {
opts.Method = "POST"
opts.Parameters = nil
opts.Body = strings.NewReader("json=1&pass=" + url.QueryEscape(f.opt.FolderPassword))
Method: "GET",
RootURL: "https://1fichier.com/dir/",
Path: id,
Parameters: map[string][]string{"json": {"1"}},
}
var sharedFiles SharedFolderResponse
@@ -319,7 +311,7 @@ func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (respo
return nil, errors.Wrap(err, "couldn't remove folder")
}
if response.Status != "OK" {
return nil, errors.Errorf("can't remove folder: %s", response.Message)
return nil, errors.New("Can't remove non-empty dir")
}
// fs.Debugf(f, "Removed Folder with id `%s`", directoryID)
@@ -404,34 +396,6 @@ func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename stri
return response, nil
}
func (f *Fs) renameFile(ctx context.Context, url string, newName string) (response *RenameFileResponse, err error) {
request := &RenameFileRequest{
URLs: []RenameFileURL{
{
URL: url,
Filename: newName,
},
},
}
opts := rest.Opts{
Method: "POST",
Path: "/file/rename.cgi",
}
response = &RenameFileResponse{}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "couldn't rename file")
}
return response, nil
}
func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
// fs.Debugf(f, "Requesting Upload node")

View File

@@ -35,7 +35,9 @@ func init() {
fs.Register(&fs.RegInfo{
Name: "fichier",
Description: "1Fichier",
NewFs: NewFs,
Config: func(ctx context.Context, name string, config configmap.Mapper) {
},
NewFs: NewFs,
Options: []fs.Option{{
Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
Name: "api_key",
@@ -44,18 +46,6 @@ func init() {
Name: "shared_folder",
Required: false,
Advanced: true,
}, {
Help: "If you want to download a shared file that is password protected, add this parameter",
Name: "file_password",
Required: false,
Advanced: true,
IsPassword: true,
}, {
Help: "If you want to list the files in a shared folder that is password protected, add this parameter",
Name: "folder_password",
Required: false,
Advanced: true,
IsPassword: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -87,11 +77,9 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
FilePassword string `config:"file_password"`
FolderPassword string `config:"folder_password"`
Enc encoder.MultiEncoder `config:"encoding"`
APIKey string `config:"api_key"`
SharedFolder string `config:"shared_folder"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs is the interface a cloud storage system must provide
@@ -437,45 +425,25 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantMove
}
// Find current directory ID
_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
if err != nil {
return nil, err
}
// Create temporary object
dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
if err != nil {
return nil, err
}
// If it is in the correct directory, just rename it
var url string
if currentDirectoryID == directoryID {
resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't rename file")
}
if resp.Status != "OK" {
return nil, errors.Errorf("couldn't rename file: %s", resp.Message)
}
url = resp.URLs[0].URL
} else {
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.Errorf("couldn't move file: %s", resp.Message)
}
url = resp.URLs[0]
folderID, err := strconv.Atoi(directoryID)
if err != nil {
return nil, err
}
resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
if err != nil {
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.New("couldn't move file")
}
file, err := f.readFileInfo(ctx, url)
file, err := f.readFileInfo(ctx, resp.URLs[0])
if err != nil {
return nil, errors.New("couldn't read file data")
}
@@ -506,7 +474,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, errors.Wrap(err, "couldn't move file")
}
if resp.Status != "OK" {
return nil, errors.Errorf("couldn't move file: %s", resp.Message)
return nil, errors.New("couldn't move file")
}
file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)

View File

@@ -19,7 +19,6 @@ type ListFilesRequest struct {
type DownloadRequest struct {
URL string `json:"url"`
Single int `json:"single"`
Pass string `json:"pass,omitempty"`
}
// RemoveFolderRequest is the request structure of the corresponding request
@@ -64,9 +63,8 @@ type MoveFileRequest struct {
// MoveFileResponse is the response structure of the corresponding request
type MoveFileResponse struct {
Status string `json:"status"`
Message string `json:"message"`
URLs []string `json:"urls"`
Status string `json:"status"`
URLs []string `json:"urls"`
}
// CopyFileRequest is the request structure of the corresponding request
@@ -78,10 +76,9 @@ type CopyFileRequest struct {
// CopyFileResponse is the response structure of the corresponding request
type CopyFileResponse struct {
Status string `json:"status"`
Message string `json:"message"`
Copied int `json:"copied"`
URLs []FileCopy `json:"urls"`
Status string `json:"status"`
Copied int `json:"copied"`
URLs []FileCopy `json:"urls"`
}
// FileCopy is used in the the CopyFileResponse
@@ -90,30 +87,6 @@ type FileCopy struct {
ToURL string `json:"to_url"`
}
// RenameFileURL is the data structure to rename a single file
type RenameFileURL struct {
URL string `json:"url"`
Filename string `json:"filename"`
}
// RenameFileRequest is the request structure of the corresponding request
type RenameFileRequest struct {
URLs []RenameFileURL `json:"urls"`
Pretty int `json:"pretty"`
}
// RenameFileResponse is the response structure of the corresponding request
type RenameFileResponse struct {
Status string `json:"status"`
Message string `json:"message"`
Renamed int `json:"renamed"`
URLs []struct {
URL string `json:"url"`
OldFilename string `json:"old_filename"`
NewFilename string `json:"new_filename"`
} `json:"urls"`
}
// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
ID string `json:"id"`

View File

@@ -5,7 +5,6 @@ package api
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"strings"
@@ -52,23 +51,6 @@ func (t Time) String() string {
return time.Time(t).UTC().Format(timeFormatParameters)
}
// Int represents an integer which can be represented in JSON as a
// quoted integer or an integer.
type Int int
// MarshalJSON turns an Int into JSON
func (i *Int) MarshalJSON() (out []byte, err error) {
return json.Marshal((*int)(i))
}
// UnmarshalJSON turns JSON into an Int
func (i *Int) UnmarshalJSON(data []byte) error {
if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
data = data[1 : len(data)-1]
}
return json.Unmarshal(data, (*int)(i))
}
// Status return returned in all status responses
type Status struct {
Code string `json:"status"`
@@ -133,7 +115,7 @@ type GetFolderContentsResponse struct {
Total int `json:"total,string"`
Items []Item `json:"filelist"`
Folder Item `json:"folder"`
From Int `json:"from"`
From int `json:"from,string"`
//Count int `json:"count"`
Pid string `json:"pid"`
RefreshResult Status `json:"refreshresult"`
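
The Int helper above exists because this API sometimes returns `from` as a quoted string and sometimes as a bare number; the standard `json:",string"` tag (the removed form) only accepts the quoted variant and errors on a bare value. A small self-contained demonstration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Int accepts both "42" (quoted) and 42 (bare) in JSON, as in the hunk above.
type Int int

func (i *Int) UnmarshalJSON(data []byte) error {
	// Strip surrounding quotes, if any, then decode as a plain int.
	if len(data) >= 2 && data[0] == '"' && data[len(data)-1] == '"' {
		data = data[1 : len(data)-1]
	}
	return json.Unmarshal(data, (*int)(i))
}

func main() {
	var a, b struct {
		From Int `json:"from"`
	}
	_ = json.Unmarshal([]byte(`{"from":"42"}`), &a) // quoted value
	_ = json.Unmarshal([]byte(`{"from":42}`), &b)   // bare value
	fmt.Println(a.From, b.From)                     // 42 42
}
```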

View File

@@ -241,6 +241,23 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
return len(p), nil
}
type dialCtx struct {
f *Fs
ctx context.Context
}
// dial a new connection with fshttp dialer
func (d *dialCtx) dial(network, address string) (net.Conn, error) {
conn, err := fshttp.NewDialer(d.ctx).Dial(network, address)
if err != nil {
return nil, err
}
if d.f.tlsConf != nil {
conn = tls.Client(conn, d.f.tlsConf)
}
return conn, err
}
// shouldRetry returns a boolean as to whether this err deserve to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
@@ -260,22 +277,9 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
fs.Debugf(f, "Connecting to FTP server")
// Make ftp library dial with fshttp dialer optionally using TLS
dial := func(network, address string) (conn net.Conn, err error) {
conn, err = fshttp.NewDialer(ctx).Dial(network, address)
if f.tlsConf != nil && err == nil {
conn = tls.Client(conn, f.tlsConf)
}
return
}
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dial)}
if f.opt.TLS {
// Our dialer takes care of TLS but ftp library also needs tlsConf
// as a trigger for sending PBSZ and PROT options to the server.
ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
} else if f.opt.ExplicitTLS {
dCtx := dialCtx{f, ctx}
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dCtx.dial)}
if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
// Initial connection needs to be cleartext for explicit TLS
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)

View File

@@ -19,9 +19,9 @@ import (
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"path"
"strconv"
"strings"
"time"
@@ -51,10 +51,10 @@ import (
const (
rcloneClientID = "202264815644.apps.googleusercontent.com"
rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
timeFormat = time.RFC3339Nano
metaMtime = "mtime" // key to store mtime in metadata
metaMtimeGsutil = "goog-reserved-file-mtime" // key used by GSUtil to store mtime in metadata
listChunks = 1000 // chunk size to read directory listings
timeFormatIn = time.RFC3339
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
metaMtime = "mtime" // key to store mtime under in metadata
listChunks = 1000 // chunk size to read directory listings
minSleep = 10 * time.Millisecond
)
@@ -76,16 +76,17 @@ func init() {
Prefix: "gcs",
Description: "Google Cloud Storage (this is not Google Drive)",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials")
anonymous, _ := m.Get("anonymous")
if saFile != "" || saCreds != "" || anonymous == "true" {
return nil, nil
return
}
err := oauthutil.Config(ctx, "google cloud storage", name, m, storageConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: storageConfig,
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "project_number",
@@ -921,7 +922,7 @@ func (o *Object) setMetaData(info *storage.Object) {
// read mtime out of metadata if available
mtimeString, ok := info.Metadata[metaMtime]
if ok {
modTime, err := time.Parse(timeFormat, mtimeString)
modTime, err := time.Parse(timeFormatIn, mtimeString)
if err == nil {
o.modTime = modTime
return
@@ -929,19 +930,8 @@ func (o *Object) setMetaData(info *storage.Object) {
fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
}
// Fallback to GSUtil mtime
mtimeGsutilString, ok := info.Metadata[metaMtimeGsutil]
if ok {
unixTimeSec, err := strconv.ParseInt(mtimeGsutilString, 10, 64)
if err == nil {
o.modTime = time.Unix(unixTimeSec, 0)
return
}
fs.Debugf(o, "Failed to read GSUtil mtime from metadata: %s", err)
}
// Fallback to the Updated time
modTime, err := time.Parse(timeFormat, info.Updated)
modTime, err := time.Parse(timeFormatIn, info.Updated)
if err != nil {
fs.Logf(o, "Bad time decode: %v", err)
} else {
@@ -998,8 +988,7 @@ func (o *Object) ModTime(ctx context.Context) time.Time {
// Returns metadata for an object
func metadataFromModTime(modTime time.Time) map[string]string {
metadata := make(map[string]string, 1)
metadata[metaMtime] = modTime.Format(timeFormat)
metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
metadata[metaMtime] = modTime.Format(timeFormatOut)
return metadata
}
@@ -1011,11 +1000,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
return err
}
// Add the mtime to the existing metadata
mtime := modTime.Format(timeFormatOut)
if object.Metadata == nil {
object.Metadata = make(map[string]string, 1)
}
object.Metadata[metaMtime] = modTime.Format(timeFormat)
object.Metadata[metaMtimeGsutil] = strconv.FormatInt(modTime.Unix(), 10)
object.Metadata[metaMtime] = mtime
// Copy the object to itself to update the metadata
// Using PATCH requires too many permissions
bucket, bucketPath := o.split()

View File

@@ -8,6 +8,7 @@ import (
"encoding/json"
"fmt"
"io"
golog "log"
"net/http"
"net/url"
"path"
@@ -77,36 +78,36 @@ func init() {
Prefix: "gphotos",
Description: "Google Photos",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, errors.Wrap(err, "couldn't parse config into struct")
fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
return
}
switch config.State {
case "":
// Fill in the scopes
if opt.ReadOnly {
oauthConfig.Scopes[0] = scopeReadOnly
} else {
oauthConfig.Scopes[0] = scopeReadWrite
}
return oauthutil.ConfigOut("warning", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
case "warning":
// Warn the user as required by google photos integration
return fs.ConfigConfirm("warning_done", true, "config_warning", `Warning
IMPORTANT: All media items uploaded to Google Photos with rclone
are stored in full resolution at original quality. These uploads
will count towards storage in your Google Account.`)
case "warning_done":
return nil, nil
// Fill in the scopes
if opt.ReadOnly {
oauthConfig.Scopes[0] = scopeReadOnly
} else {
oauthConfig.Scopes[0] = scopeReadWrite
}
return nil, fmt.Errorf("unknown state %q", config.State)
// Do the oauth
err = oauthutil.Config(ctx, "google photos", name, m, oauthConfig, nil)
if err != nil {
golog.Fatalf("Failed to configure token: %v", err)
}
// Warn the user
fmt.Print(`
*** IMPORTANT: All media items uploaded to Google Photos with rclone
*** are stored in full resolution at original quality. These uploads
*** will count towards storage in your Google Account.
`)
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "read_only",

View File

@@ -47,7 +47,7 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
ts := httptest.NewServer(handler)
// Configure the remote
configfile.Install()
configfile.LoadConfig(context.Background())
// fs.Config.LogLevel = fs.LogLevelDebug
// fs.Config.DumpHeaders = true
// fs.Config.DumpBodies = true

View File

@@ -11,6 +11,7 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"strings"
"time"
@@ -55,10 +56,11 @@ func init() {
Name: "hubic",
Description: "Hubic",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
Config: func(ctx context.Context, name string, m configmap.Mapper) {
err := oauthutil.Config(ctx, "hubic", name, m, oauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
})

View File

@@ -10,6 +10,7 @@ import (
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/url"
@@ -48,29 +49,37 @@ const (
rootURL = "https://jfs.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/"
baseURL = "https://www.jottacloud.com/"
defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
cachePrefix = "rclone-jcmd5-"
configDevice = "device"
configMountpoint = "mountpoint"
configTokenURL = "tokenURL"
configClientID = "client_id"
configClientSecret = "client_secret"
configUsername = "username"
configVersion = 1
defaultTokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
defaultClientID = "jottacli"
legacyTokenURL = "https://api.jottacloud.com/auth/v1/token"
legacyRegisterURL = "https://api.jottacloud.com/auth/v1/register"
legacyClientID = "nibfk8biu12ju7hpqomr8b1e40"
legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
legacyConfigVersion = 0
v1tokenURL = "https://api.jottacloud.com/auth/v1/token"
v1registerURL = "https://api.jottacloud.com/auth/v1/register"
v1ClientID = "nibfk8biu12ju7hpqomr8b1e40"
v1EncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
v1configVersion = 0
teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaCloudClientID = "desktop"
)
var (
// Description of how to auth for this app for a personal account
oauthConfig = &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: defaultTokenURL,
TokenURL: defaultTokenURL,
},
RedirectURL: oauthutil.RedirectLocalhostURL,
}
)
// Register with Fs
func init() {
// needs to be done early so we can use oauth during config
@@ -78,7 +87,42 @@ func init() {
Name: "jottacloud",
Description: "Jottacloud",
NewFs: NewFs,
Config: Config,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
refresh := false
if version, ok := m.Get("configVersion"); ok {
ver, err := strconv.Atoi(version)
if err != nil {
log.Fatalf("Failed to parse config version - corrupted config")
}
refresh = (ver != configVersion) && (ver != v1configVersion)
}
if refresh {
fmt.Printf("Config outdated - refreshing\n")
} else {
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm(false) {
return
}
}
}
fmt.Printf("Choose authentication type:\n" +
"1: Standard authentication - use this if you're a normal Jottacloud user.\n" +
"2: Legacy authentication - this is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n" +
"3: Telia Cloud authentication - use this if you are using Telia Cloud.\n")
switch config.ChooseNumber("Your choice", 1, 3) {
case 1:
v2config(ctx, name, m)
case 2:
v1config(ctx, name, m)
case 3:
teliaCloudConfig(ctx, name, m)
}
},
Options: []fs.Option{{
Name: "md5_memory_limit",
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
@@ -113,183 +157,6 @@ func init() {
})
}
// Config runs the backend configuration protocol
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
switch config.State {
case "":
return fs.ConfigChooseFixed("auth_type_done", "config_type", `Authentication type`, []fs.OptionExample{{
Value: "standard",
Help: "Standard authentication - use this if you're a normal Jottacloud user.",
}, {
Value: "legacy",
Help: "Legacy authentication - this is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
}, {
Value: "telia",
Help: "Telia Cloud authentication - use this if you are using Telia Cloud.",
}})
case "auth_type_done":
// Jump to next state according to config chosen
return fs.ConfigGoto(config.Result)
case "standard": // configure a jottacloud backend using the modern JottaCli token based authentication
m.Set("configVersion", fmt.Sprint(configVersion))
return fs.ConfigInput("standard_token", "config_login_token", "Personal login token.\n\nGenerate here: https://www.jottacloud.com/web/secure")
case "standard_token":
loginToken := config.Result
m.Set(configClientID, defaultClientID)
m.Set(configClientSecret, "")
srv := rest.NewClient(fshttp.NewClient(ctx))
token, tokenEndpoint, err := doTokenAuth(ctx, srv, loginToken)
if err != nil {
return nil, errors.Wrap(err, "failed to get oauth token")
}
m.Set(configTokenURL, tokenEndpoint)
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
return nil, errors.Wrap(err, "error while saving token")
}
return fs.ConfigGoto("choose_device")
case "legacy": // configure a jottacloud backend using legacy authentication
m.Set("configVersion", fmt.Sprint(legacyConfigVersion))
return fs.ConfigConfirm("legacy_api", false, "config_machine_specific", `Do you want to create a machine specific API key?
Rclone has its own Jottacloud API KEY which works fine as long as one
only uses rclone on a single machine. When you want to use rclone with
this account on more than one machine it's recommended to create a
machine specific API key. These keys can NOT be shared between
machines.`)
case "legacy_api":
srv := rest.NewClient(fshttp.NewClient(ctx))
if config.Result == "true" {
deviceRegistration, err := registerDevice(ctx, srv)
if err != nil {
return nil, errors.Wrap(err, "failed to register device")
}
m.Set(configClientID, deviceRegistration.ClientID)
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
fs.Debugf(nil, "Got clientID %q and clientSecret %q", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
}
return fs.ConfigInput("legacy_username", "config_username", "Username (e-mail address)")
case "legacy_username":
m.Set(configUsername, config.Result)
return fs.ConfigPassword("legacy_password", "config_password", "Password (only used in setup, will not be stored)")
case "legacy_password":
m.Set("password", config.Result)
m.Set("auth_code", "")
return fs.ConfigGoto("legacy_do_auth")
case "legacy_auth_code":
authCode := strings.Replace(config.Result, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
m.Set("auth_code", authCode)
return fs.ConfigGoto("legacy_do_auth")
case "legacy_do_auth":
username, _ := m.Get(configUsername)
password, _ := m.Get("password")
password = obscure.MustReveal(password)
authCode, _ := m.Get("auth_code")
srv := rest.NewClient(fshttp.NewClient(ctx))
clientID, ok := m.Get(configClientID)
if !ok {
clientID = legacyClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = legacyEncryptedClientSecret
}
oauthConfig := &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: legacyTokenURL,
},
ClientID: clientID,
ClientSecret: obscure.MustReveal(clientSecret),
}
token, err := doLegacyAuth(ctx, srv, oauthConfig, username, password, authCode)
if err == errAuthCodeRequired {
return fs.ConfigInput("legacy_auth_code", "config_auth_code", "Verification Code\nThis account uses 2 factor authentication; you will receive a verification code via SMS.")
}
m.Set("password", "")
m.Set("auth_code", "")
if err != nil {
return nil, errors.Wrap(err, "failed to get oauth token")
}
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
return nil, errors.Wrap(err, "error while saving token")
}
return fs.ConfigGoto("choose_device")
case "telia": // telia cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, teliaCloudClientID)
m.Set(configTokenURL, teliaCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: teliaCloudAuthURL,
TokenURL: teliaCloudTokenURL,
},
ClientID: teliaCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "choose_device":
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", "Use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?")
case "choose_device_query":
if config.Result != "true" {
m.Set(configDevice, "")
m.Set(configMountpoint, "")
return fs.ConfigGoto("end")
}
oAuthClient, _, err := getOAuthClient(ctx, name, m)
if err != nil {
return nil, err
}
srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
cust, err := getCustomerInfo(ctx, apiSrv)
if err != nil {
return nil, err
}
m.Set(configUsername, cust.Username)
acc, err := getDriveInfo(ctx, srv, cust.Username)
if err != nil {
return nil, err
}
return fs.ConfigChoose("choose_device_result", "config_device", `Please select the device to use. Normally this will be Jotta`, len(acc.Devices), func(i int) (string, string) {
return acc.Devices[i].Name, ""
})
case "choose_device_result":
device := config.Result
m.Set(configDevice, device)
oAuthClient, _, err := getOAuthClient(ctx, name, m)
if err != nil {
return nil, err
}
srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
username, _ := m.Get(configUsername)
dev, err := getDeviceInfo(ctx, srv, path.Join(username, device))
if err != nil {
return nil, err
}
return fs.ConfigChoose("choose_device_mountpoint", "config_mountpoint", `Please select the mountpoint to use. Normally this will be Archive.`, len(dev.MountPoints), func(i int) (string, string) {
return dev.MountPoints[i].Name, ""
})
case "choose_device_mountpoint":
mountpoint := config.Result
m.Set(configMountpoint, mountpoint)
return fs.ConfigGoto("end")
case "end":
// All the config flows end up here in case we need to carry on with something
return nil, nil
}
return nil, fmt.Errorf("unknown state %q", config.State)
}
// Options defines the configuration for this backend
type Options struct {
Device string `config:"device"`
@@ -350,21 +217,10 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// joinPath joins two path/url elements
//
// Does not perform clean on the result like path.Join does,
// which breaks urls by changing prefix "https://" into "https:/".
func joinPath(base string, rel string) string {
if rel == "" {
return base
}
if strings.HasSuffix(base, "/") {
return base + strings.TrimPrefix(rel, "/")
}
if strings.HasPrefix(rel, "/") {
return strings.TrimSuffix(base, "/") + rel
}
return base + "/" + rel
// parsePath parses a jottacloud 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
}
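
The new joinPath helper above avoids path.Join for URLs because path.Join runs Clean on its result, which collapses the double slash after the scheme. A quick demonstration:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// joinPath is the helper from the hunk above, repeated here for the demo.
func joinPath(base string, rel string) string {
	if rel == "" {
		return base
	}
	if strings.HasSuffix(base, "/") {
		return base + strings.TrimPrefix(rel, "/")
	}
	if strings.HasPrefix(rel, "/") {
		return strings.TrimSuffix(base, "/") + rel
	}
	return base + "/" + rel
}

func main() {
	fmt.Println(path.Join("https://www.jottacloud.com/", "/s/abc")) // https:/www.jottacloud.com/s/abc (scheme broken)
	fmt.Println(joinPath("https://www.jottacloud.com/", "/s/abc"))  // https://www.jottacloud.com/s/abc
}
```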
// retryErrorCodes is a slice of error codes that we will retry
@@ -386,6 +242,110 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
func teliaCloudConfig(ctx context.Context, name string, m configmap.Mapper) {
teliaCloudOauthConfig := &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: teliaCloudAuthURL,
TokenURL: teliaCloudTokenURL,
},
ClientID: teliaCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
}
err := oauthutil.Config(ctx, "jottacloud", name, m, teliaCloudOauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
return
}
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, teliaCloudOauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
}
srv := rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
if err != nil {
log.Fatalf("Failed to setup mountpoint: %s", err)
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
m.Set("configVersion", strconv.Itoa(configVersion))
m.Set(configClientID, teliaCloudClientID)
m.Set(configTokenURL, teliaCloudTokenURL)
}
// v1config configure a jottacloud backend using legacy authentication
func v1config(ctx context.Context, name string, m configmap.Mapper) {
srv := rest.NewClient(fshttp.NewClient(ctx))
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
if config.Confirm(false) {
deviceRegistration, err := registerDevice(ctx, srv)
if err != nil {
log.Fatalf("Failed to register device: %v", err)
}
m.Set(configClientID, deviceRegistration.ClientID)
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
}
clientID, ok := m.Get(configClientID)
if !ok {
clientID = v1ClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = v1EncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
oauthConfig.Endpoint.AuthURL = v1tokenURL
oauthConfig.Endpoint.TokenURL = v1tokenURL
fmt.Printf("Username> ")
username := config.ReadLine()
password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")
token, err := doAuthV1(ctx, srv, username, password)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
}
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
log.Fatalf("Error while saving token: %s", err)
}
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
}
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
if err != nil {
log.Fatalf("Failed to setup mountpoint: %s", err)
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
m.Set("configVersion", strconv.Itoa(v1configVersion))
}
// registerDevice register a new device for use with the jottacloud API
func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
// random generator to generate random device names
@@ -404,7 +364,7 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis
opts := rest.Opts{
Method: "POST",
RootURL: legacyRegisterURL,
RootURL: v1registerURL,
ContentType: "application/x-www-form-urlencoded",
ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
Parameters: values,
@@ -415,13 +375,8 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis
return deviceRegistration, err
}
var errAuthCodeRequired = errors.New("auth code required")
// doLegacyAuth runs the actual token request for V1 authentication
//
// Call this first with blank authCode. If errAuthCodeRequired is
// returned then call it again with an authCode
func doLegacyAuth(ctx context.Context, srv *rest.Client, oauthConfig *oauth2.Config, username, password, authCode string) (token oauth2.Token, err error) {
// doAuthV1 runs the actual token request for V1 authentication
func doAuthV1(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
// prepare our token request with username and password
values := url.Values{}
values.Set("grant_type", "PASSWORD")
@@ -435,19 +390,22 @@ func doLegacyAuth(ctx context.Context, srv *rest.Client, oauthConfig *oauth2.Con
ContentType: "application/x-www-form-urlencoded",
Parameters: values,
}
if authCode != "" {
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
}
// do the first request
var jsonToken api.TokenJSON
resp, err := srv.CallJSON(ctx, &opts, nil, &jsonToken)
if err != nil && authCode == "" {
if err != nil {
// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
if resp != nil {
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
return token, errAuthCodeRequired
fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
fmt.Printf("Enter verification code> ")
authCode := config.ReadLine()
authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
}
}
}
@@ -459,11 +417,51 @@ func doLegacyAuth(ctx context.Context, srv *rest.Client, oauthConfig *oauth2.Con
return token, err
}
// doTokenAuth runs the actual token request for V2 authentication
func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 string) (token oauth2.Token, tokenEndpoint string, err error) {
// v2config configure a jottacloud backend using the modern JottaCli token based authentication
func v2config(ctx context.Context, name string, m configmap.Mapper) {
srv := rest.NewClient(fshttp.NewClient(ctx))
fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
fmt.Printf("Login Token> ")
loginToken := config.ReadLine()
m.Set(configClientID, "jottacli")
m.Set(configClientSecret, "")
token, err := doAuthV2(ctx, srv, loginToken, m)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
}
err = oauthutil.PutToken(name, m, &token, true)
if err != nil {
log.Fatalf("Error while saving token: %s", err)
}
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
}
srv = rest.NewClient(oAuthClient).SetRoot(rootURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
device, mountpoint, err := setupMountpoint(ctx, srv, apiSrv)
if err != nil {
log.Fatalf("Failed to setup mountpoint: %s", err)
}
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
m.Set("configVersion", strconv.Itoa(configVersion))
}
// doAuthV2 runs the actual token request for V2 authentication
func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) {
loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
if err != nil {
return token, "", err
return token, err
}
// decode login token
@@ -471,7 +469,7 @@ func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 stri
decoder := json.NewDecoder(bytes.NewReader(loginTokenBytes))
err = decoder.Decode(&loginToken)
if err != nil {
return token, "", err
return token, err
}
// retrieve endpoint urls
@@ -480,14 +478,19 @@ func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 stri
RootURL: loginToken.WellKnownLink,
}
var wellKnown api.WellKnown
_, err = apiSrv.CallJSON(ctx, &opts, nil, &wellKnown)
_, err = srv.CallJSON(ctx, &opts, nil, &wellKnown)
if err != nil {
return token, "", err
return token, err
}
// save the tokenurl
oauthConfig.Endpoint.AuthURL = wellKnown.TokenEndpoint
oauthConfig.Endpoint.TokenURL = wellKnown.TokenEndpoint
m.Set(configTokenURL, wellKnown.TokenEndpoint)
// prepare our token request with username and password
values := url.Values{}
values.Set("client_id", defaultClientID)
values.Set("client_id", "jottacli")
values.Set("grant_type", "password")
values.Set("password", loginToken.AuthToken)
values.Set("scope", "offline_access+openid")
@@ -495,33 +498,68 @@ func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 stri
values.Encode()
opts = rest.Opts{
Method: "POST",
RootURL: wellKnown.TokenEndpoint,
RootURL: oauthConfig.Endpoint.AuthURL,
ContentType: "application/x-www-form-urlencoded",
Body: strings.NewReader(values.Encode()),
}
// do the first request
var jsonToken api.TokenJSON
_, err = apiSrv.CallJSON(ctx, &opts, nil, &jsonToken)
_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
if err != nil {
return token, "", err
return token, err
}
token.AccessToken = jsonToken.AccessToken
token.RefreshToken = jsonToken.RefreshToken
token.TokenType = jsonToken.TokenType
token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
return token, wellKnown.TokenEndpoint, err
return token, err
}
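
As the v2 flow above shows, the personal login token pasted during setup is raw-URL base64 wrapping a small JSON document; it is decoded, the well-known link inside it is fetched to discover the token endpoint, and the auth token is then exchanged for OAuth tokens with grant_type=password. The sketch below shows just the decode step; the JSON field names are assumptions for illustration only.

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// loginToken holds the fields the config flow reads from the pasted token.
// The JSON key names here are assumed for the example.
type loginToken struct {
	Username      string `json:"username"`
	WellKnownLink string `json:"well_known_link"`
	AuthToken     string `json:"auth_token"`
}

func main() {
	// Build a fake token the same way Jottacloud does, then decode it.
	fake := base64.RawURLEncoding.EncodeToString([]byte(
		`{"username":"user@example.com","well_known_link":"https://id.example.com/.well-known/openid-configuration","auth_token":"xyz"}`))

	data, err := base64.RawURLEncoding.DecodeString(fake)
	if err != nil {
		panic(err)
	}
	var lt loginToken
	if err := json.Unmarshal(data, &lt); err != nil {
		panic(err)
	}
	// The well-known link is then fetched to find the token endpoint,
	// and AuthToken is exchanged for access/refresh tokens.
	fmt.Println(lt.Username, lt.WellKnownLink)
}
```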
// setupMountpoint sets up a custom device and mountpoint if desired by the user
func setupMountpoint(ctx context.Context, srv *rest.Client, apiSrv *rest.Client) (device, mountpoint string, err error) {
cust, err := getCustomerInfo(ctx, apiSrv)
if err != nil {
return "", "", err
}
acc, err := getDriveInfo(ctx, srv, cust.Username)
if err != nil {
return "", "", err
}
var deviceNames []string
for i := range acc.Devices {
deviceNames = append(deviceNames, acc.Devices[i].Name)
}
fmt.Printf("Please select the device to use. Normally this will be Jotta\n")
device = config.Choose("Devices", deviceNames, nil, false)
dev, err := getDeviceInfo(ctx, srv, path.Join(cust.Username, device))
if err != nil {
return "", "", err
}
if len(dev.MountPoints) == 0 {
return "", "", errors.New("no mountpoints for selected device")
}
var mountpointNames []string
for i := range dev.MountPoints {
mountpointNames = append(mountpointNames, dev.MountPoints[i].Name)
}
fmt.Printf("Please select the mountpoint to user. Normally this will be Archive\n")
mountpoint = config.Choose("Mountpoints", mountpointNames, nil, false)
return device, mountpoint, err
}
// getCustomerInfo queries general information about the account
func getCustomerInfo(ctx context.Context, apiSrv *rest.Client) (info *api.CustomerInfo, err error) {
func getCustomerInfo(ctx context.Context, srv *rest.Client) (info *api.CustomerInfo, err error) {
opts := rest.Opts{
Method: "GET",
Path: "account/v1/customer",
}
_, err = apiSrv.CallJSON(ctx, &opts, nil, &info)
_, err = srv.CallJSON(ctx, &opts, nil, &info)
if err != nil {
return nil, errors.Wrap(err, "couldn't get customer info")
}
@@ -638,7 +676,7 @@ func (f *Fs) filePath(file string) string {
// This filter catches all refresh requests, reads the body,
// changes the case and then sends it on
func grantTypeFilter(req *http.Request) {
if legacyTokenURL == req.URL.String() {
if v1tokenURL == req.URL.String() {
// read the entire body
refreshBody, err := ioutil.ReadAll(req.Body)
if err != nil {
@@ -654,50 +692,53 @@ func grantTypeFilter(req *http.Request) {
}
}
func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuthClient *http.Client, ts *oauthutil.TokenSource, err error) {
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Check config version
var ver int
version, ok := m.Get("configVersion")
if ok {
ver, err = strconv.Atoi(version)
if err != nil {
return nil, nil, errors.New("Failed to parse config version")
return nil, errors.New("Failed to parse config version")
}
ok = (ver == configVersion) || (ver == legacyConfigVersion)
ok = (ver == configVersion) || (ver == v1configVersion)
}
if !ok {
return nil, nil, errors.New("Outdated config - please reconfigure this backend")
return nil, errors.New("Outdated config - please reconfigure this backend")
}
baseClient := fshttp.NewClient(ctx)
oauthConfig := &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: defaultTokenURL,
TokenURL: defaultTokenURL,
},
}
if ver == configVersion {
oauthConfig.ClientID = defaultClientID
oauthConfig.ClientID = "jottacli"
// if custom endpoints are set use them else stick with defaults
if tokenURL, ok := m.Get(configTokenURL); ok {
oauthConfig.Endpoint.TokenURL = tokenURL
// jottacloud is weird. we need to use the tokenURL as authURL
oauthConfig.Endpoint.AuthURL = tokenURL
}
} else if ver == legacyConfigVersion {
} else if ver == v1configVersion {
clientID, ok := m.Get(configClientID)
if !ok {
clientID = legacyClientID
clientID = v1ClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = legacyEncryptedClientSecret
clientSecret = v1EncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
oauthConfig.Endpoint.TokenURL = legacyTokenURL
oauthConfig.Endpoint.AuthURL = legacyTokenURL
oauthConfig.Endpoint.TokenURL = v1tokenURL
oauthConfig.Endpoint.AuthURL = v1tokenURL
// add the request filter to fix token refresh
if do, ok := baseClient.Transport.(interface {
@@ -710,29 +751,13 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
}
// Create OAuth Client
oAuthClient, ts, err = oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, baseClient)
if err != nil {
return nil, nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
}
return oAuthClient, ts, nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
oAuthClient, ts, err := getOAuthClient(ctx, name, m)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
}
rootIsDir := strings.HasSuffix(root, "/")
root = strings.Trim(root, "/")
root = parsePath(root)
f := &Fs{
name: name,
@@ -1270,7 +1295,8 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
if result.PublicSharePath == "" {
return "", errors.New("couldn't create public link - no link path received")
}
return joinPath(baseURL, result.PublicSharePath), nil
link = path.Join(baseURL, result.PublicSharePath)
return link, nil
}
// About gets quota information

View File

@@ -534,7 +534,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return nil
}
// About reports space usage (with a MiB precision)
// About reports space usage (with a MB precision)
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
mount, err := f.client.MountsDetails(f.mountID)
if err != nil {

View File

@@ -27,7 +27,6 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers"
"golang.org/x/text/unicode/norm"
)
// Constants
@@ -74,34 +73,25 @@ points, as you explicitly acknowledge that they should be skipped.`,
Advanced: true,
}, {
Name: "zero_size_links",
Help: `Assume the Stat size of links is zero (and read them instead) (Deprecated)
Help: `Assume the Stat size of links is zero (and read them instead)
Rclone used to use the Stat size of links as the link size, but this fails in quite a few places
On some virtual filesystems (such as LucidLink), reading a link size via a Stat call always returns 0.
However, on unix it reads as the length of the text in the link. This may cause errors like this when
syncing:
- Windows
- On some virtual filesystems (such as LucidLink)
- Android
Failed to copy: corrupted on transfer: sizes differ 0 vs 13
So rclone now always reads the link
`,
Setting this flag causes rclone to read the link and use that as the size of the link
instead of 0 which in most cases fixes the problem.`,
Default: false,
Advanced: true,
}, {
Name: "unicode_normalization",
Help: `Apply unicode NFC normalization to paths and filenames
Name: "no_unicode_normalization",
Help: `Don't apply unicode normalization to paths and filenames (Deprecated)
This flag can be used to normalize file names read from the local
filesystem into unicode NFC form.
Rclone does not normally touch the encoding of file names it reads from
the file system.
This can be useful when using macOS as it normally provides decomposed (NFD)
unicode which in some languages (e.g. Korean) doesn't display properly on
some OSes.
Note that rclone compares filenames with unicode normalization in the sync
routine so this flag shouldn't normally be used.`,
This flag is deprecated now. Rclone no longer normalizes unicode file
names, but it compares them with unicode normalization in the sync
routine instead.`,
Default: false,
Advanced: true,
}, {
@@ -206,7 +196,8 @@ type Options struct {
FollowSymlinks bool `config:"copy_links"`
TranslateSymlinks bool `config:"links"`
SkipSymlinks bool `config:"skip_links"`
UTFNorm bool `config:"unicode_normalization"`
ZeroSizeLinks bool `config:"zero_size_links"`
NoUTFNorm bool `config:"no_unicode_normalization"`
NoCheckUpdated bool `config:"no_check_updated"`
NoUNC bool `config:"nounc"`
OneFileSystem bool `config:"one_file_system"`
@@ -265,6 +256,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, errLinksAndCopyLinks
}
if opt.NoUTFNorm {
fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
}
f := &Fs{
name: name,
opt: *opt,
@@ -527,9 +522,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
func (f *Fs) cleanRemote(dir, filename string) (remote string) {
if f.opt.UTFNorm {
filename = norm.NFC.String(filename)
}
remote = path.Join(dir, f.opt.Enc.ToStandardName(filename))
if !utf8.ValidString(filename) {
@@ -1275,13 +1267,9 @@ func (o *Object) setMetadata(info os.FileInfo) {
o.modTime = info.ModTime()
o.mode = info.Mode()
o.fs.objectMetaMu.Unlock()
// Read the size of the link.
//
// The value in info.Size() is not always correct
// - Windows links read as 0 size
// - Some virtual filesystems (such as LucidLink) links read as 0 size
// - Android - some versions the links are larger than readlink suggests
if o.translatedLink {
// On Windows links read as 0 size so set the correct size here
// Optionally, users can turn this feature on with the zero_size_links flag
if (runtime.GOOS == "windows" || o.fs.opt.ZeroSizeLinks) && o.translatedLink {
linkdst, err := os.Readlink(o.path)
if err != nil {
fs.Errorf(o, "Failed to read link size: %v", err)

View File

@@ -6,8 +6,8 @@ import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"io"
"log"
"time"
"github.com/pkg/errors"
@@ -48,7 +48,7 @@ func (w *BinWriter) Reader() io.Reader {
// WritePu16 writes a short as unsigned varint
func (w *BinWriter) WritePu16(val int) {
if val < 0 || val > 65535 {
panic(fmt.Sprintf("Invalid UInt16 %v", val))
log.Fatalf("Invalid UInt16 %v", val)
}
w.WritePu64(int64(val))
}
@@ -56,7 +56,7 @@ func (w *BinWriter) WritePu16(val int) {
// WritePu32 writes a signed long as unsigned varint
func (w *BinWriter) WritePu32(val int64) {
if val < 0 || val > 4294967295 {
panic(fmt.Sprintf("Invalid UInt32 %v", val))
log.Fatalf("Invalid UInt32 %v", val)
}
w.WritePu64(val)
}
@@ -64,7 +64,7 @@ func (w *BinWriter) WritePu32(val int64) {
// WritePu64 writes an unsigned (actually, signed) long as unsigned varint
func (w *BinWriter) WritePu64(val int64) {
if val < 0 {
panic(fmt.Sprintf("Invalid UInt64 %v", val))
log.Fatalf("Invalid UInt64 %v", val)
}
w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}
@@ -123,7 +123,7 @@ func (r *BinReader) check(err error) bool {
r.err = err
}
if err != io.EOF {
panic(fmt.Sprintf("Error parsing response: %v", err))
log.Fatalf("Error parsing response: %v", err)
}
return false
}
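
The Write/Read helpers above encode every integer as an unsigned varint via encoding/binary, so small values take a single byte on the wire and the size grows with the value. For reference:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, binary.MaxVarintLen64)
	for _, v := range []uint64{1, 300, 4294967295} {
		n := binary.PutUvarint(buf, v)
		fmt.Printf("%d -> % x (%d bytes)\n", v, buf[:n], n)
	}
	// Output:
	// 1 -> 01 (1 bytes)
	// 300 -> ac 02 (2 bytes)
	// 4294967295 -> ff ff ff ff 0f (5 bytes)
}
```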

View File

@@ -80,7 +80,7 @@ var oauthConfig = &oauth2.Config{
// Register with Fs
func init() {
MrHashType = hash.RegisterHash("mailru", "MailruHash", 40, mrhash.New)
MrHashType = hash.RegisterHash("MailruHash", 40, mrhash.New)
fs.Register(&fs.RegInfo{
Name: "mailru",
Description: "Mail.ru Cloud",

View File

@@ -9,6 +9,7 @@ import (
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"net/url"
"path"
@@ -51,8 +52,8 @@ const (
driveTypePersonal = "personal"
driveTypeBusiness = "business"
driveTypeSharepoint = "documentLibrary"
defaultChunkSize = 10 * fs.Mebi
chunkSizeMultiple = 320 * fs.Kibi
defaultChunkSize = 10 * fs.MebiByte
chunkSizeMultiple = 320 * fs.KibiByte
regionGlobal = "global"
regionUS = "us"
@@ -93,12 +94,215 @@ var (
// Register with Fs
func init() {
QuickXorHashType = hash.RegisterHash("quickxor", "QuickXorHash", 40, quickxorhash.New)
QuickXorHashType = hash.RegisterHash("QuickXorHash", 40, quickxorhash.New)
fs.Register(&fs.RegInfo{
Name: "onedrive",
Description: "Microsoft OneDrive",
NewFs: NewFs,
Config: Config,
Config: func(ctx context.Context, name string, m configmap.Mapper) {
region, _ := m.Get("region")
graphURL := graphAPIEndpoint[region] + "/v1.0"
oauthConfig.Endpoint = oauth2.Endpoint{
AuthURL: authEndpoint[region] + authPath,
TokenURL: authEndpoint[region] + tokenPath,
}
ci := fs.GetConfig(ctx)
err := oauthutil.Config(ctx, "onedrive", name, m, oauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
return
}
// Stop if we are running non-interactive config
if ci.AutoConfirm {
return
}
type driveResource struct {
DriveID string `json:"id"`
DriveName string `json:"name"`
DriveType string `json:"driveType"`
}
type drivesResponse struct {
Drives []driveResource `json:"value"`
}
type siteResource struct {
SiteID string `json:"id"`
SiteName string `json:"displayName"`
SiteURL string `json:"webUrl"`
}
type siteResponse struct {
Sites []siteResource `json:"value"`
}
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to configure OneDrive: %v", err)
}
srv := rest.NewClient(oAuthClient)
var opts rest.Opts
var finalDriveID string
var siteID string
var relativePath string
switch config.Choose("Your choice",
[]string{"onedrive", "sharepoint", "url", "search", "driveid", "siteid", "path"},
[]string{
"OneDrive Personal or Business",
"Root Sharepoint site",
"Sharepoint site name or URL (e.g. mysite or https://contoso.sharepoint.com/sites/mysite)",
"Search for a Sharepoint site",
"Type in driveID (advanced)",
"Type in SiteID (advanced)",
"Sharepoint server-relative path (advanced, e.g. /teams/hr)",
},
false) {
case "onedrive":
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/me/drives",
}
case "sharepoint":
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites/root/drives",
}
case "driveid":
fmt.Printf("Paste your Drive ID here> ")
finalDriveID = config.ReadLine()
case "siteid":
fmt.Printf("Paste your Site ID here> ")
siteID = config.ReadLine()
case "url":
fmt.Println("Example: \"https://contoso.sharepoint.com/sites/mysite\" or \"mysite\"")
fmt.Printf("Paste your Site URL here> ")
siteURL := config.ReadLine()
re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
match := re.FindStringSubmatch(siteURL)
if len(match) == 2 {
relativePath = "/sites/" + match[1]
} else {
relativePath = "/sites/" + siteURL
}
case "path":
fmt.Printf("Enter server-relative URL here> ")
relativePath = config.ReadLine()
case "search":
fmt.Printf("What to search for> ")
searchTerm := config.ReadLine()
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites?search=" + searchTerm,
}
sites := siteResponse{}
_, err := srv.CallJSON(ctx, &opts, nil, &sites)
if err != nil {
log.Fatalf("Failed to query available sites: %v", err)
}
if len(sites.Sites) == 0 {
log.Fatalf("Search for '%s' returned no results", searchTerm)
} else {
fmt.Printf("Found %d sites, please select the one you want to use:\n", len(sites.Sites))
for index, site := range sites.Sites {
fmt.Printf("%d: %s (%s) id=%s\n", index, site.SiteName, site.SiteURL, site.SiteID)
}
siteID = sites.Sites[config.ChooseNumber("Choose drive to use:", 0, len(sites.Sites)-1)].SiteID
}
}
// if we use server-relative URL for finding the drive
if relativePath != "" {
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites/root:" + relativePath,
}
site := siteResource{}
_, err := srv.CallJSON(ctx, &opts, nil, &site)
if err != nil {
log.Fatalf("Failed to query available site by relative path: %v", err)
}
siteID = site.SiteID
}
// if we have a siteID we need to ask for the drives
if siteID != "" {
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites/" + siteID + "/drives",
}
}
// We don't have the final ID yet?
// query Microsoft Graph
if finalDriveID == "" {
drives := drivesResponse{}
_, err := srv.CallJSON(ctx, &opts, nil, &drives)
if err != nil {
log.Fatalf("Failed to query available drives: %v", err)
}
// Also call /me/drive as sometimes /me/drives doesn't return it #4068
if opts.Path == "/me/drives" {
opts.Path = "/me/drive"
meDrive := driveResource{}
_, err := srv.CallJSON(ctx, &opts, nil, &meDrive)
if err != nil {
log.Fatalf("Failed to query available drives: %v", err)
}
found := false
for _, drive := range drives.Drives {
if drive.DriveID == meDrive.DriveID {
found = true
break
}
}
// add the me drive if not found already
if !found {
fs.Debugf(nil, "Adding %v to drives list from /me/drive", meDrive)
drives.Drives = append(drives.Drives, meDrive)
}
}
if len(drives.Drives) == 0 {
log.Fatalf("No drives found")
} else {
fmt.Printf("Found %d drives, please select the one you want to use:\n", len(drives.Drives))
for index, drive := range drives.Drives {
fmt.Printf("%d: %s (%s) id=%s\n", index, drive.DriveName, drive.DriveType, drive.DriveID)
}
finalDriveID = drives.Drives[config.ChooseNumber("Choose drive to use:", 0, len(drives.Drives)-1)].DriveID
}
}
// Test the driveID and get drive type
opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/drives/" + finalDriveID + "/root"}
var rootItem api.Item
_, err = srv.CallJSON(ctx, &opts, nil, &rootItem)
if err != nil {
log.Fatalf("Failed to query root for drive %s: %v", finalDriveID, err)
}
fmt.Printf("Found drive '%s' of type '%s', URL: %s\nIs that okay?\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL)
// This does not work, YET :)
if !config.ConfirmWithConfig(ctx, m, "config_drive_ok", true) {
log.Fatalf("Cancelled by user")
}
m.Set(configDriveID, finalDriveID)
m.Set(configDriveType, rootItem.ParentReference.DriveType)
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "region",
Help: "Choose national cloud region for OneDrive.",
@@ -261,266 +465,6 @@ At the time of writing this only works with OneDrive personal paid accounts.
})
}
type driveResource struct {
DriveID string `json:"id"`
DriveName string `json:"name"`
DriveType string `json:"driveType"`
}
type drivesResponse struct {
Drives []driveResource `json:"value"`
}
type siteResource struct {
SiteID string `json:"id"`
SiteName string `json:"displayName"`
SiteURL string `json:"webUrl"`
}
type siteResponse struct {
Sites []siteResource `json:"value"`
}
// Get the region and graphURL from the config
func getRegionURL(m configmap.Mapper) (region, graphURL string) {
region, _ = m.Get("region")
graphURL = graphAPIEndpoint[region] + "/v1.0"
return region, graphURL
}
// Config for chooseDrive
type chooseDriveOpt struct {
opts rest.Opts
finalDriveID string
siteID string
relativePath string
}
// chooseDrive returns a query to choose which drive the user is interested in
func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest.Client, opt chooseDriveOpt) (*fs.ConfigOut, error) {
_, graphURL := getRegionURL(m)
// if we use server-relative URL for finding the drive
if opt.relativePath != "" {
opt.opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites/root:" + opt.relativePath,
}
site := siteResource{}
_, err := srv.CallJSON(ctx, &opt.opts, nil, &site)
if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available site by relative path: %v", err))
}
opt.siteID = site.SiteID
}
// if we have a siteID we need to ask for the drives
if opt.siteID != "" {
opt.opts = rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites/" + opt.siteID + "/drives",
}
}
drives := drivesResponse{}
// We don't have the final ID yet?
// query Microsoft Graph
if opt.finalDriveID == "" {
_, err := srv.CallJSON(ctx, &opt.opts, nil, &drives)
if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available drives: %v", err))
}
// Also call /me/drive as sometimes /me/drives doesn't return it #4068
if opt.opts.Path == "/me/drives" {
opt.opts.Path = "/me/drive"
meDrive := driveResource{}
_, err := srv.CallJSON(ctx, &opt.opts, nil, &meDrive)
if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available drives: %v", err))
}
found := false
for _, drive := range drives.Drives {
if drive.DriveID == meDrive.DriveID {
found = true
break
}
}
// add the me drive if not found already
if !found {
fs.Debugf(nil, "Adding %v to drives list from /me/drive", meDrive)
drives.Drives = append(drives.Drives, meDrive)
}
}
} else {
drives.Drives = append(drives.Drives, driveResource{
DriveID: opt.finalDriveID,
DriveName: "Chosen Drive ID",
DriveType: "drive",
})
}
if len(drives.Drives) == 0 {
return fs.ConfigError("choose_type", "No drives found")
}
return fs.ConfigChoose("driveid_final", "config_driveid", "Select drive you want to use", len(drives.Drives), func(i int) (string, string) {
drive := drives.Drives[i]
return drive.DriveID, fmt.Sprintf("%s (%s)", drive.DriveName, drive.DriveType)
})
}
// Config the backend
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
region, graphURL := getRegionURL(m)
if config.State == "" {
oauthConfig.Endpoint = oauth2.Endpoint{
AuthURL: authEndpoint[region] + authPath,
TokenURL: authEndpoint[region] + tokenPath,
}
return oauthutil.ConfigOut("choose_type", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
}
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure OneDrive")
}
srv := rest.NewClient(oAuthClient)
switch config.State {
case "choose_type":
return fs.ConfigChooseFixed("choose_type_done", "config_type", "Type of connection", []fs.OptionExample{{
Value: "onedrive",
Help: "OneDrive Personal or Business",
}, {
Value: "sharepoint",
Help: "Root Sharepoint site",
}, {
Value: "url",
Help: "Sharepoint site name or URL (e.g. mysite or https://contoso.sharepoint.com/sites/mysite)",
}, {
Value: "search",
Help: "Search for a Sharepoint site",
}, {
Value: "driveid",
Help: "Type in driveID (advanced)",
}, {
Value: "siteid",
Help: "Type in SiteID (advanced)",
}, {
Value: "path",
Help: "Sharepoint server-relative path (advanced, e.g. /teams/hr)",
}})
case "choose_type_done":
// Jump to next state according to config chosen
return fs.ConfigGoto(config.Result)
case "onedrive":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
opts: rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/me/drives",
},
})
case "sharepoint":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
opts: rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites/root/drives",
},
})
case "driveid":
return fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
case "driveid_end":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
finalDriveID: config.Result,
})
case "siteid":
return fs.ConfigInput("siteid_end", "config_siteid", "Site ID")
case "siteid_end":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
siteID: config.Result,
})
case "url":
return fs.ConfigInput("url_end", "config_site_url", `Site URL
Example: "https://contoso.sharepoint.com/sites/mysite" or "mysite"
`)
case "url_end":
siteURL := config.Result
re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
match := re.FindStringSubmatch(siteURL)
if len(match) == 2 {
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
relativePath: "/sites/" + match[1],
})
}
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
relativePath: "/sites/" + siteURL,
})
case "path":
return fs.ConfigInput("path_end", "config_sharepoint_url", `Server-relative URL`)
case "path_end":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
relativePath: config.Result,
})
case "search":
return fs.ConfigInput("search_end", "config_search_term", `Search term`)
case "search_end":
searchTerm := config.Result
opts := rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/sites?search=" + searchTerm,
}
sites := siteResponse{}
_, err := srv.CallJSON(ctx, &opts, nil, &sites)
if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available sites: %v", err))
}
if len(sites.Sites) == 0 {
return fs.ConfigError("choose_type", fmt.Sprintf("search for %q returned no results", searchTerm))
}
return fs.ConfigChoose("search_sites", "config_site", `Select the Site you want to use`, len(sites.Sites), func(i int) (string, string) {
site := sites.Sites[i]
return site.SiteID, fmt.Sprintf("%s (%s)", site.SiteName, site.SiteURL)
})
case "search_sites":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
siteID: config.Result,
})
case "driveid_final":
finalDriveID := config.Result
// Test the driveID and get drive type
opts := rest.Opts{
Method: "GET",
RootURL: graphURL,
Path: "/drives/" + finalDriveID + "/root"}
var rootItem api.Item
_, err = srv.CallJSON(ctx, &opts, nil, &rootItem)
if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query root for drive %q: %v", finalDriveID, err))
}
m.Set(configDriveID, finalDriveID)
m.Set(configDriveType, rootItem.ParentReference.DriveType)
return fs.ConfigConfirm("driveid_final_end", true, "config_drive_ok", fmt.Sprintf("Drive OK?\n\nFound drive %q of type %q\nURL: %s\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL))
case "driveid_final_end":
if config.Result == "true" {
return nil, nil
}
return fs.ConfigGoto("choose_type")
}
return nil, fmt.Errorf("unknown state %q", config.State)
}
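For reference, the state-machine style used in Config above threads each answer back in through fs.ConfigIn and returns the next question as a *fs.ConfigOut. A minimal sketch of the pattern follows, assuming the usual imports (context, fmt, configmap, fs); the state and option names are hypothetical and not part of this change:

// Minimal sketch of the fs.ConfigIn/ConfigOut pattern used above.
// "got_value" and "example_value" are hypothetical names for illustration.
func exampleConfig(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
	switch config.State {
	case "":
		// First call: ask the user a question and name the next state.
		return fs.ConfigInput("got_value", "config_example", "Enter a value")
	case "got_value":
		// The answer to the previous question arrives in config.Result.
		m.Set("example_value", config.Result)
		return nil, nil // nil, nil means the config is complete
	}
	return nil, fmt.Errorf("unknown state %q", config.State)
}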
// Options defines the configuration for this backend
type Options struct {
Region string `config:"region"`
@@ -622,9 +566,6 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
if len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
retry = true
fs.Debugf(nil, "Should retry: %v", err)
} else if err != nil && strings.Contains(err.Error(), "Unable to initialize RPS") {
retry = true
fs.Debugf(nil, "HTTP 401: Unable to initialize RPS. Trying again.")
}
case 429: // Too Many Requests.
// see https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online
@@ -752,7 +693,7 @@ func errorHandler(resp *http.Response) error {
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.SizeSuffixBase
const minChunkSize = fs.Byte
if cs%chunkSizeMultiple != 0 {
return errors.Errorf("%s is not a multiple of %s", cs, chunkSizeMultiple)
}
@@ -1916,7 +1857,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
fs.Debugf(o, "Cancelling multipart upload: %v", err)
cancelErr := o.cancelUploadSession(ctx, uploadURL)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v (upload failed due to: %v)", cancelErr, err)
fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
}
})()
@@ -1941,11 +1882,11 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
return info, nil
}
// Update the content of a remote file within 4 MiB size in one single request
// Update the content of a remote file within 4MB size in one single request
// This function will set modtime after uploading, which will create a new version for the remote file
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, modTime time.Time, options ...fs.OpenOption) (info *api.Item, err error) {
if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4 MiB")
return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
}
fs.Debugf(o, "Starting singlepart upload")

View File

@@ -88,7 +88,7 @@ func init() {
Note that these chunks are buffered in memory so increasing them will
increase memory use.`,
Default: 10 * fs.Mebi,
Default: 10 * fs.MebiByte,
Advanced: true,
}},
})

View File

@@ -12,6 +12,7 @@ import (
"context"
"fmt"
"io"
"log"
"net/http"
"net/url"
"path"
@@ -71,7 +72,7 @@ func init() {
Name: "pcloud",
Description: "Pcloud",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
optc := new(Options)
err := configstruct.Set(m, optc)
if err != nil {
@@ -93,11 +94,14 @@ func init() {
fs.Debugf(nil, "pcloud: got hostname %q", hostname)
return nil
}
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
opt := oauthutil.Options{
CheckAuth: checkAuth,
StateBlankOK: true, // pCloud seems to drop the state parameter now - see #4210
})
}
err = oauthutil.Config(ctx, "pcloud", name, m, oauthConfig, &opt)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: config.ConfigEncoding,

View File

@@ -20,6 +20,7 @@ import (
"encoding/json"
"fmt"
"io"
"log"
"net"
"net/http"
"net/url"
@@ -77,10 +78,11 @@ func init() {
Name: "premiumizeme",
Description: "premiumize.me",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
Config: func(ctx context.Context, name string, m configmap.Mapper) {
err := oauthutil.Config(ctx, "premiumizeme", name, m, oauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: "api_key",

View File

@@ -2,6 +2,7 @@ package putio
import (
"context"
"log"
"regexp"
"time"
@@ -34,7 +35,7 @@ const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultChunkSize = 48 * fs.Mebi
defaultChunkSize = 48 * fs.MebiByte
)
var (
@@ -59,11 +60,14 @@ func init() {
Name: "putio",
Description: "Put.io",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: putioConfig,
NoOffline: true,
})
Config: func(ctx context.Context, name string, m configmap.Mapper) {
opt := oauthutil.Options{
NoOffline: true,
}
err := oauthutil.Config(ctx, "putio", name, m, putioConfig, &opt)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: config.ConfigEncoding,

View File

@@ -80,7 +80,7 @@ func init() {
Help: `Cutoff for switching to chunked upload
Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5 GiB.`,
The minimum is 0 and the maximum is 5GB.`,
Default: defaultUploadCutoff,
Advanced: true,
}, {

View File

@@ -26,6 +26,7 @@ import (
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/endpoints"
@@ -58,7 +59,7 @@ import (
func init() {
fs.Register(&fs.RegInfo{
Name: "s3",
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, SeaweedFS, and Tencent COS",
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, and Tencent COS",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
@@ -91,9 +92,6 @@ func init() {
}, {
Value: "Scaleway",
Help: "Scaleway Object Storage",
}, {
Value: "SeaweedFS",
Help: "SeaweedFS S3",
}, {
Value: "StackPath",
Help: "StackPath Object Storage",
@@ -595,10 +593,6 @@ func init() {
Value: "sgp1.digitaloceanspaces.com",
Help: "Digital Ocean Spaces Singapore 1",
Provider: "DigitalOcean",
}, {
Value: "localhost:8333",
Help: "SeaweedFS S3 localhost",
Provider: "SeaweedFS",
}, {
Value: "s3.wasabisys.com",
Help: "Wasabi US East endpoint",
@@ -1023,7 +1017,7 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
Help: `Cutoff for switching to chunked upload
Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5 GiB.`,
The minimum is 0 and the maximum is 5GB.`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
@@ -1045,9 +1039,9 @@ Rclone will automatically increase the chunk size when uploading a
large file of known size to stay below the 10,000 chunks limit.
Files of unknown size are uploaded with the configured
chunk_size. Since the default chunk size is 5 MiB and there can be at
chunk_size. Since the default chunk size is 5MB and there can be at
most 10,000 chunks, this means that by default the maximum size of
a file you can stream upload is 48 GiB. If you wish to stream upload
a file you can stream upload is 48GB. If you wish to stream upload
larger files then you will need to increase chunk_size.`,
Default: minChunkSize,
Advanced: true,
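As the help text above notes, the streaming limit is simply the chunk size multiplied by the 10,000 part limit. A hedged sketch of that arithmetic, with a hypothetical helper name:

// maxStreamSize is a hypothetical helper showing the arithmetic behind the
// 48 GiB figure quoted above: chunk size multiplied by the 10,000 part limit.
func maxStreamSize(chunkSize fs.SizeSuffix) fs.SizeSuffix {
	const streamUploadParts = 10000
	return chunkSize * streamUploadParts // 5 MiB * 10,000 = 50,000 MiB ≈ 48 GiB
}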
@@ -1073,7 +1067,7 @@ large file of a known size to stay below this number of chunks limit.
Any files larger than this that need to be server-side copied will be
copied in chunks of this size.
The minimum is 0 and the maximum is 5 GiB.`,
The minimum is 0 and the maximum is 5GB.`,
Default: fs.SizeSuffix(maxSizeForCopy),
Advanced: true,
}, {
@@ -1227,11 +1221,6 @@ very small even with this flag.
`,
Default: false,
Advanced: true,
}, {
Name: "no_head_object",
Help: `If set, don't HEAD objects`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -1282,7 +1271,7 @@ See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rcl
const (
metaMtime = "Mtime" // the meta key to store mtime in - e.g. X-Amz-Meta-Mtime
metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
// The maximum size of object we can COPY - this should be 5 GiB but is < 5 GB for b2 compatibility
// The maximum size of object we can COPY - this should be 5GiB but is < 5GB for b2 compatibility
// See https://forum.rclone.org/t/copying-files-within-a-b2-bucket/16680/76
maxSizeForCopy = 4768 * 1024 * 1024
maxUploadParts = 10000 // maximum allowed number of parts in a multi-part upload
@@ -1330,7 +1319,6 @@ type Options struct {
ListChunk int64 `config:"list_chunk"`
NoCheckBucket bool `config:"no_check_bucket"`
NoHead bool `config:"no_head"`
NoHeadObject bool `config:"no_head_object"`
Enc encoder.MultiEncoder `config:"encoding"`
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
@@ -1523,6 +1511,11 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
}),
ExpiryWindow: 3 * time.Minute,
},
// Pick up IAM role if we are in EKS
&stscreds.WebIdentityRoleProvider{
ExpiryWindow: 3 * time.Minute,
},
}
cred := credentials.NewChainCredentials(providers)
@@ -1700,7 +1693,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
GetTier: true,
SlowModTime: true,
}).Fill(ctx, f)
if f.rootBucket != "" && f.rootDirectory != "" && !opt.NoHeadObject && !strings.HasSuffix(root, "/") {
if f.rootBucket != "" && f.rootDirectory != "" {
// Check to see if the (bucket,directory) is actually an existing file
oldRoot := f.root
newRoot, leaf := path.Split(oldRoot)
@@ -1737,7 +1730,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Obje
o.setMD5FromEtag(aws.StringValue(info.ETag))
o.bytes = aws.Int64Value(info.Size)
o.storageClass = aws.StringValue(info.StorageClass)
} else if !o.fs.opt.NoHeadObject {
} else {
err := o.readMetaData(ctx) // reads info and meta, returning an error
if err != nil {
return nil, err
@@ -2838,23 +2831,15 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
if err != nil {
return err
}
if resp.LastModified == nil {
fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
}
o.setMetaData(resp.ETag, resp.ContentLength, resp.LastModified, resp.Metadata, resp.ContentType, resp.StorageClass)
return nil
}
func (o *Object) setMetaData(etag *string, contentLength *int64, lastModified *time.Time, meta map[string]*string, mimeType *string, storageClass *string) {
var size int64
// Ignore missing Content-Length assuming it is 0
// Some versions of ceph do this due to their apache proxies
if contentLength != nil {
size = *contentLength
if resp.ContentLength != nil {
size = *resp.ContentLength
}
o.setMD5FromEtag(aws.StringValue(etag))
o.setMD5FromEtag(aws.StringValue(resp.ETag))
o.bytes = size
o.meta = meta
o.meta = resp.Metadata
if o.meta == nil {
o.meta = map[string]*string{}
}
@@ -2869,13 +2854,15 @@ func (o *Object) setMetaData(etag *string, contentLength *int64, lastModified *t
o.md5 = hex.EncodeToString(md5sumBytes)
}
}
o.storageClass = aws.StringValue(storageClass)
if lastModified == nil {
o.storageClass = aws.StringValue(resp.StorageClass)
if resp.LastModified == nil {
fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
o.lastModified = time.Now()
} else {
o.lastModified = *lastModified
o.lastModified = *resp.LastModified
}
o.mimeType = aws.StringValue(mimeType)
o.mimeType = aws.StringValue(resp.ContentType)
return nil
}
// ModTime returns the modification time of the object
@@ -2985,26 +2972,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil {
return nil, err
}
if resp.LastModified == nil {
fs.Logf(o, "Failed to read last modified: %v", err)
}
// read size from ContentLength or ContentRange
size := resp.ContentLength
if resp.ContentRange != nil {
var contentRange = *resp.ContentRange
slash := strings.IndexRune(contentRange, '/')
if slash >= 0 {
i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
if err == nil {
size = &i
} else {
fs.Debugf(o, "Failed to find parse integer from in %q: %v", contentRange, err)
}
} else {
fs.Debugf(o, "Failed to find length in %q", contentRange)
}
}
o.setMetaData(resp.ETag, size, resp.LastModified, resp.Metadata, resp.ContentType, resp.StorageClass)
return resp.Body, nil
}
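The ContentRange handling above reads the total object size from a header of the form "bytes start-end/total". A standalone sketch under that assumption (the function name is hypothetical, not rclone code):

// sizeFromContentRange is a hypothetical standalone version of the parsing
// above: it returns the total size encoded after the '/' in a Content-Range
// header such as "bytes 0-1023/4096".
func sizeFromContentRange(contentRange string) (int64, error) {
	slash := strings.IndexRune(contentRange, '/')
	if slash < 0 {
		return 0, fmt.Errorf("no length in %q", contentRange)
	}
	return strconv.ParseInt(contentRange[slash+1:], 10, 64)
}
// sizeFromContentRange("bytes 0-1023/4096") returns 4096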
@@ -3030,9 +2997,9 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
// calculate size of parts
partSize := int(f.opt.ChunkSize)
// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
// buffers here (default 5 MiB). With a maximum number of parts (10,000) this will be a file of
// 48 GiB which seems like a not too unreasonable limit.
// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
// buffers here (default 5MB). With a maximum number of parts (10,000) this will be a file of
// 48GB which seems like a not too unreasonable limit.
if size == -1 {
warnStreamUpload.Do(func() {
fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
@@ -3041,7 +3008,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
} else {
// Adjust partSize until the number of parts is small enough.
if size/int64(partSize) >= uploadParts {
// Calculate partition size rounded up to the nearest MiB
// Calculate partition size rounded up to the nearest MB
partSize = int((((size / uploadParts) >> 20) + 1) << 20)
}
}
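A worked example of the rounding above, using hypothetical numbers:

// partSizeExample shows the shift-and-add rounding for a 100 GiB upload
// against a 10,000 part limit: size/uploadParts is about 10.24 MiB, so the
// chunk is rounded up to the next whole MiB.
func partSizeExample() int {
	size := int64(100) << 30 // 100 GiB
	const uploadParts = 10000
	partSize := int((((size / uploadParts) >> 20) + 1) << 20)
	return partSize // 11 << 20, i.e. 11 MiB chunks, well under 10,000 parts
}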

View File

@@ -296,86 +296,86 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
// Config callback for 2FA
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
func Config(ctx context.Context, name string, m configmap.Mapper) {
ci := fs.GetConfig(ctx)
serverURL, ok := m.Get(configURL)
if !ok || serverURL == "" {
// If there's no server URL, it means we're trying an operation at the backend level, like a "rclone authorize seafile"
return nil, errors.New("operation not supported on this remote. If you need a 2FA code on your account, use the command: rclone config reconnect <remote name>: ")
fmt.Print("\nOperation not supported on this remote.\nIf you need a 2FA code on your account, use the command:\n\nrclone config reconnect <remote name>:\n\n")
return
}
// Stop if we are running non-interactive config
if ci.AutoConfirm {
return
}
u, err := url.Parse(serverURL)
if err != nil {
return nil, errors.Errorf("invalid server URL %s", serverURL)
fs.Errorf(nil, "Invalid server URL %s", serverURL)
return
}
is2faEnabled, _ := m.Get(config2FA)
if is2faEnabled != "true" {
return nil, errors.New("two-factor authentication is not enabled on this account")
fmt.Println("Two-factor authentication is not enabled on this account.")
return
}
username, _ := m.Get(configUser)
if username == "" {
return nil, errors.New("a username is required")
fs.Errorf(nil, "A username is required")
return
}
password, _ := m.Get(configPassword)
if password != "" {
password, _ = obscure.Reveal(password)
}
// Just make sure we do have a password
for password == "" {
fmt.Print("Two-factor authentication: please enter your password (it won't be saved in the configuration)\npassword> ")
password = config.ReadPassword()
}
switch config.State {
case "":
// Just make sure we do have a password
if password == "" {
return fs.ConfigPassword("", "config_password", "Two-factor authentication: please enter your password (it won't be saved in the configuration)")
}
return fs.ConfigGoto("password")
case "password":
password = config.Result
if password == "" {
return fs.ConfigError("password", "Password can't be blank")
}
m.Set(configPassword, obscure.MustObscure(config.Result))
return fs.ConfigGoto("2fa")
case "2fa":
return fs.ConfigInput("2fa_do", "config_2fa", "Two-factor authentication: please enter your 2FA code")
case "2fa_do":
code := config.Result
if code == "" {
return fs.ConfigError("2fa", "2FA codes can't be blank")
// Create rest client for getAuthorizationToken
url := u.String()
if !strings.HasPrefix(url, "/") {
url += "/"
}
srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(url)
// We loop asking for a 2FA code
for {
code := ""
for code == "" {
fmt.Print("Two-factor authentication: please enter your 2FA code\n2fa code> ")
code = config.ReadLine()
}
// Create rest client for getAuthorizationToken
url := u.String()
if !strings.HasPrefix(url, "/") {
url += "/"
}
srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(url)
// We loop asking for a 2FA code
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
fmt.Println("Authenticating...")
token, err := getAuthorizationToken(ctx, srv, username, password, code)
if err != nil {
return fs.ConfigConfirm("2fa_error", true, "config_retry", fmt.Sprintf("Authentication failed: %v\n\nTry Again?", err))
fmt.Printf("Authentication failed: %v\n", err)
tryAgain := strings.ToLower(config.ReadNonEmptyLine("Do you want to try again (y/n)?"))
if tryAgain != "y" && tryAgain != "yes" {
// The user is giving up, we're done here
break
}
}
if token == "" {
return fs.ConfigConfirm("2fa_error", true, "config_retry", "Authentication failed - no token returned.\n\nTry Again?")
if token != "" {
fmt.Println("Success!")
// Let's save the token into the configuration
m.Set(configAuthToken, token)
// And delete any previous entry for password
m.Set(configPassword, "")
// And we're done here
break
}
// Let's save the token into the configuration
m.Set(configAuthToken, token)
// And delete any previous entry for password
m.Set(configPassword, "")
// And we're done here
return nil, nil
case "2fa_error":
if config.Result == "true" {
return fs.ConfigGoto("2fa")
}
return nil, errors.New("2fa authentication failed")
}
return nil, fmt.Errorf("unknown state %q", config.State)
}
// sets the AuthorizationToken up

View File

@@ -224,17 +224,6 @@ have a server which returns
Then you may need to enable this flag.
If concurrent reads are disabled, the use_fstat option is ignored.
`,
Advanced: true,
}, {
Name: "disable_concurrent_writes",
Default: false,
Help: `If set don't use concurrent writes
Normally rclone uses concurrent writes to upload files. This improves
the performance greatly, especially for distant servers.
This option disables concurrent writes should that be necessary.
`,
Advanced: true,
}, {
@@ -255,30 +244,29 @@ Set to 0 to keep connections indefinitely.
// Options defines the configuration for this backend
type Options struct {
Host string `config:"host"`
User string `config:"user"`
Port string `config:"port"`
Pass string `config:"pass"`
KeyPem string `config:"key_pem"`
KeyFile string `config:"key_file"`
KeyFilePass string `config:"key_file_pass"`
PubKeyFile string `config:"pubkey_file"`
KnownHostsFile string `config:"known_hosts_file"`
KeyUseAgent bool `config:"key_use_agent"`
UseInsecureCipher bool `config:"use_insecure_cipher"`
DisableHashCheck bool `config:"disable_hashcheck"`
AskPassword bool `config:"ask_password"`
PathOverride string `config:"path_override"`
SetModTime bool `config:"set_modtime"`
Md5sumCommand string `config:"md5sum_command"`
Sha1sumCommand string `config:"sha1sum_command"`
SkipLinks bool `config:"skip_links"`
Subsystem string `config:"subsystem"`
ServerCommand string `config:"server_command"`
UseFstat bool `config:"use_fstat"`
DisableConcurrentReads bool `config:"disable_concurrent_reads"`
DisableConcurrentWrites bool `config:"disable_concurrent_writes"`
IdleTimeout fs.Duration `config:"idle_timeout"`
Host string `config:"host"`
User string `config:"user"`
Port string `config:"port"`
Pass string `config:"pass"`
KeyPem string `config:"key_pem"`
KeyFile string `config:"key_file"`
KeyFilePass string `config:"key_file_pass"`
PubKeyFile string `config:"pubkey_file"`
KnownHostsFile string `config:"known_hosts_file"`
KeyUseAgent bool `config:"key_use_agent"`
UseInsecureCipher bool `config:"use_insecure_cipher"`
DisableHashCheck bool `config:"disable_hashcheck"`
AskPassword bool `config:"ask_password"`
PathOverride string `config:"path_override"`
SetModTime bool `config:"set_modtime"`
Md5sumCommand string `config:"md5sum_command"`
Sha1sumCommand string `config:"sha1sum_command"`
SkipLinks bool `config:"skip_links"`
Subsystem string `config:"subsystem"`
ServerCommand string `config:"server_command"`
UseFstat bool `config:"use_fstat"`
DisableConcurrentReads bool `config:"disable_concurrent_reads"`
IdleTimeout fs.Duration `config:"idle_timeout"`
}
// Fs stores the interface to the remote SFTP files
@@ -426,8 +414,8 @@ func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.C
opts = opts[:len(opts):len(opts)] // make sure we don't overwrite the callers opts
opts = append(opts,
sftp.UseFstat(f.opt.UseFstat),
sftp.UseConcurrentReads(!f.opt.DisableConcurrentReads),
sftp.UseConcurrentWrites(!f.opt.DisableConcurrentWrites),
// FIXME disabled after library reversion
// sftp.UseConcurrentReads(!f.opt.DisableConcurrentReads),
)
if f.opt.DisableConcurrentReads { // FIXME
fs.Errorf(f, "Ignoring disable_concurrent_reads after library reversion - see #5197")
@@ -566,7 +554,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
if opt.KnownHostsFile != "" {
hostcallback, err := knownhosts.New(env.ShellExpand(opt.KnownHostsFile))
hostcallback, err := knownhosts.New(opt.KnownHostsFile)
if err != nil {
return nil, errors.Wrap(err, "couldn't parse known_hosts_file")
}
@@ -1506,19 +1494,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return in, nil
}
type sizeReader struct {
io.Reader
size int64
}
// Size returns the expected size of the stream
//
// It is used in sftpFile.ReadFrom as a hint to work out the
// concurrency needed
func (sr *sizeReader) Size() int64 {
return sr.size
}
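For context, the Size hint above only helps if the consumer looks for it. A hedged sketch (not rclone or pkg/sftp code) of how a caller can discover the expected length of a plain io.Reader:

// readerSize is a hypothetical helper: it reports the expected length of a
// reader if the reader exposes a Size() int64 method, as sizeReader does.
func readerSize(r io.Reader) (int64, bool) {
	if s, ok := r.(interface{ Size() int64 }); ok {
		return s.Size(), true
	}
	return 0, false
}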
// Update a remote sftp file using the data <in> and ModTime from <src>
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
o.fs.addTransfer() // Show transfer in progress
@@ -1550,7 +1525,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(src, "Removed after failed upload: %v", err)
}
}
_, err = file.ReadFrom(&sizeReader{Reader: in, size: src.Size()})
_, err = file.ReadFrom(in)
if err != nil {
remove()
return errors.Wrap(err, "Update ReadFrom failed")

View File

@@ -77,6 +77,7 @@ import (
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"path"
@@ -109,10 +110,10 @@ const (
decayConstant = 2 // bigger for slower decay, exponential
apiPath = "/sf/v3" // add to endpoint to get API path
tokenPath = "/oauth/token" // add to endpoint to get Token path
minChunkSize = 256 * fs.Kibi
maxChunkSize = 2 * fs.Gibi
defaultChunkSize = 64 * fs.Mebi
defaultUploadCutoff = 128 * fs.Mebi
minChunkSize = 256 * fs.KibiByte
maxChunkSize = 2 * fs.GibiByte
defaultChunkSize = 64 * fs.MebiByte
defaultUploadCutoff = 128 * fs.MebiByte
)
// Generate a new oauth2 config which we will update when we know the TokenURL
@@ -135,7 +136,7 @@ func init() {
Name: "sharefile",
Description: "Citrix Sharefile",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
oauthConfig := newOauthConfig("")
checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
if auth == nil || auth.Form == nil {
@@ -151,10 +152,13 @@ func init() {
oauthConfig.Endpoint.TokenURL = endpoint + tokenPath
return nil
}
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
CheckAuth: checkAuth,
})
opt := oauthutil.Options{
CheckAuth: checkAuth,
}
err := oauthutil.Config(ctx, "sharefile", name, m, oauthConfig, &opt)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
},
Options: []fs.Option{{
Name: "upload_cutoff",

View File

@@ -16,6 +16,7 @@ import (
"context"
"fmt"
"io"
"log"
"net/http"
"net/url"
"path"
@@ -75,63 +76,50 @@ func init() {
Name: "sugarsync",
Description: "Sugarsync",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, errors.Wrap(err, "failed to read options")
log.Fatalf("Failed to read options: %v", err)
}
switch config.State {
case "":
if opt.RefreshToken == "" {
return fs.ConfigGoto("username")
if opt.RefreshToken != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.ConfirmWithConfig(ctx, m, "config_refresh_token", true) {
return
}
return fs.ConfigConfirm("refresh", true, "config_refresh", "Already have a token - refresh?")
case "refresh":
if config.Result == "false" {
return nil, nil
}
return fs.ConfigGoto("username")
case "username":
return fs.ConfigInput("password", "config_username", "username (email address)")
case "password":
m.Set("username", config.Result)
return fs.ConfigPassword("auth", "config_password", "Your Sugarsync password.\n\nOnly required during setup and will not be stored.")
case "auth":
username, _ := m.Get("username")
m.Set("username", "")
password := config.Result
authRequest := api.AppAuthorization{
Username: username,
Password: password,
Application: withDefault(opt.AppID, appID),
AccessKeyID: withDefault(opt.AccessKeyID, accessKeyID),
PrivateAccessKey: withDefault(opt.PrivateAccessKey, obscure.MustReveal(encryptedPrivateAccessKey)),
}
var resp *http.Response
opts := rest.Opts{
Method: "POST",
Path: "/app-authorization",
}
srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(rootURL) // FIXME
// FIXME
//err = f.pacer.Call(func() (bool, error) {
resp, err = srv.CallXML(context.Background(), &opts, &authRequest, nil)
// return shouldRetry(ctx, resp, err)
//})
if err != nil {
return nil, errors.Wrap(err, "failed to get token")
}
opt.RefreshToken = resp.Header.Get("Location")
m.Set("refresh_token", opt.RefreshToken)
return nil, nil
}
return nil, fmt.Errorf("unknown state %q", config.State)
}, Options: []fs.Option{{
fmt.Printf("Username (email address)> ")
username := config.ReadLine()
password := config.GetPassword("Your Sugarsync password is only required during setup and will not be stored.")
authRequest := api.AppAuthorization{
Username: username,
Password: password,
Application: withDefault(opt.AppID, appID),
AccessKeyID: withDefault(opt.AccessKeyID, accessKeyID),
PrivateAccessKey: withDefault(opt.PrivateAccessKey, obscure.MustReveal(encryptedPrivateAccessKey)),
}
var resp *http.Response
opts := rest.Opts{
Method: "POST",
Path: "/app-authorization",
}
srv := rest.NewClient(fshttp.NewClient(ctx)).SetRoot(rootURL) // FIXME
// FIXME
//err = f.pacer.Call(func() (bool, error) {
resp, err = srv.CallXML(context.Background(), &opts, &authRequest, nil)
// return shouldRetry(ctx, resp, err)
//})
if err != nil {
log.Fatalf("Failed to get token: %v", err)
}
opt.RefreshToken = resp.Header.Get("Location")
m.Set("refresh_token", opt.RefreshToken)
},
Options: []fs.Option{{
Name: "app_id",
Help: "Sugarsync App ID.\n\nLeave blank to use rclone's.",
}, {

View File

@@ -36,7 +36,7 @@ import (
const (
directoryMarkerContentType = "application/directory" // content type of directory marker objects
listChunks = 1000 // chunk size to read directory listings
defaultChunkSize = 5 * fs.Gibi
defaultChunkSize = 5 * fs.GibiByte
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
)
@@ -46,7 +46,7 @@ var SharedOptions = []fs.Option{{
Help: `Above this size files will be chunked into a _segments container.
Above this size files will be chunked into a _segments container. The
default for this is 5 GiB which is its maximum value.`,
default for this is 5GB which is its maximum value.`,
Default: defaultChunkSize,
Advanced: true,
}, {
@@ -56,7 +56,7 @@ default for this is 5 GiB which is its maximum value.`,
When doing streaming uploads (e.g. using rcat or mount) setting this
flag will cause the swift backend to not upload chunked files.
This will limit the maximum upload size to 5 GiB. However non chunked
This will limit the maximum upload size to 5GB. However non chunked
files are easier to deal with and have an MD5SUM.
Rclone will still chunk files bigger than chunk_size when doing normal
@@ -419,7 +419,7 @@ func swiftConnection(ctx context.Context, opt *Options, name string) (*swift.Con
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.SizeSuffixBase
const minChunkSize = fs.Byte
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}

View File

@@ -87,7 +87,7 @@ func (f *Fs) testWithChunk(t *testing.T) {
preConfChunkSize := f.opt.ChunkSize
preConfChunk := f.opt.NoChunk
f.opt.NoChunk = false
f.opt.ChunkSize = 1024 * fs.SizeSuffixBase
f.opt.ChunkSize = 1024 * fs.Byte
defer func() {
//restore old config after test
f.opt.ChunkSize = preConfChunkSize
@@ -117,7 +117,7 @@ func (f *Fs) testWithChunkFail(t *testing.T) {
preConfChunkSize := f.opt.ChunkSize
preConfChunk := f.opt.NoChunk
f.opt.NoChunk = false
f.opt.ChunkSize = 1024 * fs.SizeSuffixBase
f.opt.ChunkSize = 1024 * fs.Byte
segmentContainer := f.root + "_segments"
defer func() {
//restore config
@@ -159,7 +159,7 @@ func (f *Fs) testCopyLargeObject(t *testing.T) {
preConfChunkSize := f.opt.ChunkSize
preConfChunk := f.opt.NoChunk
f.opt.NoChunk = false
f.opt.ChunkSize = 1024 * fs.SizeSuffixBase
f.opt.ChunkSize = 1024 * fs.Byte
defer func() {
//restore old config after test
f.opt.ChunkSize = preConfChunkSize

View File

@@ -7,6 +7,7 @@ import (
"context"
"fmt"
"io"
"log"
"path"
"strings"
"time"
@@ -41,19 +42,19 @@ func init() {
Name: "tardigrade",
Description: "Tardigrade Decentralized Cloud Storage",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, configIn fs.ConfigIn) (*fs.ConfigOut, error) {
provider, _ := m.Get(fs.ConfigProvider)
Config: func(ctx context.Context, name string, configMapper configmap.Mapper) {
provider, _ := configMapper.Get(fs.ConfigProvider)
config.FileDeleteKey(name, fs.ConfigProvider)
if provider == newProvider {
satelliteString, _ := m.Get("satellite_address")
apiKey, _ := m.Get("api_key")
passphrase, _ := m.Get("passphrase")
satelliteString, _ := configMapper.Get("satellite_address")
apiKey, _ := configMapper.Get("api_key")
passphrase, _ := configMapper.Get("passphrase")
// satelliteString always contains the default and passphrase can be empty
if apiKey == "" {
return nil, nil
return
}
satellite, found := satMap[satelliteString]
@@ -63,23 +64,22 @@ func init() {
access, err := uplink.RequestAccessWithPassphrase(context.TODO(), satellite, apiKey, passphrase)
if err != nil {
return nil, errors.Wrap(err, "couldn't create access grant")
log.Fatalf("Couldn't create access grant: %v", err)
}
serializedAccess, err := access.Serialize()
if err != nil {
return nil, errors.Wrap(err, "couldn't serialize access grant")
log.Fatalf("Couldn't serialize access grant: %v", err)
}
m.Set("satellite_address", satellite)
m.Set("access_grant", serializedAccess)
configMapper.Set("satellite_address", satellite)
configMapper.Set("access_grant", serializedAccess)
} else if provider == existingProvider {
config.FileDeleteKey(name, "satellite_address")
config.FileDeleteKey(name, "api_key")
config.FileDeleteKey(name, "passphrase")
} else {
return nil, errors.Errorf("invalid provider type: %s", provider)
log.Fatalf("Invalid provider type: %s", provider)
}
return nil, nil
},
Options: []fs.Option{
{

View File

@@ -148,7 +148,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (_ io.ReadC
case s && !e:
offset = opt.Start
case !s && e:
offset = -opt.End
object, err := o.fs.project.StatObject(ctx, bucketName, bucketPath)
if err != nil {
return nil, err
}
offset = object.System.ContentLength - opt.End
length = opt.End
}
case *fs.SeekOption:
offset = opt.Offset
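A worked example of the tail-range handling above, with hypothetical sizes:

// tailRangeExample shows the arithmetic for reading the last 100 bytes of a
// hypothetical 1000 byte object: the download covers bytes 900..999.
func tailRangeExample() (offset, length int64) {
	contentLength := int64(1000) // from StatObject
	end := int64(100)            // opt.End
	return contentLength - end, end // offset 900, length 100
}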

View File

@@ -1,170 +0,0 @@
package api
import "fmt"
// Error contains the error code and message returned by the API
type Error struct {
Success bool `json:"success,omitempty"`
StatusCode int `json:"statusCode,omitempty"`
Message string `json:"message,omitempty"`
Data string `json:"data,omitempty"`
}
// Error returns a string for the error and satisfies the error interface
func (e Error) Error() string {
out := fmt.Sprintf("api error %d", e.StatusCode)
if e.Message != "" {
out += ": " + e.Message
}
if e.Data != "" {
out += ": " + e.Data
}
return out
}
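A worked example of the formatting above, with hypothetical field values:

// errorExample shows how the fields are joined by Error().
func errorExample() string {
	e := Error{StatusCode: 404, Message: "file not found", Data: "abc123"}
	return e.Error() // "api error 404: file not found: abc123"
}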
// FolderEntry represents a Uptobox subfolder when listing folder contents
type FolderEntry struct {
FolderID uint64 `json:"fld_id"`
Description string `json:"fld_descr"`
Password string `json:"fld_password"`
FullPath string `json:"fullPath"`
Path string `json:"fld_name"`
Name string `json:"name"`
Hash string `json:"hash"`
}
// FolderInfo represents the current folder when listing folder contents
type FolderInfo struct {
FolderID uint64 `json:"fld_id"`
Hash string `json:"hash"`
FileCount uint64 `json:"fileCount"`
TotalFileSize int64 `json:"totalFileSize"`
}
// FileInfo represents a file when listing folder contents
type FileInfo struct {
Name string `json:"file_name"`
Description string `json:"file_descr"`
Created string `json:"file_created"`
Size int64 `json:"file_size"`
Downloads uint64 `json:"file_downloads"`
Code string `json:"file_code"`
Password string `json:"file_password"`
Public int `json:"file_public"`
LastDownload string `json:"file_last_download"`
ID uint64 `json:"id"`
}
// ReadMetadataResponse is the response when listing folder contents
type ReadMetadataResponse struct {
StatusCode int `json:"statusCode"`
Message string `json:"message"`
Data struct {
CurrentFolder FolderInfo `json:"currentFolder"`
Folders []FolderEntry `json:"folders"`
Files []FileInfo `json:"files"`
PageCount int `json:"pageCount"`
TotalFileCount int `json:"totalFileCount"`
TotalFileSize int64 `json:"totalFileSize"`
} `json:"data"`
}
// UploadInfo is the response when initiating an upload
type UploadInfo struct {
StatusCode int `json:"statusCode"`
Message string `json:"message"`
Data struct {
UploadLink string `json:"uploadLink"`
MaxUpload string `json:"maxUpload"`
} `json:"data"`
}
// UploadResponse is the response to a successful upload
type UploadResponse struct {
Files []struct {
Name string `json:"name"`
Size int64 `json:"size"`
URL string `json:"url"`
DeleteURL string `json:"deleteUrl"`
} `json:"files"`
}
// UpdateResponse is a generic response to various actions on files (rename/copy/move)
type UpdateResponse struct {
Message string `json:"message"`
StatusCode int `json:"statusCode"`
}
// Download is the response when requesting a download link
type Download struct {
StatusCode int `json:"statusCode"`
Message string `json:"message"`
Data struct {
DownloadLink string `json:"dlLink"`
} `json:"data"`
}
// MetadataRequestOptions represents all the options when listing folder contents
type MetadataRequestOptions struct {
Limit uint64
Offset uint64
SearchField string
Search string
}
// CreateFolderRequest is used for creating a folder
type CreateFolderRequest struct {
Token string `json:"token"`
Path string `json:"path"`
Name string `json:"name"`
}
// DeleteFolderRequest is used for deleting a folder
type DeleteFolderRequest struct {
Token string `json:"token"`
FolderID uint64 `json:"fld_id"`
}
// CopyMoveFileRequest is used for moving/copying a file
type CopyMoveFileRequest struct {
Token string `json:"token"`
FileCodes string `json:"file_codes"`
DestinationFolderID uint64 `json:"destination_fld_id"`
Action string `json:"action"`
}
// MoveFolderRequest is used for moving a folder
type MoveFolderRequest struct {
Token string `json:"token"`
FolderID uint64 `json:"fld_id"`
DestinationFolderID uint64 `json:"destination_fld_id"`
Action string `json:"action"`
}
// RenameFolderRequest is used for renaming a folder
type RenameFolderRequest struct {
Token string `json:"token"`
FolderID uint64 `json:"fld_id"`
NewName string `json:"new_name"`
}
// UpdateFileInformation is used for renaming a file
type UpdateFileInformation struct {
Token string `json:"token"`
FileCode string `json:"file_code"`
NewName string `json:"new_name,omitempty"`
Description string `json:"description,omitempty"`
Password string `json:"password,omitempty"`
Public string `json:"public,omitempty"`
}
// RemoveFileRequest is used for deleting a file
type RemoveFileRequest struct {
Token string `json:"token"`
FileCodes string `json:"file_codes"`
}
// Token represents the authentication token
type Token struct {
Token string `json:"token"`
}

File diff suppressed because it is too large

View File

@@ -1,21 +0,0 @@
// Test Uptobox filesystem interface
package uptobox_test
import (
"testing"
"github.com/rclone/rclone/backend/uptobox"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
*fstest.RemoteName = "TestUptobox:"
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*uptobox.Object)(nil),
})
}

View File

@@ -125,7 +125,7 @@ func (ca *CookieAuth) getSPCookie(conf *SharepointSuccessResponse) (*CookieRespo
return nil, errors.Wrap(err, "Error while constructing endpoint URL")
}
u, err := url.Parse(spRoot.Scheme + "://" + spRoot.Host + "/_forms/default.aspx?wa=wsignin1.0")
u, err := url.Parse("https://" + spRoot.Host + "/_forms/default.aspx?wa=wsignin1.0")
if err != nil {
return nil, errors.Wrap(err, "Error while constructing login URL")
}

View File

@@ -113,21 +113,6 @@ func init() {
Name: config.ConfigEncoding,
Help: configEncodingHelp,
Advanced: true,
}, {
Name: "headers",
Help: `Set HTTP headers for all transactions
Use this to set additional HTTP headers for all transactions
The input format is comma separated list of key,value pairs. Standard
[CSV encoding](https://godoc.org/encoding/csv) may be used.
For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
`,
Default: fs.CommaSepList{},
Advanced: true,
}},
})
}
@@ -141,7 +126,6 @@ type Options struct {
BearerToken string `config:"bearer_token"`
BearerTokenCommand string `config:"bearer_token_command"`
Enc encoder.MultiEncoder `config:"encoding"`
Headers fs.CommaSepList `config:"headers"`
}
// Fs represents a remote webdav
@@ -375,12 +359,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, err
}
if len(opt.Headers)%2 != 0 {
return nil, errors.New("odd number of headers supplied")
}
fs.Debugf(nil, "found headers: %v", opt.Headers)
rootIsDir := strings.HasSuffix(root, "/")
root = strings.Trim(root, "/")
@@ -450,9 +428,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, err
}
}
if opt.Headers != nil {
f.addHeaders(opt.Headers)
}
f.srv.SetErrorHandler(errorHandler)
err = f.setQuirks(ctx, opt.Vendor)
if err != nil {
@@ -512,15 +487,6 @@ func (f *Fs) fetchBearerToken(cmd string) (string, error) {
return stdoutString, nil
}
// Adds the configured headers to the request if any
func (f *Fs) addHeaders(headers fs.CommaSepList) {
for i := 0; i < len(headers); i += 2 {
key := f.opt.Headers[i]
value := f.opt.Headers[i+1]
f.srv.SetHeader(key, value)
}
}
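To illustrate the pairing above with hypothetical values: a headers option of "Cookie,name=value,Authorization,xxx" decodes to four list entries which are walked two at a time, one header per pair (fs and fmt imports assumed):

// headersExample shows how a hypothetical comma separated list maps onto
// header pairs, mirroring addHeaders above.
func headersExample() {
	headers := fs.CommaSepList{"Cookie", "name=value", "Authorization", "xxx"}
	for i := 0; i < len(headers); i += 2 {
		fmt.Printf("%s: %s\n", headers[i], headers[i+1]) // one header per pair
	}
}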
// fetch the bearer token and set it if successful
func (f *Fs) fetchAndSetBearerToken() error {
if f.opt.BearerTokenCommand == "" {

View File

@@ -1,74 +0,0 @@
package webdav_test
import (
"context"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/rclone/rclone/backend/webdav"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
remoteName = "TestWebDAV"
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
)
// prepareServer prepares the test server and returns a function to tidy it up afterwards;
// the headers option checks are run on each request
func prepareServer(t *testing.T) (configmap.Simple, func()) {
// file server
fileServer := http.FileServer(http.Dir(""))
// test the headers are there then pass on to fileServer
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
fileServer.ServeHTTP(w, r)
})
// Make the test server
ts := httptest.NewServer(handler)
// Configure the remote
configfile.Install()
m := configmap.Simple{
"type": "webdav",
"url": ts.URL,
// add headers to test the headers option
"headers": strings.Join(headers, ","),
}
// return a function to tidy up
return m, ts.Close
}
// prepare the test server and return a function to tidy it up afterwards
func prepare(t *testing.T) (fs.Fs, func()) {
m, tidy := prepareServer(t)
// Instantiate the WebDAV server
f, err := webdav.NewFs(context.Background(), remoteName, "", m)
require.NoError(t, err)
return f, tidy
}
// TestHeaders makes a request and checks that the headers option was applied
func TestHeaders(t *testing.T) {
f, tidy := prepare(t)
defer tidy()
// any request will do
_, err := f.Features().About(context.Background())
require.NoError(t, err)
}

View File

@@ -60,10 +60,12 @@ func init() {
Name: "yandex",
Description: "Yandex Disk",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
Config: func(ctx context.Context, name string, m configmap.Mapper) {
err := oauthutil.Config(ctx, "yandex", name, m, oauthConfig, nil)
if err != nil {
log.Fatalf("Failed to configure token: %v", err)
return
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: config.ConfigEncoding,
@@ -249,22 +251,22 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
token, err := oauthutil.GetToken(name, m)
if err != nil {
return nil, errors.Wrap(err, "couldn't read OAuth token")
log.Fatalf("Couldn't read OAuth token (this should never happen).")
}
if token.RefreshToken == "" {
return nil, errors.New("unable to get RefreshToken. If you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend")
log.Fatalf("Unable to get RefreshToken. If you are upgrading from older versions of rclone, please run `rclone config` and re-configure this backend.")
}
if token.TokenType != "OAuth" {
token.TokenType = "OAuth"
err = oauthutil.PutToken(name, m, token, false)
if err != nil {
return nil, errors.Wrap(err, "couldn't save OAuth token")
log.Fatalf("Couldn't save OAuth token (this should never happen).")
}
log.Printf("Automatically upgraded OAuth config.")
}
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to configure Yandex")
log.Fatalf("Failed to configure Yandex: %v", err)
}
ci := fs.GetConfig(ctx)

View File

@@ -7,6 +7,7 @@ import (
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"path"
@@ -72,97 +73,41 @@ func init() {
Name: "zoho",
Description: "Zoho",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
Config: func(ctx context.Context, name string, m configmap.Mapper) {
// Need to setup region before configuring oauth
err := setupRegion(m)
setupRegion(m)
opt := oauthutil.Options{
// No refresh token unless ApprovalForce is set
OAuth2Opts: []oauth2.AuthCodeOption{oauth2.ApprovalForce},
}
if err := oauthutil.Config(ctx, "zoho", name, m, oauthConfig, &opt); err != nil {
log.Fatalf("Failed to configure token: %v", err)
}
// We need to rewrite the token type to "Zoho-oauthtoken" because Zoho wants
// it's own custom type
token, err := oauthutil.GetToken(name, m)
if err != nil {
return nil, err
log.Fatalf("Failed to read token: %v", err)
}
getSrvs := func() (authSrv, apiSrv *rest.Client, err error) {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if token.TokenType != "Zoho-oauthtoken" {
token.TokenType = "Zoho-oauthtoken"
err = oauthutil.PutToken(name, m, token, false)
if err != nil {
return nil, nil, errors.Wrap(err, "failed to load oAuthClient")
log.Fatalf("Failed to configure token: %v", err)
}
authSrv = rest.NewClient(oAuthClient).SetRoot(accountsURL)
apiSrv = rest.NewClient(oAuthClient).SetRoot(rootURL)
return authSrv, apiSrv, nil
}
switch config.State {
case "":
return oauthutil.ConfigOut("teams", &oauthutil.Options{
OAuth2Config: oauthConfig,
// No refresh token unless ApprovalForce is set
OAuth2Opts: []oauth2.AuthCodeOption{oauth2.ApprovalForce},
})
case "teams":
// We need to rewrite the token type to "Zoho-oauthtoken" because Zoho wants
// its own custom type
token, err := oauthutil.GetToken(name, m)
if err != nil {
return nil, errors.Wrap(err, "failed to read token")
}
if token.TokenType != "Zoho-oauthtoken" {
token.TokenType = "Zoho-oauthtoken"
err = oauthutil.PutToken(name, m, token, false)
if err != nil {
return nil, errors.Wrap(err, "failed to configure token")
}
}
authSrv, apiSrv, err := getSrvs()
if err != nil {
return nil, err
}
// Get the user Info
opts := rest.Opts{
Method: "GET",
Path: "/oauth/user/info",
}
var user api.User
_, err = authSrv.CallJSON(ctx, &opts, nil, &user)
if err != nil {
return nil, err
}
// Get the teams
teams, err := listTeams(ctx, user.ZUID, apiSrv)
if err != nil {
return nil, err
}
return fs.ConfigChoose("workspace", "config_team_drive_id", "Team Drive ID", len(teams), func(i int) (string, string) {
team := teams[i]
return team.ID, team.Attributes.Name
})
case "workspace":
_, apiSrv, err := getSrvs()
if err != nil {
return nil, err
}
teamID := config.Result
workspaces, err := listWorkspaces(ctx, teamID, apiSrv)
if err != nil {
return nil, err
}
return fs.ConfigChoose("workspace_end", "config_workspace", "Workspace ID", len(workspaces), func(i int) (string, string) {
workspace := workspaces[i]
return workspace.ID, workspace.Attributes.Name
})
case "workspace_end":
workspaceID := config.Result
m.Set(configRootID, workspaceID)
return nil, nil
if fs.GetConfig(ctx).AutoConfirm {
return
}
if err = setupRoot(ctx, name, m); err != nil {
log.Fatalf("Failed to configure root directory: %v", err)
}
return nil, fmt.Errorf("unknown state %q", config.State)
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "region",
Help: `Zoho region to connect to.
You'll have to use the region your organization is registered in. If
not sure use the same top level domain as you connect to in your
browser.`,
Help: "Zoho region to connect to. You'll have to use the region your organization is registered in.",
Examples: []fs.OptionExample{{
Value: "com",
Help: "United states / Global",
@@ -219,16 +164,15 @@ type Object struct {
// ------------------------------------------------------------
func setupRegion(m configmap.Mapper) error {
func setupRegion(m configmap.Mapper) {
region, ok := m.Get("region")
if !ok || region == "" {
return errors.New("no region set")
log.Fatalf("No region set\n")
}
rootURL = fmt.Sprintf("https://workdrive.zoho.%s/api/v1", region)
accountsURL = fmt.Sprintf("https://accounts.zoho.%s", region)
oauthConfig.Endpoint.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
oauthConfig.Endpoint.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)
return nil
}
// ------------------------------------------------------------
@@ -261,6 +205,49 @@ func listWorkspaces(ctx context.Context, teamID string, srv *rest.Client) ([]api
return workspaceList.TeamWorkspace, nil
}
func setupRoot(ctx context.Context, name string, m configmap.Mapper) error {
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
}
authSrv := rest.NewClient(oAuthClient).SetRoot(accountsURL)
opts := rest.Opts{
Method: "GET",
Path: "/oauth/user/info",
}
var user api.User
_, err = authSrv.CallJSON(ctx, &opts, nil, &user)
if err != nil {
return err
}
apiSrv := rest.NewClient(oAuthClient).SetRoot(rootURL)
teams, err := listTeams(ctx, user.ZUID, apiSrv)
if err != nil {
return err
}
var teamIDs, teamNames []string
for _, team := range teams {
teamIDs = append(teamIDs, team.ID)
teamNames = append(teamNames, team.Attributes.Name)
}
teamID := config.Choose("Enter a Team Drive ID", teamIDs, teamNames, true)
workspaces, err := listWorkspaces(ctx, teamID, apiSrv)
if err != nil {
return err
}
var workspaceIDs, workspaceNames []string
for _, workspace := range workspaces {
workspaceIDs = append(workspaceIDs, workspace.ID)
workspaceNames = append(workspaceNames, workspace.Attributes.Name)
}
workspaceID := config.Choose("Enter a Workspace ID", workspaceIDs, workspaceNames, true)
m.Set(configRootID, workspaceID)
return nil
}
// --------------------------------------------------------------
// retryErrorCodes is a slice of error codes that we will retry
@@ -390,10 +377,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err := configstruct.Set(m, opt); err != nil {
return nil, err
}
err := setupRegion(m)
if err != nil {
return nil, err
}
setupRegion(m)
root = parsePath(root)
oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
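
The new `Config` callback in the hunk above is a small state machine: rclone calls it repeatedly, handing back the previous state and the user's answer in `fs.ConfigIn`, and the callback returns the next question (or nil to finish) as an `fs.ConfigOut`. A minimal sketch of that pattern, using only the `fs.ConfigChoose` helper and the `State`/`Result` fields seen above; the option name `config_example_id` and the two choices are illustrative and not part of the Zoho backend:

package zoho

import (
    "context"
    "fmt"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config/configmap"
)

// exampleConfig shows the shape of the state machine only - it is not the real Zoho flow.
func exampleConfig(ctx context.Context, name string, m configmap.Mapper, in fs.ConfigIn) (*fs.ConfigOut, error) {
    switch in.State {
    case "":
        // First call: ask the user to choose between two illustrative values.
        return fs.ConfigChoose("picked", "config_example_id", "Example ID", 2, func(i int) (string, string) {
            items := [][2]string{{"a", "First choice"}, {"b", "Second choice"}}
            return items[i][0], items[i][1]
        })
    case "picked":
        // Second call: in.Result carries the answer to the previous question.
        m.Set("config_example_id", in.Result)
        return nil, nil // returning nil, nil ends the config flow
    }
    return nil, fmt.Errorf("unknown state %q", in.State)
}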

View File

@@ -1,203 +0,0 @@
#!/usr/bin/env python3
"""
Test program to demonstrate the remote config interfaces in
rclone.
This program can simulate
rclone config create
rclone config update
rclone config password - NOT implemented yet
rclone authorize - NOT implemented yet
Pass the desired action as the first argument then any parameters.
This assumes passwords will be passed in the clear.
"""
import argparse
import subprocess
import json
from pprint import pprint
sep = "-"*60
def rpc(args, command, params):
"""
Run the command. This could be either over the CLI or the API.
Here we run over the API either using `rclone rc --loopback` which
is useful for making sure state is saved properly or to an
existing rclone rcd if `--rc` is used on the command line.
"""
if args.rc:
import requests
kwargs = {
"json": params,
}
if args.user:
kwargs["auth"] = (args.user, args.password)
r = requests.post('http://localhost:5572/'+command, **kwargs)
if r.status_code != 200:
raise ValueError(f"RC command failed: Error {r.status_code}: {r.text}")
return r.json()
cmd = ["rclone", "-vv", "rc", "--loopback", command, "--json", json.dumps(params)]
result = subprocess.run(cmd, stdout=subprocess.PIPE, check=True)
return json.loads(result.stdout)
def parse_parameters(parameters):
"""
Parse the incoming key=value parameters into a dict
"""
d = {}
for param in parameters:
parts = param.split("=", 1)
if len(parts) != 2:
raise ValueError("bad format for parameter need name=value")
d[parts[0]] = parts[1]
return d
def ask(opt):
"""
Ask the user to enter the option
This is the user interface for asking a user a question.
If there are examples they should be presented.
"""
while True:
if opt["IsPassword"]:
print("*** Inputting a password")
print(opt['Help'])
examples = opt.get("Examples", ())
or_number = ""
if len(examples) > 0:
or_number = " or choice number"
for i, example in enumerate(examples):
print(f"{i:3} value: {example['Value']}")
print(f" help: {example['Help']}")
print(f"Enter a {opt['Type']} value{or_number}. Press Enter for the default ('{opt['DefaultStr']}')")
print(f"{opt['Name']}> ", end='')
s = input()
if s == "":
return opt["DefaultStr"]
try:
i = int(s)
if i >= 0 and i < len(examples):
return examples[i]["Value"]
except ValueError:
pass
if opt["Exclusive"]:
for example in examples:
if s == example["Value"]:
return s
# Exclusive is set but the value isn't one of the accepted
# ones so continue
print("Value isn't one of the acceptable values")
else:
return s
return s
def create_or_update(what, args):
"""
Run the equivalent of rclone config create
or rclone config update
what should either be "create" or "update"
"""
print(what, args)
params = parse_parameters(args.parameters)
inp = {
"name": args.name,
"parameters": params,
"opt": {
"nonInteractive": True,
"all": args.all,
"noObscure": args.obscured_passwords,
"obscure": not args.obscured_passwords,
},
}
if what == "create":
inp["type"] = args.type
while True:
print(sep)
print("Input to API")
pprint(inp)
print(sep)
out = rpc(args, "config/"+what, inp)
print(sep)
print("Output from API")
pprint(out)
print(sep)
if out["State"] == "":
return
if out["Error"]:
print("Error", out["Error"])
result = ask(out["Option"])
inp["opt"]["state"] = out["State"]
inp["opt"]["result"] = result
inp["opt"]["continue"] = True
def create(args):
"""Run the equivalent of rclone config create"""
create_or_update("create", args)
def update(args):
"""Run the equivalent of rclone config update"""
create_or_update("update", args)
def password(args):
"""Run the equivalent of rclone config password"""
print("password", args)
raise NotImplementedError()
def authorize(args):
"""Run the equivalent of rclone authorize"""
print("authorize", args)
raise NotImplementedError()
def main():
"""
Make the command line parser and dispatch
"""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("-a", "--all", action='store_true',
help="Ask all the config questions if set")
parser.add_argument("-o", "--obscured-passwords", action='store_true',
help="If set assume the passwords are obscured")
parser.add_argument("--rc", action='store_true',
help="If set use the rc (you'll need to start an rclone rcd)")
parser.add_argument("--user", type=str, default="",
help="Username for use with --rc")
parser.add_argument("--pass", type=str, default="", dest='password',
help="Password for use with --rc")
subparsers = parser.add_subparsers(dest='command', required=True)
subparser = subparsers.add_parser('create')
subparser.add_argument("name", type=str, help="Name of remote to create")
subparser.add_argument("type", type=str, help="Type of remote to create")
subparser.add_argument("parameters", type=str, nargs='*', help="Config parameters name=value name=value")
subparser.set_defaults(func=create)
subparser = subparsers.add_parser('update')
subparser.add_argument("name", type=str, help="Name of remote to update")
subparser.add_argument("parameters", type=str, nargs='*', help="Config parameters name=value name=value")
subparser.set_defaults(func=update)
subparser = subparsers.add_parser('password')
subparser.add_argument("name", type=str, help="Name of remote to update")
subparser.add_argument("parameters", type=str, nargs='*', help="Config parameters name=value name=value")
subparser.set_defaults(func=password)
subparser = subparsers.add_parser('authorize')
subparser.set_defaults(func=authorize)
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()

View File

@@ -62,7 +62,6 @@ docs = [
"sftp.md",
"sugarsync.md",
"tardigrade.md",
"uptobox.md",
"union.md",
"webdav.md",
"yandex.md",

View File

@@ -18,6 +18,7 @@ import (
_ "github.com/rclone/rclone/cmd/copyurl"
_ "github.com/rclone/rclone/cmd/cryptcheck"
_ "github.com/rclone/rclone/cmd/cryptdecode"
_ "github.com/rclone/rclone/cmd/dbhashsum"
_ "github.com/rclone/rclone/cmd/dedupe"
_ "github.com/rclone/rclone/cmd/delete"
_ "github.com/rclone/rclone/cmd/deletefile"
@@ -53,7 +54,6 @@ import (
_ "github.com/rclone/rclone/cmd/size"
_ "github.com/rclone/rclone/cmd/sync"
_ "github.com/rclone/rclone/cmd/test"
_ "github.com/rclone/rclone/cmd/test/changenotify"
_ "github.com/rclone/rclone/cmd/test/histogram"
_ "github.com/rclone/rclone/cmd/test/info"
_ "github.com/rclone/rclone/cmd/test/makefiles"

View File

@@ -9,7 +9,6 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
@@ -157,12 +156,6 @@ to check all the data.
if download {
return operations.CheckDownload(context.Background(), opt)
}
hashType := fsrc.Hashes().Overlap(fdst.Hashes()).GetOne()
if hashType == hash.None {
fs.Errorf(nil, "No common hash found - not using a hash for checks")
} else {
fs.Infof(nil, "Using %v for hash comparisons", hashType)
}
return operations.Check(context.Background(), opt)
})
},

View File

@@ -49,7 +49,7 @@ var (
cpuProfile = flags.StringP("cpuprofile", "", "", "Write cpu profile to file")
memProfile = flags.StringP("memprofile", "", "", "Write memory profile to file")
statsInterval = flags.DurationP("stats", "", time.Minute*1, "Interval between printing stats, e.g 500ms, 60s, 5m. (0 to disable)")
dataRateUnit = flags.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes' per second")
dataRateUnit = flags.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes'/s")
version bool
retries = flags.IntP("retries", "", 3, "Retry operations this many times if they fail")
retriesInterval = flags.DurationP("retries-sleep", "", 0, "Interval between retrying operations if they fail, e.g 500ms, 60s, 5m. (0 to disable)")
@@ -400,7 +400,7 @@ func initConfig() {
configflags.SetFlags(ci)
// Load the config
configfile.Install()
configfile.LoadConfig(ctx)
// Start accounting
accounting.Start(ctx)

View File

@@ -22,7 +22,6 @@ func init() {
cmd.Root.AddCommand(configCommand)
configCommand.AddCommand(configEditCommand)
configCommand.AddCommand(configFileCommand)
configCommand.AddCommand(configTouchCommand)
configCommand.AddCommand(configShowCommand)
configCommand.AddCommand(configDumpCommand)
configCommand.AddCommand(configProvidersCommand)
@@ -42,9 +41,9 @@ var configCommand = &cobra.Command{
remotes and manage existing ones. You may also set or remove a
password to protect your configuration.
`,
RunE: func(command *cobra.Command, args []string) error {
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 0, command, args)
return config.EditConfig(context.Background())
config.EditConfig(context.Background())
},
}
@@ -64,15 +63,6 @@ var configFileCommand = &cobra.Command{
},
}
var configTouchCommand = &cobra.Command{
Use: "touch",
Short: `Ensure configuration file exists.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 0, command, args)
config.SaveConfig()
},
}
var configShowCommand = &cobra.Command{
Use: "show [<remote>]",
Short: `Print (decrypted) config file, or the config for a single remote.`,
@@ -105,14 +95,12 @@ var configProvidersCommand = &cobra.Command{
},
}
var updateRemoteOpt config.UpdateRemoteOpt
var configPasswordHelp = strings.ReplaceAll(`
Note that if the config process would normally ask a question the
default is taken (unless |--non-interactive| is used). Each time
that happens rclone will print or DEBUG a message saying how to
affect the value taken.
var (
configObscure bool
configNoObscure bool
)
const configPasswordHelp = `
If any of the parameters passed is a password field, then rclone will
automatically obscure them if they aren't already obscured before
putting them in the config file.
@@ -121,170 +109,84 @@ putting them in the config file.
consists only of base64 characters then rclone can get confused about
whether the password is already obscured or not and put unobscured
passwords into the config file. If you want to be 100% certain that
the passwords get obscured then use the |--obscure| flag, or if you
the passwords get obscured then use the "--obscure" flag, or if you
are 100% certain you are already passing obscured passwords then use
|--no-obscure|. You can also set obscured passwords using the
|rclone config password| command.
"--no-obscure". You can also set obscured passwords using the
"rclone config password" command.
`
The flag |--non-interactive| is for use by applications that wish to
configure rclone themselves, rather than using rclone's text based
configuration questions. If this flag is set, and rclone needs to ask
the user a question, a JSON blob will be returned with the question in
it.
This will look something like (some irrelevant detail removed):
|||
{
"State": "*oauth-islocal,teamdrive,,",
"Option": {
"Name": "config_is_local",
"Help": "Use auto config?\n * Say Y if not sure\n * Say N if you are working on a remote or headless machine\n",
"Default": true,
"Examples": [
{
"Value": "true",
"Help": "Yes"
},
{
"Value": "false",
"Help": "No"
}
],
"Required": false,
"IsPassword": false,
"Type": "bool",
"Exclusive": true,
},
"Error": "",
}
|||
The format of |Option| is the same as returned by |rclone config
providers|. The question should be asked to the user and returned to
rclone as the |--result| option along with the |--state| parameter.
The keys of |Option| are used as follows:
- |Name| - name of variable - show to user
- |Help| - help text. Hard wrapped at 80 chars. Any URLs should be clicky.
- |Default| - default value - return this if the user just wants the default.
- |Examples| - the user should be able to choose one of these
- |Required| - the value should be non-empty
- |IsPassword| - the value is a password and should be edited as such
- |Type| - type of value, eg |bool|, |string|, |int| and others
- |Exclusive| - if set no free-form entry allowed only the |Examples|
- Irrelevant keys |Provider|, |ShortOpt|, |Hide|, |NoPrefix|, |Advanced|
If |Error| is set then it should be shown to the user at the same
time as the question.
rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true"
Note that when using |--continue| all passwords should be passed in
the clear (not obscured). Any default config values should be passed
in with each invocation of |--continue|.
At the end of the non interactive process, rclone will return a result
with |State| as empty string.
If |--all| is passed then rclone will ask all the config questions,
not just the post config questions. Any parameters are used as
defaults for questions as usual.
Note that |bin/config.py| in the rclone source implements this protocol
as a readable demonstration.
`, "|", "`")
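
The non-interactive protocol documented above can also be driven from Go rather than the command line. A minimal sketch, assuming only the `config.CreateRemote`/`config.UpdateRemoteOpt` signatures used by `doConfig` below and the `State`/`Error` fields of `fs.ConfigOut`; the `answer` callback is a stand-in for whatever UI the calling application provides (bin/config.py plays that role in Python):

package configexample

import (
    "context"
    "fmt"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config"
    "github.com/rclone/rclone/fs/rc"
)

// createNonInteractive loops until rclone reports an empty State, feeding each
// answer back in - the programmatic twin of --continue/--state/--result.
func createNonInteractive(ctx context.Context, name, remoteType string, params rc.Params, answer func(*fs.ConfigOut) string) error {
    opt := config.UpdateRemoteOpt{NonInteractive: true}
    for {
        out, err := config.CreateRemote(ctx, name, remoteType, params, opt)
        if err != nil {
            return err
        }
        if out == nil || out.State == "" {
            return nil // an empty State means the configuration is complete
        }
        if out.Error != "" {
            fmt.Println(out.Error) // show errors alongside the next question
        }
        opt.Continue = true
        opt.State = out.State
        opt.Result = answer(out)
    }
}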
var configCreateCommand = &cobra.Command{
Use: "create `name` `type` [`key` `value`]*",
Short: `Create a new remote with name, type and options.`,
Long: strings.ReplaceAll(`
Create a new remote of |name| with |type| and options. The options
should be passed in pairs of |key| |value| or as |key=value|.
Long: `
Create a new remote of ` + "`name`" + ` with ` + "`type`" + ` and options. The options
should be passed in pairs of ` + "`key` `value`" + `.
For example to make a swift remote of name myremote using auto config
you would do:
rclone config create myremote swift env_auth true
rclone config create myremote swift env_auth=true
Note that if the config process would normally ask a question the
default is taken. Each time that happens rclone will print a message
saying how to affect the value taken.
` + configPasswordHelp + `
So for example if you wanted to configure a Google Drive remote but
using remote authorization you would do this:
rclone config create mydrive drive config_is_local=false
`, "|", "`") + configPasswordHelp,
rclone config create mydrive drive config_is_local false
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(2, 256, command, args)
in, err := argsToMap(args[2:])
if err != nil {
return err
}
return doConfig(args[0], in, func(opts config.UpdateRemoteOpt) (*fs.ConfigOut, error) {
return config.CreateRemote(context.Background(), args[0], args[1], in, opts)
})
},
}
func doConfig(name string, in rc.Params, do func(config.UpdateRemoteOpt) (*fs.ConfigOut, error)) error {
out, err := do(updateRemoteOpt)
if err != nil {
return err
}
if !(updateRemoteOpt.NonInteractive || updateRemoteOpt.Continue) {
config.ShowRemote(name)
} else {
if out == nil {
out = &fs.ConfigOut{}
}
outBytes, err := json.MarshalIndent(out, "", "\t")
err = config.CreateRemote(context.Background(), args[0], args[1], in, configObscure, configNoObscure)
if err != nil {
return err
}
_, _ = os.Stdout.Write(outBytes)
_, _ = os.Stdout.WriteString("\n")
}
return nil
config.ShowRemote(args[0])
return nil
},
}
func init() {
for _, cmdFlags := range []*pflag.FlagSet{configCreateCommand.Flags(), configUpdateCommand.Flags()} {
flags.BoolVarP(cmdFlags, &updateRemoteOpt.Obscure, "obscure", "", false, "Force any passwords to be obscured.")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.NoObscure, "no-obscure", "", false, "Force any passwords not to be obscured.")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.NonInteractive, "non-interactive", "", false, "Don't interact with user and return questions.")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.Continue, "continue", "", false, "Continue the configuration process with an answer.")
flags.BoolVarP(cmdFlags, &updateRemoteOpt.All, "all", "", false, "Ask the full set of config questions.")
flags.StringVarP(cmdFlags, &updateRemoteOpt.State, "state", "", "", "State - use with --continue.")
flags.StringVarP(cmdFlags, &updateRemoteOpt.Result, "result", "", "", "Result - use with --continue.")
flags.BoolVarP(cmdFlags, &configObscure, "obscure", "", false, "Force any passwords to be obscured.")
flags.BoolVarP(cmdFlags, &configNoObscure, "no-obscure", "", false, "Force any passwords not to be obscured.")
}
}
var configUpdateCommand = &cobra.Command{
Use: "update `name` [`key` `value`]+",
Short: `Update options in an existing remote.`,
Long: strings.ReplaceAll(`
Long: `
Update an existing remote's options. The options should be passed in
pairs of |key| |value| or as |key=value|.
in pairs of ` + "`key` `value`" + `.
For example to update the env_auth field of a remote of name myremote
you would do:
rclone config update myremote env_auth true
rclone config update myremote env_auth=true
rclone config update myremote swift env_auth true
` + configPasswordHelp + `
If the remote uses OAuth the token will be updated; if you don't
require this, add an extra parameter thus:
rclone config update myremote env_auth=true config_refresh_token=false
`, "|", "`") + configPasswordHelp,
rclone config update myremote swift env_auth true config_refresh_token false
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 256, command, args)
cmd.CheckArgs(3, 256, command, args)
in, err := argsToMap(args[1:])
if err != nil {
return err
}
return doConfig(args[0], in, func(opts config.UpdateRemoteOpt) (*fs.ConfigOut, error) {
return config.UpdateRemote(context.Background(), args[0], in, opts)
})
err = config.UpdateRemote(context.Background(), args[0], in, configObscure, configNoObscure)
if err != nil {
return err
}
config.ShowRemote(args[0])
return nil
},
}
@@ -300,21 +202,19 @@ var configDeleteCommand = &cobra.Command{
var configPasswordCommand = &cobra.Command{
Use: "password `name` [`key` `value`]+",
Short: `Update password in an existing remote.`,
Long: strings.ReplaceAll(`
Long: `
Update an existing remote's password. The password
should be passed in pairs of |key| |password| or as |key=password|.
The |password| should be passed in the clear (unobscured).
should be passed in pairs of ` + "`key` `value`" + `.
For example to set password of a remote of name myremote you would do:
rclone config password myremote fieldname mypassword
rclone config password myremote fieldname=mypassword
This command is obsolete now that "config update" and "config create"
both support obscuring passwords directly.
`, "|", "`"),
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 256, command, args)
cmd.CheckArgs(3, 256, command, args)
in, err := argsToMap(args[1:])
if err != nil {
return err
@@ -328,24 +228,16 @@ both support obscuring passwords directly.
},
}
// This takes a list of arguments in key value key value form, or
// key=value key=value form and converts it into a map
// This takes a list of arguments in key value key value form and
// converts it into a map
func argsToMap(args []string) (out rc.Params, err error) {
if len(args)%2 != 0 {
return nil, errors.New("found key without value")
}
out = rc.Params{}
for i := 0; i < len(args); i++ {
key := args[i]
equals := strings.IndexRune(key, '=')
var value string
if equals >= 0 {
key, value = key[:equals], key[equals+1:]
} else {
i++
if i >= len(args) {
return nil, errors.New("found key without value")
}
value = args[i]
}
out[key] = value
// Set the config
for i := 0; i < len(args); i += 2 {
out[args[i]] = args[i+1]
}
return out, nil
}
@@ -363,11 +255,15 @@ This normally means going through the interactive oauth flow again.
RunE: func(command *cobra.Command, args []string) error {
ctx := context.Background()
cmd.CheckArgs(1, 1, command, args)
fsInfo, configName, _, m, err := fs.ConfigFs(args[0])
fsInfo, configName, _, config, err := fs.ConfigFs(args[0])
if err != nil {
return err
}
return config.PostConfig(ctx, configName, m, fsInfo)
if fsInfo.Config == nil {
return errors.Errorf("%s: doesn't support Reconnect", configName)
}
fsInfo.Config(ctx, configName, config)
return nil
},
}

View File

@@ -1,59 +0,0 @@
package config
import (
"fmt"
"testing"
"github.com/rclone/rclone/fs/rc"
"github.com/stretchr/testify/assert"
)
func TestArgsToMap(t *testing.T) {
for _, test := range []struct {
args []string
want rc.Params
wantErr bool
}{
{
args: []string{},
want: rc.Params{},
},
{
args: []string{"hello", "42"},
want: rc.Params{"hello": "42"},
},
{
args: []string{"hello", "42", "bye", "43"},
want: rc.Params{"hello": "42", "bye": "43"},
},
{
args: []string{"hello=42", "bye", "43"},
want: rc.Params{"hello": "42", "bye": "43"},
},
{
args: []string{"hello", "42", "bye=43"},
want: rc.Params{"hello": "42", "bye": "43"},
},
{
args: []string{"hello=42", "bye=43"},
want: rc.Params{"hello": "42", "bye": "43"},
},
{
args: []string{"hello", "42", "bye", "43", "unused"},
wantErr: true,
},
{
args: []string{"hello=42", "bye=43", "unused"},
wantErr: true,
},
} {
what := fmt.Sprintf("args = %#v", test.args)
got, err := argsToMap(test.args)
if test.wantErr {
assert.Error(t, err, what)
} else {
assert.NoError(t, err, what)
assert.Equal(t, test.want, got, what)
}
}
}

View File

@@ -37,8 +37,8 @@ Download a URL's content and copy it to the destination without saving
it in temporary storage.
Setting ` + "`--auto-filename`" + ` will cause the file name to be retrieved from
the URL (after any redirections) and used in the destination
path. With ` + "`--print-filename`" + ` in addition, the resulting file name will
the from URL (after any redirections) and used in the destination
path. With ` + "`--print-filename`" + ` in addition, the resuling file name will
be printed.
Setting ` + "`--no-clobber`" + ` will prevent overwriting file on the

View File

@@ -0,0 +1,51 @@
package dbhashsum
import (
"context"
"github.com/rclone/rclone/backend/dropbox"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/hashsum"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
hashsum.AddHashFlags(cmdFlags)
}
var commandDefinition = &cobra.Command{
Use: "dbhashsum remote:path",
Short: `Produces a Dropbox hash file for all the objects in the path.`,
Long: `
Produces a Dropbox hash file for all the objects in the path. The
hashes are calculated according to [Dropbox content hash
rules](https://www.dropbox.com/developers/reference/content-hash).
The output is in the same format as md5sum and sha1sum.
By default, the hash is requested from the remote. If Dropbox hash is
not supported by the remote, no hash will be returned. With the
download flag, the file will be downloaded from the remote and
hashed locally enabling Dropbox hash for any remote.
`,
Hidden: true,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
fs.Logf(nil, `"rclone dbhashsum" is deprecated, use "rclone hashsum %v %s" instead`, dropbox.DbHashType, args[0])
cmd.Run(false, false, command, func() error {
if hashsum.HashsumOutfile == "" {
return operations.HashLister(context.Background(), dropbox.DbHashType, hashsum.OutputBase64, hashsum.DownloadFlag, fsrc, nil)
}
output, close, err := hashsum.GetHashsumOutput(hashsum.HashsumOutfile)
if err != nil {
return err
}
defer close()
return operations.HashLister(context.Background(), dropbox.DbHashType, hashsum.OutputBase64, hashsum.DownloadFlag, fsrc, output)
})
},
}

View File

@@ -36,8 +36,8 @@ If you supply the |--rmdirs| flag, it will remove all empty directories along wi
You can also use the separate command |rmdir| or |rmdirs| to
delete empty directories only.
For example, to delete all files bigger than 100 MiB, you may first want to
check what would be deleted (use either):
For example, to delete all files bigger than 100MBytes, you may first want to check what
would be deleted (use either):
rclone --min-size 100M lsl remote:path
rclone --dry-run --min-size 100M delete remote:path
@@ -46,8 +46,8 @@ Then proceed with the actual delete:
rclone --min-size 100M delete remote:path
That reads "delete everything with a minimum size of 100 MiB", hence
delete all files bigger than 100 MiB.
That reads "delete everything with a minimum size of 100 MB", hence
delete all files bigger than 100MBytes.
**Important**: Since this can cause data loss, test first with the
|--dry-run| or the |--interactive|/|-i| flag.

View File

@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"os"
"strings"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
@@ -16,7 +15,7 @@ import (
"github.com/spf13/pflag"
)
// Global hashsum flags for reuse in hashsum, md5sum, sha1sum
// Global hashsum flags for reuse in md5sum, sha1sum, and dbhashsum
var (
OutputBase64 = false
DownloadFlag = false
@@ -29,7 +28,7 @@ func init() {
AddHashFlags(cmdFlags)
}
// AddHashFlags is a convenience function to add the command flags OutputBase64 and DownloadFlag to hashsum, md5sum, sha1sum
// AddHashFlags is a convenience function to add the command flags OutputBase64 and DownloadFlag to hashsum, md5sum, sha1sum, and dbhashsum
func AddHashFlags(cmdFlags *pflag.FlagSet) {
flags.BoolVarP(cmdFlags, &OutputBase64, "base64", "", OutputBase64, "Output base64 encoded hashsum")
flags.StringVarP(cmdFlags, &HashsumOutfile, "output-file", "", HashsumOutfile, "Output hashsums to a file rather than the terminal")
@@ -70,17 +69,23 @@ hashed locally enabling any hash for any remote.
Run without a hash to see the list of all supported hashes, e.g.
$ rclone hashsum
` + hashListHelp(" ") + `
Supported hashes are:
* MD5
* SHA-1
* DropboxHash
* QuickXorHash
Then
$ rclone hashsum MD5 remote:path
Note that hash names are case insensitive.
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 2, command, args)
if len(args) == 0 {
fmt.Print(hashListHelp(""))
fmt.Printf("Supported hashes are:\n")
for _, ht := range hash.Supported().Array() {
fmt.Printf(" * %v\n", ht)
}
return nil
} else if len(args) == 1 {
return errors.New("need hash type and remote")
@@ -88,7 +93,6 @@ Note that hash names are case insensitive.
var ht hash.Type
err := ht.Set(args[0])
if err != nil {
fmt.Println(hashListHelp(""))
return err
}
fsrc := cmd.NewFsSrc(args[1:])
@@ -107,14 +111,3 @@ Note that hash names are case insensitive.
return nil
},
}
func hashListHelp(indent string) string {
var help strings.Builder
help.WriteString(indent)
help.WriteString("Supported hashes are:\n")
for _, ht := range hash.Supported().Array() {
help.WriteString(indent)
fmt.Fprintf(&help, " * %v\n", ht.String())
}
return help.String()
}
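
Both the deprecated `dbhashsum` command above and `hashsum` end up in `operations.HashLister`. A minimal sketch of calling it directly, assuming the six-argument `HashLister` signature used in those hunks (a nil writer means standard output, as in the commands); the local path `/tmp` is illustrative:

package main

import (
    "context"

    _ "github.com/rclone/rclone/backend/local"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/fs/operations"
)

func main() {
    ctx := context.Background()

    var ht hash.Type
    if err := ht.Set("MD5"); err != nil { // same hash-name parsing the hashsum command does
        panic(err)
    }

    f, err := fs.NewFs(ctx, "/tmp") // illustrative local directory
    if err != nil {
        panic(err)
    }

    // base64=false, download=false, nil writer = stdout, mirroring the calls above.
    if err := operations.HashLister(ctx, ht, false, false, f, nil); err != nil {
        panic(err)
    }
}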

View File

@@ -206,9 +206,9 @@ When that happens, it is the user's responsibility to stop the mount manually.
The size of the mounted file system will be set according to information retrieved
from the remote, the same as returned by the [rclone about](https://rclone.org/commands/rclone_about/)
command. Remotes with unlimited storage may report the used size only,
then an additional 1 PiB of free space is assumed. If the remote does not
then an additional 1PB of free space is assumed. If the remote does not
[support](https://rclone.org/overview/#optional-features) the about feature
at all, then 1 PiB is set as both the total and the free size.
at all, then 1PB is set as both the total and the free size.
**Note**: As of |rclone| 1.52.2, |rclone mount| now requires Go version 1.13
or newer on some platforms depending on the underlying FUSE library in use.
@@ -342,38 +342,19 @@ by specifying |-o FileSecurity="D:P(A;;FA;;;OW)"|, for file all access (FA) to t
#### Windows caveats
Drives created as Administrator are not visible to other accounts,
not even an account that was elevated to Administrator with the
User Account Control (UAC) feature. A result of this is that if you mount
to a drive letter from a Command Prompt run as Administrator, and then try
to access the same drive from Windows Explorer (which does not run as
Administrator), you will not be able to see the mounted drive.
Note that drives created as Administrator are not visible by other
accounts (including the account that was elevated as
Administrator). So if you start a Windows drive from an Administrative
Command Prompt and then try to access the same drive from Explorer
(which does not run as Administrator), you will not be able to see the
new drive.
If you don't need to access the drive from applications running with
administrative privileges, the easiest way around this is to always
create the mount from a non-elevated command prompt.
To make mapped drives available to the user account that created them
regardless if elevated or not, there is a special Windows setting called
[linked connections](https://docs.microsoft.com/en-us/troubleshoot/windows-client/networking/mapped-drives-not-available-from-elevated-command#detail-to-configure-the-enablelinkedconnections-registry-entry)
that can be enabled.
It is also possible to make a drive mount available to everyone on the system,
by running the process creating it as the built-in SYSTEM account.
There are several ways to do this: One is to use the command-line
utility [PsExec](https://docs.microsoft.com/en-us/sysinternals/downloads/psexec),
from Microsoft's Sysinternals suite, which has option |-s| to start
processes as the SYSTEM account. Another alternative is to run the mount
command from a Windows Scheduled Task, or a Windows Service, configured
to run as the SYSTEM account. A third alternative is to use the
[WinFsp.Launcher infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Architecture).
Note that when running rclone as another user, it will not use
the configuration file from your profile unless you tell it to
with the [|--config|](https://rclone.org/docs/#config-config-file) option.
Read more in the [install documentation](https://rclone.org/install/).
Note that mapping to a directory path, instead of a drive letter,
does not suffer from the same limitations.
The easiest way around this is to start the drive from a normal
command prompt. It is also possible to start a drive from the SYSTEM
account (using [the WinFsp.Launcher
infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Architecture))
which creates drives accessible for everyone on the system or
alternatively using [the nssm service manager](https://nssm.cc/usage).
### Limitations

View File

@@ -21,7 +21,7 @@ import (
func TestRc(t *testing.T) {
ctx := context.Background()
configfile.Install()
configfile.LoadConfig(ctx)
mount := rc.Calls.Get("mount/mount")
assert.NotNil(t, mount)
unmount := rc.Calls.Get("mount/unmount")

View File

@@ -373,7 +373,7 @@ func (u *UI) Draw() error {
extras := ""
if u.showCounts {
if count > 0 {
extras += fmt.Sprintf("%8v ", fs.CountSuffix(count))
extras += fmt.Sprintf("%8v ", fs.SizeSuffix(count))
} else {
extras += " "
}
@@ -385,9 +385,9 @@ func (u *UI) Draw() error {
}
if u.showDirAverageSize {
if averageSize > 0 {
extras += fmt.Sprintf("%9v ", fs.SizeSuffix(int64(averageSize)))
extras += fmt.Sprintf("%8v ", fs.SizeSuffix(int64(averageSize)))
} else {
extras += " "
extras += " "
}
}
@@ -406,7 +406,7 @@ func (u *UI) Draw() error {
}
extras += "[" + graph[graphBars-bars:2*graphBars-bars] + "] "
}
Linef(0, y, w, fg, bg, ' ', "%c %9v %s%c%s%s", fileFlag, fs.SizeSuffix(size), extras, mark, path.Base(entry.Remote()), message)
Linef(0, y, w, fg, bg, ' ', "%c %8v %s%c%s%s", fileFlag, fs.SizeSuffix(size), extras, mark, path.Base(entry.Remote()), message)
y++
}
}
@@ -485,15 +485,11 @@ func (u *UI) removeEntry(pos int) {
// delete the entry at the current position
func (u *UI) delete() {
if u.d == nil || len(u.entries) == 0 {
return
}
ctx := context.Background()
cursorPos := u.dirPosMap[u.path]
dirPos := u.sortPerm[cursorPos.entry]
dirEntry := u.entries[dirPos]
dirPos := u.sortPerm[u.dirPosMap[u.path].entry]
entry := u.entries[dirPos]
u.boxMenu = []string{"cancel", "confirm"}
if obj, isFile := dirEntry.(fs.Object); isFile {
if obj, isFile := entry.(fs.Object); isFile {
u.boxMenuHandler = func(f fs.Fs, p string, o int) (string, error) {
if o != 1 {
return "Aborted!", nil
@@ -503,33 +499,27 @@ func (u *UI) delete() {
return "", err
}
u.removeEntry(dirPos)
if cursorPos.entry >= len(u.entries) {
u.move(-1) // move back onto a valid entry
}
return "Successfully deleted file!", nil
}
u.popupBox([]string{
"Delete this file?",
u.fsName + dirEntry.String()})
u.fsName + entry.String()})
} else {
u.boxMenuHandler = func(f fs.Fs, p string, o int) (string, error) {
if o != 1 {
return "Aborted!", nil
}
err := operations.Purge(ctx, f, dirEntry.String())
err := operations.Purge(ctx, f, entry.String())
if err != nil {
return "", err
}
u.removeEntry(dirPos)
if cursorPos.entry >= len(u.entries) {
u.move(-1) // move back onto a valid entry
}
return "Successfully purged folder!", nil
}
u.popupBox([]string{
"Purge this directory?",
"ALL files in it will be deleted",
u.fsName + dirEntry.String()})
u.fsName + entry.String()})
}
}

View File

@@ -7,19 +7,12 @@ import (
"time"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
var (
size = int64(-1)
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.Int64VarP(cmdFlags, &size, "size", "", size, "File size hint to preallocate")
}
var commandDefinition = &cobra.Command{
@@ -44,13 +37,6 @@ must fit into RAM. The cutoff needs to be small enough to adhere
the limits of your remote, please see there. Generally speaking,
setting this cutoff too high will decrease your performance.
Use the |--size| flag to preallocate the file in advance at the remote end
and actually stream it, even if remote backend doesn't support streaming.
|--size| should be the exact size of the input stream in bytes. If the
size of the stream differs from the |--size| passed in, then the
transfer will likely fail.
Note that the upload can also not be retried because the data is
not kept around until the upload succeeds. If you need to transfer
a lot of data, you're better off caching locally and then
@@ -65,7 +51,7 @@ a lot of data, you're better off caching locally and then
fdst, dstFileName := cmd.NewFsDstFile(args)
cmd.Run(false, false, command, func() error {
_, err := operations.RcatSize(context.Background(), fdst, dstFileName, os.Stdin, size, time.Now())
_, err := operations.Rcat(context.Background(), fdst, dstFileName, os.Stdin, time.Now())
return err
})
},
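
The `--size` help shown above corresponds to the two operations calls visible in the last hunk: `RcatSize` when the stream length is known up front and `Rcat` when it is not. A minimal sketch of making the same choice programmatically, assuming only those two signatures; the destination `/tmp` and the file name `out.bin` are illustrative:

package main

import (
    "context"
    "os"
    "time"

    _ "github.com/rclone/rclone/backend/local"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/operations"
)

func main() {
    ctx := context.Background()

    fdst, err := fs.NewFs(ctx, "/tmp") // illustrative destination
    if err != nil {
        panic(err)
    }

    size := int64(-1) // -1 means "length unknown", matching the flag default above
    if size >= 0 {
        // Known length: RcatSize lets the backend preallocate and stream the exact size.
        _, err = operations.RcatSize(ctx, fdst, "out.bin", os.Stdin, size, time.Now())
    } else {
        // Unknown length: Rcat has to buffer enough of the stream to decide how to upload it.
        _, err = operations.Rcat(ctx, fdst, "out.bin", os.Stdin, time.Now())
    }
    if err != nil {
        panic(err)
    }
}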

View File

@@ -337,8 +337,8 @@ func makeRandomExeName(baseName, extension string) (string, error) {
func downloadUpdate(ctx context.Context, beta bool, version, siteURL, newFile, packageFormat string) error {
osName := runtime.GOOS
arch := runtime.GOARCH
if osName == "darwin" {
osName = "osx"
if arch == "darwin" {
arch = "osx"
}
archiveFilename := fmt.Sprintf("rclone-%s-%s-%s.%s", version, osName, arch, packageFormat)

View File

@@ -41,7 +41,7 @@ func startServer(t *testing.T, f fs.Fs) {
}
func TestInit(t *testing.T) {
configfile.Install()
configfile.LoadConfig(context.Background())
f, err := fs.NewFs(context.Background(), "testdata/files")
l, _ := f.List(context.Background(), "")

View File

@@ -1,94 +0,0 @@
//go:generate go run assets_generate.go
// The "go:generate" directive compiles static assets by running assets_generate.go
package data
import (
"html/template"
"io/ioutil"
"time"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
)
// Help describes the options for the serve package
var Help = `--template allows a user to specify a custom markup template for http
and webdav serve functions. The server exports the following markup
to be used within the template to serve pages:
| Parameter | Description |
| :---------- | :---------- |
| .Name | The full path of a file/directory. |
| .Title | Directory listing of .Name |
| .Sort | The current sort used. This is changeable via ?sort= parameter |
| | Sort Options: namedirfirst,name,size,time (default namedirfirst) |
| .Order | The current ordering used. This is changeable via ?order= parameter |
| | Order Options: asc,desc (default asc) |
| .Query | Currently unused. |
| .Breadcrumb | Allows for creating a relative navigation |
|-- .Link | The relative to the root link of the Text. |
|-- .Text | The Name of the directory. |
| .Entries | Information about a specific file/directory. |
|-- .URL | The 'url' of an entry. |
|-- .Leaf | Currently same as 'URL' but intended to be 'just' the name. |
|-- .IsDir | Boolean for if an entry is a directory or not. |
|-- .Size | Size in Bytes of the entry. |
|-- .ModTime | The UTC timestamp of an entry. |
`
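
To make the parameter table above concrete, here is a minimal, self-contained sketch of a custom template that uses only the documented `.Title` and `.Entries` fields; the `entry`/`page` structs are stand-ins for whatever the server actually passes and are not part of rclone:

package main

import (
    "html/template"
    "os"
)

// Illustrative data shaped like the parameters documented above.
type entry struct {
    URL, Leaf string
    IsDir     bool
    Size      int64
}

type page struct {
    Name, Title string
    Entries     []entry
}

const tmpl = `<h1>{{.Title}}</h1>
<ul>{{range .Entries}}<li><a href="{{.URL}}">{{.Leaf}}</a>{{if not .IsDir}} ({{.Size}} bytes){{end}}</li>{{end}}</ul>`

func main() {
    t := template.Must(template.New("index").Parse(tmpl))
    data := page{
        Name:  "/",
        Title: "Directory listing of /",
        Entries: []entry{
            {URL: "docs/", Leaf: "docs/", IsDir: true},
            {URL: "readme.txt", Leaf: "readme.txt", Size: 42},
        },
    }
    if err := t.Execute(os.Stdout, data); err != nil {
        panic(err)
    }
}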
// Options for the templating functionality
type Options struct {
Template string
}
// AddFlags for the templating functionality
func AddFlags(flagSet *pflag.FlagSet, prefix string, Opt *Options) {
flags.StringVarP(flagSet, &Opt.Template, prefix+"template", "", Opt.Template, "User Specified Template.")
}
// AfterEpoch returns the time since the epoch for the given time
func AfterEpoch(t time.Time) bool {
return t.After(time.Time{})
}
// GetTemplate returns the HTML template for serving directories via HTTP/Webdav
func GetTemplate(tmpl string) (tpl *template.Template, err error) {
var templateString string
if tmpl == "" {
templateFile, err := Assets.Open("index.html")
if err != nil {
return nil, errors.Wrap(err, "get template open")
}
defer fs.CheckClose(templateFile, &err)
templateBytes, err := ioutil.ReadAll(templateFile)
if err != nil {
return nil, errors.Wrap(err, "get template read")
}
templateString = string(templateBytes)
} else {
templateFile, err := ioutil.ReadFile(tmpl)
if err != nil {
return nil, errors.Wrap(err, "get template open")
}
templateString = string(templateFile)
}
funcMap := template.FuncMap{
"afterEpoch": AfterEpoch,
}
tpl, err = template.New("index").Funcs(funcMap).Parse(templateString)
if err != nil {
return nil, errors.Wrap(err, "get template parse")
}
return
}

View File

@@ -1,9 +1,7 @@
package http
import (
"html/template"
"io"
"log"
"net/http"
"os"
"path"
@@ -11,35 +9,19 @@ import (
"strings"
"time"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve/http/data"
"github.com/rclone/rclone/cmd/serve/httplib"
"github.com/rclone/rclone/cmd/serve/httplib/httpflags"
"github.com/rclone/rclone/cmd/serve/httplib/serve"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
httplib "github.com/rclone/rclone/lib/http"
"github.com/rclone/rclone/lib/http/auth"
"github.com/rclone/rclone/lib/http/serve"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/spf13/cobra"
)
// Options required for http server
type Options struct {
data.Options
}
// DefaultOpt is the default values used for Options
var DefaultOpt = Options{}
// Opt is options set by command line flags
var Opt = DefaultOpt
func init() {
data.AddFlags(Command.Flags(), "", &Opt.Options)
httplib.AddFlags(Command.Flags())
auth.AddFlags(Command.Flags())
httpflags.AddFlags(Command.Flags())
vfsflags.AddFlags(Command.Flags())
}
@@ -58,17 +40,17 @@ The server will log errors. Use -v to see access logs.
--bwlimit will be respected for file transfers. Use --stats to
control the stats printing.
` + httplib.Help + data.Help + auth.Help + vfs.Help,
` + httplib.Help + vfs.Help,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
f := cmd.NewFsSrc(args)
cmd.Run(false, true, command, func() error {
s := newServer(f, Opt.Template)
router, err := httplib.Router()
s := newServer(f, &httpflags.Opt)
err := s.Serve()
if err != nil {
return err
}
s.Bind(router)
s.Wait()
return nil
})
},
@@ -76,37 +58,49 @@ control the stats printing.
// server contains everything to run the server
type server struct {
f fs.Fs
vfs *vfs.VFS
HTMLTemplate *template.Template // HTML template for web interface
*httplib.Server
f fs.Fs
vfs *vfs.VFS
}
func newServer(f fs.Fs, templatePath string) *server {
htmlTemplate, templateErr := data.GetTemplate(templatePath)
if templateErr != nil {
log.Fatalf(templateErr.Error())
}
func newServer(f fs.Fs, opt *httplib.Options) *server {
mux := http.NewServeMux()
s := &server{
f: f,
vfs: vfs.New(f, &vfsflags.Opt),
HTMLTemplate: htmlTemplate,
Server: httplib.NewServer(mux, opt),
f: f,
vfs: vfs.New(f, &vfsflags.Opt),
}
mux.HandleFunc(s.Opt.BaseURL+"/", s.handler)
return s
}
func (s *server) Bind(router chi.Router) {
router.Use(
middleware.SetHeader("Accept-Ranges", "bytes"),
middleware.SetHeader("Server", "rclone/"+fs.Version),
)
router.Get("/*", s.handler)
router.Head("/*", s.handler)
// Serve runs the http server in the background.
//
// Use s.Close() and s.Wait() to shutdown server
func (s *server) Serve() error {
err := s.Server.Serve()
if err != nil {
return err
}
fs.Logf(s.f, "Serving on %s", s.URL())
return nil
}
// handler reads incoming requests and dispatches them
func (s *server) handler(w http.ResponseWriter, r *http.Request) {
isDir := strings.HasSuffix(r.URL.Path, "/")
remote := strings.Trim(r.URL.Path, "/")
if r.Method != "GET" && r.Method != "HEAD" {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
w.Header().Set("Accept-Ranges", "bytes")
w.Header().Set("Server", "rclone/"+fs.Version)
urlPath, ok := s.Path(w, r)
if !ok {
return
}
isDir := strings.HasSuffix(urlPath, "/")
remote := strings.Trim(urlPath, "/")
if isDir {
s.serveDir(w, r, remote)
} else {

View File

@@ -10,10 +10,10 @@ import (
"time"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/httplib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/filter"
httplib "github.com/rclone/rclone/lib/http"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -32,13 +32,10 @@ const (
func startServer(t *testing.T, f fs.Fs) {
opt := httplib.DefaultOpt
opt.ListenAddr = testBindAddress
httpServer = newServer(f, testTemplate)
router, err := httplib.Router()
if err != nil {
t.Fatal(err.Error())
}
httpServer.Bind(router)
testURL = httplib.URL()
opt.Template = testTemplate
httpServer = newServer(f, &opt)
assert.NoError(t, httpServer.Serve())
testURL = httpServer.Server.URL()
// try to connect to the test server
pause := time.Millisecond
@@ -64,7 +61,7 @@ var (
func TestInit(t *testing.T) {
ctx := context.Background()
// Configure the remote
configfile.Install()
configfile.LoadConfig(context.Background())
// fs.Config.LogLevel = fs.LogLevelDebug
// fs.Config.DumpHeaders = true
// fs.Config.DumpBodies = true
@@ -230,5 +227,6 @@ func TestGET(t *testing.T) {
}
func TestFinalise(t *testing.T) {
_ = httplib.Shutdown()
httpServer.Close()
httpServer.Wait()
}

View File

@@ -1 +1 @@
Method Not Allowed
Method not allowed

View File

@@ -1 +1 @@
Method Not Allowed
Method not allowed

View File

@@ -1,6 +1,4 @@
// Package httplib provides common functionality for http servers
//
// Deprecated: httplib has been replaced with lib/http
package httplib
import (
@@ -19,7 +17,7 @@ import (
auth "github.com/abbot/go-http-auth"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/serve/http/data"
"github.com/rclone/rclone/cmd/serve/httplib/serve/data"
"github.com/rclone/rclone/fs"
)

View File

@@ -0,0 +1,56 @@
//go:generate go run assets_generate.go
// The "go:generate" directive compiles static assets by running assets_generate.go
package data
import (
"html/template"
"io/ioutil"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)
// AfterEpoch returns the time since the epoch for the given time
func AfterEpoch(t time.Time) bool {
return t.After(time.Time{})
}
// GetTemplate returns the HTML template for serving directories via HTTP/Webdav
func GetTemplate(tmpl string) (tpl *template.Template, err error) {
var templateString string
if tmpl == "" {
templateFile, err := Assets.Open("index.html")
if err != nil {
return nil, errors.Wrap(err, "get template open")
}
defer fs.CheckClose(templateFile, &err)
templateBytes, err := ioutil.ReadAll(templateFile)
if err != nil {
return nil, errors.Wrap(err, "get template read")
}
templateString = string(templateBytes)
} else {
templateFile, err := ioutil.ReadFile(tmpl)
if err != nil {
return nil, errors.Wrap(err, "get template open")
}
templateString = string(templateFile)
}
funcMap := template.FuncMap{
"afterEpoch": AfterEpoch,
}
tpl, err = template.New("index").Funcs(funcMap).Parse(templateString)
if err != nil {
return nil, errors.Wrap(err, "get template parse")
}
return
}

View File

@@ -367,7 +367,7 @@ footer {
}
};
function readableFileSize(size) {
var units = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'];
var units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'];
var i = 0;
while(size >= 1024) {
size /= 1024;

View File

@@ -10,14 +10,13 @@ import (
"testing"
"time"
"github.com/rclone/rclone/cmd/serve/httplib/serve/data"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/rclone/rclone/cmd/serve/http/data"
)
func GetTemplate(t *testing.T) *template.Template {
htmlTemplate, err := data.GetTemplate("../../../cmd/serve/http/testdata/golden/testindex.html")
htmlTemplate, err := data.GetTemplate("../../http/testdata/golden/testindex.html")
require.NoError(t, err)
return htmlTemplate
}

View File

@@ -15,13 +15,13 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve/httplib"
"github.com/rclone/rclone/cmd/serve/httplib/httpflags"
"github.com/rclone/rclone/cmd/serve/httplib/serve"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/http/serve"
"github.com/rclone/rclone/lib/terminal"
"github.com/spf13/cobra"
"golang.org/x/net/http2"

View File

@@ -1,6 +1,7 @@
package restic
import (
"context"
"crypto/rand"
"encoding/hex"
"io"
@@ -64,7 +65,8 @@ func createOverwriteDeleteSeq(t testing.TB, path string) []TestRequest {
// TestResticHandler runs tests on the restic handler code, especially in append-only mode.
func TestResticHandler(t *testing.T) {
configfile.Install()
ctx := context.Background()
configfile.LoadConfig(ctx)
buf := make([]byte, 32)
_, err := io.ReadFull(rand.Reader, buf)
require.NoError(t, err)

View File

@@ -7,7 +7,6 @@ import (
"fmt"
"io"
"net"
"os"
"regexp"
"strings"
@@ -15,9 +14,7 @@ import (
"github.com/pkg/sftp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/terminal"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"golang.org/x/crypto/ssh"
)
@@ -228,8 +225,19 @@ func (c *conn) handleChannel(newChannel ssh.NewChannel) {
// Wait for either subsystem "sftp" or "exec" request
if <-isSFTP {
if err := serveChannel(channel, c.handlers, c.what); err != nil {
fs.Errorf(c.what, "Failed to serve SFTP: %v", err)
fs.Debugf(c.what, "Starting SFTP server")
server := sftp.NewRequestServer(channel, c.handlers)
defer func() {
err := server.Close()
if err != nil && err != io.EOF {
fs.Debugf(c.what, "Failed to close server: %v", err)
}
}()
err = server.Serve()
if err == io.EOF || err == nil {
fs.Debugf(c.what, "exited session")
} else {
fs.Errorf(c.what, "completed with error: %v", err)
}
} else {
var rc = uint32(0)
@@ -255,54 +263,3 @@ func (c *conn) handleChannels(chans <-chan ssh.NewChannel) {
go c.handleChannel(newChannel)
}
}
func serveChannel(rwc io.ReadWriteCloser, h sftp.Handlers, what string) error {
fs.Debugf(what, "Starting SFTP server")
server := sftp.NewRequestServer(rwc, h)
defer func() {
err := server.Close()
if err != nil && err != io.EOF {
fs.Debugf(what, "Failed to close server: %v", err)
}
}()
err := server.Serve()
if err != nil && err != io.EOF {
return errors.Wrap(err, "completed with error")
}
fs.Debugf(what, "exited session")
return nil
}
func serveStdio(f fs.Fs) error {
if terminal.IsTerminal(int(os.Stdout.Fd())) {
return errors.New("refusing to run SFTP server directly on a terminal. Please let sshd start rclone, by connecting with sftp or sshfs")
}
sshChannel := &stdioChannel{
stdin: os.Stdin,
stdout: os.Stdout,
}
handlers := newVFSHandler(vfs.New(f, &vfsflags.Opt))
return serveChannel(sshChannel, handlers, "stdio")
}
type stdioChannel struct {
stdin *os.File
stdout *os.File
}
func (c *stdioChannel) Read(data []byte) (int, error) {
return c.stdin.Read(data)
}
func (c *stdioChannel) Write(data []byte) (int, error) {
return c.stdout.Write(data)
}
func (c *stdioChannel) Close() error {
err1 := c.stdin.Close()
err2 := c.stdout.Close()
if err1 != nil {
return err1
}
return err2
}

View File

@@ -27,7 +27,6 @@ type Options struct {
User string // single username
Pass string // password for user
NoAuth bool // allow no authentication on connections
Stdio bool // serve on stdio
}
// DefaultOpt is the default values used for Options
@@ -48,7 +47,6 @@ func AddFlags(flagSet *pflag.FlagSet, Opt *Options) {
flags.StringVarP(flagSet, &Opt.User, "user", "", Opt.User, "User name for authentication.")
flags.StringVarP(flagSet, &Opt.Pass, "pass", "", Opt.Pass, "Password for authentication.")
flags.BoolVarP(flagSet, &Opt.NoAuth, "no-auth", "", Opt.NoAuth, "Allow connections with no authentication if set.")
flags.BoolVarP(flagSet, &Opt.Stdio, "stdio", "", Opt.Stdio, "Run an sftp server on stdin/stdout")
}
func init() {
@@ -92,11 +90,6 @@ reachable externally then supply "--addr :2022" for example.
Note that the default of "--vfs-cache-mode off" is fine for the rclone
sftp backend, but it may not be with other SFTP clients.
If --stdio is specified, rclone will serve SFTP over stdio, which can
be used with sshd via ~/.ssh/authorized_keys, for example:
restrict,command="rclone serve sftp --stdio ./photos" ssh-rsa ...
` + vfs.Help + proxy.Help,
Run: func(command *cobra.Command, args []string) {
var f fs.Fs
@@ -107,9 +100,6 @@ be used with sshd via ~/.ssh/authorized_keys, for example:
cmd.CheckArgs(0, 0, command, args)
}
cmd.Run(false, true, command, func() error {
if Opt.Stdio {
return serveStdio(f)
}
s := newServer(context.Background(), f, &Opt)
err := s.Serve()
if err != nil {

View File

@@ -11,13 +11,13 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve/httplib"
"github.com/rclone/rclone/cmd/serve/httplib/httpflags"
"github.com/rclone/rclone/cmd/serve/httplib/serve"
"github.com/rclone/rclone/cmd/serve/proxy"
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/errors"
"github.com/rclone/rclone/lib/http/serve"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/spf13/cobra"

Some files were not shown because too many files have changed in this diff.