Mirror of https://github.com/rclone/rclone.git

Compare commits: fix-mega-b ... fix-freebs (121 commits)
Commit SHA1s:

cebc3d7cf0 50bb9b7bdd 4537d9b5cf 684dbe0e9d 572c1079a5 cb97239a60 e48145f959
2150cf7362 707e51eac7 0d10640aaa f4746f5064 c05bb63f96 e2773b3b4e d3b0bed091
33c80bbb96 705e4694ed 4fbc90d115 ed39adc65b 162fdfe455 8f33c932f2 4195bd7880
d72f3e31c0 11f44cff50 c3751e9a50 420ae905b5 a7d65bd519 1db31d7149 4641bd5116
7e602dbf39 e14d968f8d e0eeeaafcd d46f8d0ae5 1e6278556c 303f4ee152 2fe8285f89
f5443ac939 7cf056b2c2 75a6c49f87 19229b1215 b5bb4c2a21 479c803fd9 3dcf1e61cf
3da1cbfc81 0c9a8cf776 f3871377c3 cc9a7dc073 b61dd809ee f158a398f3 acefa5c40d
2784c3234b c21a4fee58 358f5a8084 9115752679 51efb349ac e0d9314059 21c6babdbb
5beeac7959 be5392f448 c00dcb7e67 6150ae89d6 1e423d21e1 53d55ae760 5928704e1b
5ddfa9f7f6 9b5308144f 4b20afa94a 049ff1f269 3f7af64316 0eaf5475ef 7bf056316f
520ddbcceb 1ce1ea34aa e6378daadf 7ff95c6250 6d58d9a86f e0356f5aae 191cfb79d1
e81eca4055 ee3215ac76 199ac61bde a40cc1167d c57ea8d867 1868c77e16 378a3f4133
daff5a824e 6fabf476cf ab895390f4 a3a5857874 0f0079ff71 18c029e0f0 7eee2f904a
3ef0c73826 59026c4761 76f5e273d2 2bbfcc74e9 ba7c2ac443 2d9b8cb981 2e50543053
22bf8589cd 0871c57f1b 0c265713fd 9cb549a227 13e46c4b3f d40972bf1a b002ff8d54
38652d046d 0b6cdb7677 543100070a e337cae0c5 90a23ae01b dd150efdd7 af05e290cf
f9f9d5029b 7d3b67f6cc 929f275ae5 c526bdb579 1b2ffbeca0 19429083ad 6e378d7d32
1fe1a19339 b63e9befe8
.github/workflows/build.yml (vendored), 4 changes

@@ -102,7 +102,7 @@ jobs:
     steps:
       - name: Checkout
-        uses: actions/checkout@master
+        uses: actions/checkout@v1
        with:
          path: ./src/github.com/${{ github.repository }}

@@ -211,7 +211,7 @@ jobs:
     steps:

      - name: Checkout
-       uses: actions/checkout@master
+       uses: actions/checkout@v1
       with:
         path: ./src/github.com/${{ github.repository }}
@@ -12,6 +12,7 @@ Current active maintainers of rclone are:
 | Alex Chen | @Cnly | onedrive backend |
 | Sandeep Ummadi | @sandeepkru | azureblob backend |
 | Sebastian Bünger | @buengese | jottacloud & yandex backends |
+| Ivan Andreev | @ivandeex | chunker & mailru backends |

 **This is a work in progress Draft**
MANUAL.html (generated), 14058 changes: file diff suppressed because one or more lines are too long

MANUAL.txt (generated), 14069 changes: file diff suppressed because it is too large
Makefile, 8 changes

@@ -46,7 +46,8 @@ endif
 rclone:
 	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
 	mkdir -p `go env GOPATH`/bin/
-	cp -av rclone`go env GOEXE` `go env GOPATH`/bin/
+	cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
+	mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`

 test_all:
 	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all

@@ -96,6 +97,11 @@ update:
 	GO111MODULE=on go mod tidy
 	GO111MODULE=on go mod vendor

+# Tidy the module dependencies
+tidy:
+	GO111MODULE=on go mod tidy
+	GO111MODULE=on go mod vendor
+
 doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs

 rclone.1: MANUAL.md
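The new copy-then-rename install step avoids clobbering a binary that may be in use: the fresh build lands next to the target as `.new` and is renamed over it, which is atomic on the same filesystem. A minimal Go sketch of the same pattern (a hypothetical helper, not part of this Makefile):

```go
package main

import (
	"io"
	"os"
)

// installAtomically copies src over dst by writing a sibling temp file
// and renaming it into place; rename is atomic on POSIX filesystems.
func installAtomically(src, dst string) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()

	tmp := dst + ".new"
	out, err := os.OpenFile(tmp, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0755)
	if err != nil {
		return err
	}
	if _, err = io.Copy(out, in); err != nil {
		out.Close()
		return err
	}
	if err = out.Close(); err != nil {
		return err
	}
	// Atomic swap: existing readers keep the old inode, new opens see the new file.
	return os.Rename(tmp, dst)
}
```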
@@ -15,6 +15,7 @@ This file describes how to make the various kinds of releases
 * make test # see integration test server or run locally
 * make tag
 * edit docs/content/changelog.md
+* make tidy
 * make doc
 * git status - to check for new man pages - git add them
 * git commit -a -v -m "Version v1.XX.0"

@@ -88,7 +89,7 @@ Now
 * make TAG=${NEW_TAG} upload_github
 * NB this overwrites the current beta so we need to do this
 * git co master
-* make LAST_TAG=${NEW_TAG} startdev
+* make VERSION=${NEW_TAG} startdev
 * # cherry pick the changes to the changelog and VERSION
 * git checkout ${BASE_TAG}-fixes VERSION docs/content/changelog.md
 * git commit --amend
@@ -312,6 +312,9 @@ func httpClientFactory(client *http.Client) pipeline.Factory {
 //
 // this code was copied from azblob.NewPipeline
 func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline.Pipeline {
+	// Don't log stuff to syslog/Windows Event log
+	pipeline.SetForceLogEnabled(false)
+
 	// Closest to API goes first; closest to the wire goes last
 	factories := []pipeline.Factory{
 		azblob.NewTelemetryPolicyFactory(o.Telemetry),

@@ -1375,6 +1375,12 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
 	if o.sha1 == "" || o.sha1 == "none" {
 		o.sha1 = Info[sha1Key]
 	}
+	// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
+	// Some tools (eg Cyberduck) use this
+	const unverified = "unverified:"
+	if strings.HasPrefix(o.sha1, unverified) {
+		o.sha1 = o.sha1[len(unverified):]
+	}
 	o.size = Size
 	// Use the UploadTimestamp if can't get file info
 	o.modTime = time.Time(UploadTimestamp)
backend/cache/storage_persistent.go (vendored), 2 changes

@@ -16,7 +16,7 @@ import (
 	"sync"
 	"time"

-	bolt "github.com/coreos/bbolt"
+	bolt "github.com/etcd-io/bbolt"
 	"github.com/pkg/errors"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/walk"
@@ -63,6 +63,7 @@ func init() {
 			Name:       "password",
 			Help:       "Password or pass phrase for encryption.",
 			IsPassword: true,
+			Required:   true,
 		}, {
 			Name: "password2",
 			Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
@@ -10,6 +10,7 @@ package drive
 import (
 	"bytes"
 	"context"
+	"crypto/tls"
 	"fmt"
 	"io"
 	"io/ioutil"

@@ -213,7 +214,15 @@ func init() {
 			}},
 		}, {
 			Name: "root_folder_id",
-			Help: "ID of the root folder\nLeave blank normally.\nFill in to access \"Computers\" folders. (see docs).",
+			Help: `ID of the root folder
+Leave blank normally.
+
+Fill in to access "Computers" folders (see docs), or for rclone to use
+a non root folder as its starting point.
+
+Note that if this is blank, the first time rclone runs it will fill it
+in with the ID of the root folder.
+`,
 		}, {
 			Name: "service_account_file",
 			Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",

@@ -317,6 +326,17 @@ Photos folder" option in your google drive settings. You can then copy
 or move the photos locally and use the date the image was taken
 (created) set as the modification date.`,
 			Advanced: true,
+		}, {
+			Name:    "use_shared_date",
+			Default: false,
+			Help: `Use date file was shared instead of modified date.
+
+Note that, as with "--drive-use-created-date", this flag may have
+unexpected consequences when uploading/downloading files.
+
+If both this flag and "--drive-use-created-date" are set, the created
+date is used.`,
+			Advanced: true,
 		}, {
 			Name:    "list_chunk",
 			Default: 1000,

@@ -402,9 +422,23 @@ older versions that have been set to keep forever.`,

 This can be useful if you wish to do a server side copy between two
 different Google drives. Note that this isn't enabled by default
-because it isn't easy to tell if it will work beween any two
+because it isn't easy to tell if it will work between any two
 configurations.`,
 			Advanced: true,
+		}, {
+			Name:    "disable_http2",
+			Default: true,
+			Help: `Disable drive using http2

+There is currently an unsolved issue with the google drive backend and
+HTTP/2. HTTP/2 is therefore disabled by default for the drive backend
+but can be re-enabled here. When the issue is solved this flag will
+be removed.
+
+See: https://github.com/rclone/rclone/issues/3631
+
+`,
+			Advanced: true,
 		}},
 	})

@@ -440,6 +474,7 @@ type Options struct {
 	ImportExtensions          string        `config:"import_formats"`
 	AllowImportNameChange     bool          `config:"allow_import_name_change"`
 	UseCreatedDate            bool          `config:"use_created_date"`
+	UseSharedDate             bool          `config:"use_shared_date"`
 	ListChunk                 int64         `config:"list_chunk"`
 	Impersonate               string        `config:"impersonate"`
 	AlternateExport           bool          `config:"alternate_export"`

@@ -452,6 +487,7 @@ type Options struct {
 	PacerMinSleep             fs.Duration   `config:"pacer_min_sleep"`
 	PacerBurst                int           `config:"pacer_burst"`
 	ServerSideAcrossConfigs   bool          `config:"server_side_across_configs"`
+	DisableHTTP2              bool          `config:"disable_http2"`
 }

 // Fs represents a remote drive server

@@ -565,6 +601,23 @@ func containsString(slice []string, s string) bool {
 	return false
 }

+// getRootID returns the canonical ID for the "root" ID
+func (f *Fs) getRootID() (string, error) {
+	var info *drive.File
+	var err error
+	err = f.pacer.CallNoRetry(func() (bool, error) {
+		info, err = f.svc.Files.Get("root").
+			Fields("id").
+			SupportsAllDrives(true).
+			Do()
+		return shouldRetry(err)
+	})
+	if err != nil {
+		return "", errors.Wrap(err, "couldn't find root directory ID")
+	}
+	return info.Id, nil
+}
+
 // Lists the directory required calling the user function on each item found
 //
 // If the user fn ever returns true then it early exits with found = true

@@ -653,6 +706,9 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
 	if f.opt.AuthOwnerOnly {
 		fields += ",owners"
 	}
+	if f.opt.UseSharedDate {
+		fields += ",sharedWithMeTime"
+	}
 	if f.opt.SkipChecksumGphotos {
 		fields += ",spaces"
 	}

@@ -789,7 +845,7 @@ func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name
 	} else {
 		fmt.Printf("Change current team drive ID %q?\n", opt.TeamDriveID)
 	}
-	if !config.Confirm() {
+	if !config.Confirm(false) {
 		return nil
 	}
 	client, err := createOAuthClient(opt, name, m)

@@ -840,6 +896,18 @@ func newPacer(opt *Options) *fs.Pacer {
 	return fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst)))
 }

+// getClient makes an http client according to the options
+func getClient(opt *Options) *http.Client {
+	t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) {
+		if opt.DisableHTTP2 {
+			t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
+		}
+	})
+	return &http.Client{
+		Transport: t,
+	}
+}
+
 func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
 	scopes := driveScopes(opt.Scope)
 	conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
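Setting `Transport.TLSNextProto` to a non-nil empty map is the documented way to turn off HTTP/2 in Go's net/http, since the h2 upgrade is negotiated through that hook. A self-contained sketch of the idea outside rclone's helpers (the URL is just an example):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

func main() {
	// A non-nil, empty TLSNextProto map disables HTTP/2 support:
	// the transport never advertises "h2" during ALPN negotiation.
	t := &http.Transport{
		TLSNextProto: map[string]func(string, *tls.Conn) http.RoundTripper{},
	}
	client := &http.Client{Transport: t}

	resp, err := client.Get("https://www.googleapis.com/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Proto) // HTTP/1.1 rather than HTTP/2.0
}
```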
@@ -849,7 +917,7 @@ func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client
 	if opt.Impersonate != "" {
 		conf.Subject = opt.Impersonate
 	}
-	ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
+	ctxWithSpecialClient := oauthutil.Context(getClient(opt))
 	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
 }

@@ -871,7 +939,7 @@ func createOAuthClient(opt *Options, name string, m configmap.Mapper) (*http.Cli
 			return nil, errors.Wrap(err, "failed to create oauth client from service account")
 		}
 	} else {
-		oAuthClient, _, err = oauthutil.NewClient(name, m, driveConfig)
+		oAuthClient, _, err = oauthutil.NewClientWithBaseClient(name, m, driveConfig, getClient(opt))
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to create oauth client")
 		}

@@ -968,15 +1036,25 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
 	}

 	// set root folder for a team drive or query the user root folder
-	if f.isTeamDrive {
+	if opt.RootFolderID != "" {
+		// override root folder if set or cached in the config
+		f.rootFolderID = opt.RootFolderID
+	} else if f.isTeamDrive {
 		f.rootFolderID = f.opt.TeamDriveID
 	} else {
-		f.rootFolderID = "root"
-	}
-
-	// override root folder if set in the config
-	if opt.RootFolderID != "" {
-		f.rootFolderID = opt.RootFolderID
+		// Look up the root ID and cache it in the config
+		rootID, err := f.getRootID()
+		if err != nil {
+			if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
+				// 404 means that this scope does not have permission to get the
+				// root so just use "root"
+				rootID = "root"
+			} else {
+				return nil, err
+			}
+		}
+		f.rootFolderID = rootID
+		m.Set("root_folder_id", rootID)
 	}

 	f.dirCache = dircache.New(root, f.rootFolderID, f)

@@ -1032,6 +1110,8 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
 	modifiedDate := info.ModifiedTime
 	if f.opt.UseCreatedDate {
 		modifiedDate = info.CreatedTime
+	} else if f.opt.UseSharedDate && info.SharedWithMeTime != "" {
+		modifiedDate = info.SharedWithMeTime
 	}
 	size := info.Size
 	if f.opt.SizeAsQuota {

@@ -1400,6 +1480,14 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 	if iErr != nil {
 		return nil, iErr
 	}
+	// If listing the root of a teamdrive and got no entries,
+	// double check we have access
+	if f.isTeamDrive && len(entries) == 0 && f.root == "" && dir == "" {
+		err = f.teamDriveOK(ctx)
+		if err != nil {
+			return nil, err
+		}
+	}
 	return entries, nil
 }

@@ -1458,6 +1546,10 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan list
 		listRSlices{dirs, paths}.Sort()
 		var iErr error
 		_, err := f.list(ctx, dirs, "", false, false, false, func(item *drive.File) bool {
+			// shared with me items have no parents when at the root
+			if f.opt.SharedWithMe && len(item.Parents) == 0 && len(paths) == 1 && paths[0] == "" {
+				item.Parents = dirs
+			}
 			for _, parent := range item.Parents {
 				// only handle parents that are in the requested dirs list
 				i := sort.SearchStrings(dirs, parent)

@@ -1526,20 +1618,6 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 	if err != nil {
 		return err
 	}
-	if directoryID == "root" {
-		var info *drive.File
-		err = f.pacer.CallNoRetry(func() (bool, error) {
-			info, err = f.svc.Files.Get("root").
-				Fields("id").
-				SupportsAllDrives(true).
-				Do()
-			return shouldRetry(err)
-		})
-		if err != nil {
-			return err
-		}
-		directoryID = info.Id
-	}

 	mu := sync.Mutex{} // protects in and overflow
 	wg := sync.WaitGroup{}

@@ -1547,6 +1625,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 	out := make(chan error, fs.Config.Checkers)
 	list := walk.NewListRHelper(callback)
 	overflow := []listREntry{}
+	listed := 0

 	cb := func(entry fs.DirEntry) error {
 		mu.Lock()

@@ -1559,6 +1638,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 				overflow = append(overflow, listREntry{d.ID(), d.Remote()})
 			}
 		}
+		listed++
 		return list.Add(entry)
 	}

@@ -1615,7 +1695,21 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 		return err
 	}

-	return list.Flush()
+	err = list.Flush()
+	if err != nil {
+		return err
+	}
+
+	// If listing the root of a teamdrive and got no entries,
+	// double check we have access
+	if f.isTeamDrive && listed == 0 && f.root == "" && dir == "" {
+		err = f.teamDriveOK(ctx)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
 }

 // itemToDirEntry converts a drive.File to a fs.DirEntry.

@@ -1988,9 +2082,30 @@ func (f *Fs) CleanUp(ctx context.Context) error {
 	return nil
 }

+// teamDriveOK checks to see if we can access the team drive
+func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
+	if !f.isTeamDrive {
+		return nil
+	}
+	var td *drive.Drive
+	err = f.pacer.Call(func() (bool, error) {
+		td, err = f.svc.Drives.Get(f.opt.TeamDriveID).Fields("name,id,capabilities,createdTime,restrictions").Context(ctx).Do()
+		return shouldRetry(err)
+	})
+	if err != nil {
+		return errors.Wrap(err, "failed to get Team/Shared Drive info")
+	}
+	fs.Debugf(f, "read info from team drive %q", td.Name)
+	return err
+}
+
 // About gets quota information
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	if f.isTeamDrive {
+		err := f.teamDriveOK(ctx)
+		if err != nil {
+			return nil, err
+		}
 		// Teamdrives don't appear to have a usage API so just return empty
 		return &fs.Usage{}, nil
 	}

@@ -2267,9 +2382,11 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
 func (f *Fs) changeNotifyStartPageToken() (pageToken string, err error) {
 	var startPageToken *drive.StartPageToken
 	err = f.pacer.Call(func() (bool, error) {
-		startPageToken, err = f.svc.Changes.GetStartPageToken().
-			SupportsAllDrives(true).
-			Do()
+		changes := f.svc.Changes.GetStartPageToken().SupportsAllDrives(true)
+		if f.isTeamDrive {
+			changes.DriveId(f.opt.TeamDriveID)
+		}
+		startPageToken, err = changes.Do()
 		return shouldRetry(err)
 	})
 	if err != nil {

@@ -2292,7 +2409,11 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
 		changesCall.SupportsAllDrives(true)
 		changesCall.IncludeItemsFromAllDrives(true)
 		if f.isTeamDrive {
-			changesCall.TeamDriveId(f.opt.TeamDriveID)
+			changesCall.DriveId(f.opt.TeamDriveID)
 		}
+		// If using appDataFolder then need to add Spaces
+		if f.rootFolderID == "appDataFolder" {
+			changesCall.Spaces("appDataFolder")
+		}
 		changeList, err = changesCall.Context(ctx).Do()
 		return shouldRetry(err)
@@ -113,7 +113,7 @@ var (

 // Register with Fs
 func init() {
-	DbHashType = hash.RegisterHash("Dropbox", 64, dbhash.New)
+	DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
 	fs.Register(&fs.RegInfo{
 		Name:        "dropbox",
 		Description: "Dropbox",
@@ -46,13 +46,26 @@ func (t Time) String() string { return time.Time(t).Format(timeFormat) }
 // APIString returns Time string in Jottacloud API format
 func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }

+// LoginToken is struct representing the login token generated in the WebUI
+type LoginToken struct {
+	Username      string `json:"username"`
+	Realm         string `json:"realm"`
+	WellKnownLink string `json:"well_known_link"`
+	AuthToken     string `json:"auth_token"`
+}
+
 // TokenJSON is the struct representing the HTTP response from OAuth2
 // providers returning a token in JSON form.
 type TokenJSON struct {
-	AccessToken  string `json:"access_token"`
-	TokenType    string `json:"token_type"`
-	RefreshToken string `json:"refresh_token"`
-	ExpiresIn    int32  `json:"expires_in"` // at least PayPal returns string, while most return number
+	AccessToken      string `json:"access_token"`
+	ExpiresIn        int32  `json:"expires_in"` // at least PayPal returns string, while most return number
+	RefreshExpiresIn int32  `json:"refresh_expires_in"`
+	RefreshToken     string `json:"refresh_token"`
+	TokenType        string `json:"token_type"`
+	IDToken          string `json:"id_token"`
+	NotBeforePolicy  int32  `json:"not-before-policy"`
+	SessionState     string `json:"session_state"`
+	Scope            string `json:"scope"`
 }

 // JSON structures returned by new API
@@ -4,12 +4,13 @@ import (
 	"bytes"
 	"context"
 	"crypto/md5"
+	"encoding/base64"
 	"encoding/hex"
+	"encoding/json"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"log"
-	"math/rand"
 	"net/http"
 	"net/url"
 	"os"

@@ -25,7 +26,6 @@ import (
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
-	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/encodings"
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"

@@ -41,29 +41,25 @@ const enc = encodings.JottaCloud

 // Globals
 const (
-	minSleep                    = 10 * time.Millisecond
-	maxSleep                    = 2 * time.Second
-	decayConstant               = 2 // bigger for slower decay, exponential
-	defaultDevice               = "Jotta"
-	defaultMountpoint           = "Archive"
-	rootURL                     = "https://www.jottacloud.com/jfs/"
-	apiURL                      = "https://api.jottacloud.com/"
-	baseURL                     = "https://www.jottacloud.com/"
-	tokenURL                    = "https://api.jottacloud.com/auth/v1/token"
-	registerURL                 = "https://api.jottacloud.com/auth/v1/register"
-	cachePrefix                 = "rclone-jcmd5-"
-	rcloneClientID              = "nibfk8biu12ju7hpqomr8b1e40"
-	rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
-	configClientID              = "client_id"
-	configClientSecret          = "client_secret"
-	configDevice                = "device"
-	configMountpoint            = "mountpoint"
-	charset                     = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
+	minSleep          = 10 * time.Millisecond
+	maxSleep          = 2 * time.Second
+	decayConstant     = 2 // bigger for slower decay, exponential
+	defaultDevice     = "Jotta"
+	defaultMountpoint = "Archive"
+	rootURL           = "https://www.jottacloud.com/jfs/"
+	apiURL            = "https://api.jottacloud.com/"
+	baseURL           = "https://www.jottacloud.com/"
+	tokenURL          = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
+	cachePrefix       = "rclone-jcmd5-"
+	configDevice      = "device"
+	configMountpoint  = "mountpoint"
+	configVersion     = 1
 )

 var (
 	// Description of how to auth for this app for a personal account
 	oauthConfig = &oauth2.Config{
+		ClientID: "jottacli",
 		Endpoint: oauth2.Endpoint{
 			AuthURL:  tokenURL,
 			TokenURL: tokenURL,

@@ -81,43 +77,39 @@ func init() {
 		NewFs: NewFs,
 		Config: func(name string, m configmap.Mapper) {
 			ctx := context.TODO()
-			tokenString, ok := m.Get("token")
-			if ok && tokenString != "" {
-				fmt.Printf("Already have a token - refresh?\n")
-				if !config.Confirm() {
-					return
-				}
-			}
-
-			srv := rest.NewClient(fshttp.NewClient(fs.Config))
-			fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
-			if config.Confirm() {
-				deviceRegistration, err := registerDevice(ctx, srv)
+
+			refresh := false
+			if version, ok := m.Get("configVersion"); ok {
+				ver, err := strconv.Atoi(version)
 				if err != nil {
-					log.Fatalf("Failed to register device: %v", err)
+					log.Fatalf("Failed to parse config version - corrupted config")
 				}
-
-				m.Set(configClientID, deviceRegistration.ClientID)
-				m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
-				fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
+				refresh = ver != configVersion
+			} else {
+				refresh = true
 			}

-			clientID, ok := m.Get(configClientID)
-			if !ok {
-				clientID = rcloneClientID
+			if refresh {
+				fmt.Printf("Config outdated - refreshing\n")
+			} else {
+				tokenString, ok := m.Get("token")
+				if ok && tokenString != "" {
+					fmt.Printf("Already have a token - refresh?\n")
+					if !config.Confirm(false) {
+						return
+					}
+				}
 			}
-			clientSecret, ok := m.Get(configClientSecret)
-			if !ok {
-				clientSecret = rcloneEncryptedClientSecret
-			}
-			oauthConfig.ClientID = clientID
-			oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)

-			fmt.Printf("Username> ")
-			username := config.ReadLine()
-			password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")
+			clientConfig := *fs.Config
+			clientConfig.UserAgent = "JottaCli 0.6.18626 windows-amd64"
+			srv := rest.NewClient(fshttp.NewClient(&clientConfig))

-			token, err := doAuth(ctx, srv, username, password)
+			fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
+			fmt.Printf("Login Token> ")
+			loginToken := config.ReadLine()
+
+			token, err := doAuth(ctx, srv, loginToken)
 			if err != nil {
 				log.Fatalf("Failed to get oauth token: %s", err)
 			}

@@ -127,7 +119,7 @@ func init() {
 			}

 			fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
-			if config.Confirm() {
+			if config.Confirm(false) {
 				oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
 				if err != nil {
 					log.Fatalf("Failed to load oAuthClient: %s", err)

@@ -143,6 +135,8 @@ func init() {
 				m.Set(configDevice, device)
 				m.Set(configMountpoint, mountpoint)
 			}
+
+			m.Set("configVersion", strconv.Itoa(configVersion))
 		},
 		Options: []fs.Option{{
 			Name: "md5_memory_limit",
@@ -249,67 +243,51 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
 	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }

-// registerDevice register a new device for use with the jottacloud API
-func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
-	// random generator to generate random device names
-	seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
-	randonDeviceNamePartLength := 21
-	randomDeviceNamePart := make([]byte, randonDeviceNamePartLength)
-	for i := range randomDeviceNamePart {
-		randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
-	}
-	randomDeviceName := "rclone-" + string(randomDeviceNamePart)
-	fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)
-
-	values := url.Values{}
-	values.Set("device_id", randomDeviceName)
-
-	opts := rest.Opts{
-		Method:       "POST",
-		RootURL:      registerURL,
-		ContentType:  "application/x-www-form-urlencoded",
-		ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
-		Parameters:   values,
-	}
-
-	var deviceRegistration *api.DeviceRegistrationResponse
-	_, err = srv.CallJSON(ctx, &opts, nil, &deviceRegistration)
-	return deviceRegistration, err
-}
-
 // doAuth runs the actual token request
-func doAuth(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
+func doAuth(ctx context.Context, srv *rest.Client, loginTokenBase64 string) (token oauth2.Token, err error) {
+	loginTokenBytes, err := base64.StdEncoding.DecodeString(loginTokenBase64)
+	if err != nil {
+		return token, err
+	}
+
+	var loginToken api.LoginToken
+	decoder := json.NewDecoder(bytes.NewReader(loginTokenBytes))
+	err = decoder.Decode(&loginToken)
+	if err != nil {
+		return token, err
+	}
+
+	// we don't seem to need any data from this link but the API is not happy if skip it
+	opts := rest.Opts{
+		Method:     "GET",
+		RootURL:    loginToken.WellKnownLink,
+		NoResponse: true,
+	}
+	_, err = srv.Call(ctx, &opts)
+	if err != nil {
+		return token, err
+	}
+
 	// prepare out token request with username and password
 	values := url.Values{}
-	values.Set("grant_type", "PASSWORD")
-	values.Set("password", password)
-	values.Set("username", username)
-	values.Set("client_id", oauthConfig.ClientID)
-	values.Set("client_secret", oauthConfig.ClientSecret)
-	opts := rest.Opts{
+	values.Set("client_id", "jottacli")
+	values.Set("grant_type", "password")
+	values.Set("password", loginToken.AuthToken)
+	values.Set("scope", "offline_access+openid")
+	values.Set("username", loginToken.Username)
+	values.Encode()
+	opts = rest.Opts{
 		Method:      "POST",
 		RootURL:     oauthConfig.Endpoint.AuthURL,
 		ContentType: "application/x-www-form-urlencoded",
-		Parameters:  values,
+		Body:        strings.NewReader(values.Encode()),
 	}

-	// do the first request
 	var jsonToken api.TokenJSON
-	resp, err := srv.CallJSON(ctx, &opts, nil, &jsonToken)
+	_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
 	if err != nil {
-		// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
-		if resp != nil {
-			if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
-				fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
-				fmt.Printf("Enter verification code> ")
-				authCode := config.ReadLine()
-
-				authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
-				opts.ExtraHeaders = make(map[string]string)
-				opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
-				resp, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
-			}
-		}
 		return token, err
 	}

 	token.AccessToken = jsonToken.AccessToken
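The personal login token pasted at the `Login Token>` prompt is a base64-encoded JSON document; doAuth decodes it into api.LoginToken and trades its auth_token for an OAuth2 token via the password grant. A standalone sketch of just the decode step (the sample token below is fabricated for illustration):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// LoginToken mirrors the fields rclone reads from the WebUI token.
type LoginToken struct {
	Username      string `json:"username"`
	Realm         string `json:"realm"`
	WellKnownLink string `json:"well_known_link"`
	AuthToken     string `json:"auth_token"`
}

func main() {
	// A fabricated example of what the pasted token contains once decoded.
	raw := base64.StdEncoding.EncodeToString([]byte(
		`{"username":"user@example.com","realm":"jottacloud",` +
			`"well_known_link":"https://id.jottacloud.com/.well-known/openid-configuration",` +
			`"auth_token":"xxxx"}`))

	data, err := base64.StdEncoding.DecodeString(raw)
	if err != nil {
		panic(err)
	}
	var lt LoginToken
	if err := json.Unmarshal(data, &lt); err != nil {
		panic(err)
	}
	fmt.Println(lt.Username, lt.WellKnownLink)
}
```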
@@ -471,29 +449,6 @@ func (f *Fs) filePath(file string) string {
 	return urlPathEscape(f.filePathRaw(file))
 }

-// Jottacloud requires the grant_type 'refresh_token' string
-// to be uppercase and throws a 400 Bad Request if we use the
-// lower case used by the oauth2 module
-//
-// This filter catches all refresh requests, reads the body,
-// changes the case and then sends it on
-func grantTypeFilter(req *http.Request) {
-	if tokenURL == req.URL.String() {
-		// read the entire body
-		refreshBody, err := ioutil.ReadAll(req.Body)
-		if err != nil {
-			return
-		}
-		_ = req.Body.Close()
-
-		// make the refresh token upper case
-		refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
-
-		// set the new ReadCloser (with a dummy Close())
-		req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
-	}
-}
-
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	ctx := context.TODO()

@@ -504,30 +459,23 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		return nil, err
 	}

+	var ok bool
+	var version string
+	if version, ok = m.Get("configVersion"); ok {
+		ver, err := strconv.Atoi(version)
+		if err != nil {
+			return nil, errors.New("Failed to parse config version")
+		}
+		ok = ver == configVersion
+	}
+	if !ok {
+		return nil, errors.New("Outdated config - please reconfigure this backend")
+	}
+
 	rootIsDir := strings.HasSuffix(root, "/")
 	root = parsePath(root)

-	clientID, ok := m.Get(configClientID)
-	if !ok {
-		clientID = rcloneClientID
-	}
-	clientSecret, ok := m.Get(configClientSecret)
-	if !ok {
-		clientSecret = rcloneEncryptedClientSecret
-	}
-	oauthConfig.ClientID = clientID
-	oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
-
-	// the oauth client for the api servers needs
-	// a filter to fix the grant_type issues (see above)
 	baseClient := fshttp.NewClient(fs.Config)
-	if do, ok := baseClient.Transport.(interface {
-		SetRequestFilter(f func(req *http.Request))
-	}); ok {
-		do.SetRequestFilter(grantTypeFilter)
-	} else {
-		fs.Debugf(name+":", "Couldn't add request filter - uploads will fail")
-	}
 	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
 	if err != nil {
 		return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
@@ -16,6 +16,7 @@ import (
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/encodings"
+	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"

 	httpclient "github.com/koofr/go-httpclient"

@@ -259,7 +260,9 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 	if err != nil {
 		return nil, err
 	}
-	client := koofrclient.NewKoofrClient(opt.Endpoint, false)
+	httpClient := httpclient.New()
+	httpClient.Client = fshttp.NewClient(fs.Config)
+	client := koofrclient.NewKoofrClientWithHTTPClient(opt.Endpoint, httpClient)
 	basicAuth := fmt.Sprintf("Basic %s",
 		base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
 	client.HTTPClient.Headers.Set("Authorization", basicAuth)
@@ -350,7 +350,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		err = errors.Wrapf(err, "failed to open directory %q", dir)
 		fs.Errorf(dir, "%v", err)
 		if isPerm {
-			accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
+			_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
 			err = nil // ignore error but fail sync
 		}
 		return nil, err

@@ -386,7 +386,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			if fierr != nil {
 				err = errors.Wrapf(err, "failed to read directory %q", namepath)
 				fs.Errorf(dir, "%v", fierr)
-				accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
+				_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
 				continue
 			}
 			fis = append(fis, fi)

@@ -409,7 +409,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 				// Skip bad symlinks
 				err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
 				fs.Errorf(newRemote, "Listing error: %v", err)
-				accounting.Stats(ctx).Error(err)
+				err = accounting.Stats(ctx).Error(err)
 				continue
 			}
 			if err != nil {

@@ -820,10 +820,10 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
 			return 0, errors.Wrap(err, "can't read status of source file while transferring")
 		}
 		if file.o.size != fi.Size() {
-			return 0, errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size())
+			return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size()))
 		}
 		if !file.o.modTime.Equal(fi.ModTime()) {
-			return 0, errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime())
+			return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime()))
 		}
 	}

@@ -1084,17 +1084,17 @@ func (o *Object) Remove(ctx context.Context) error {

 func cleanRootPath(s string, noUNC bool) string {
 	if runtime.GOOS == "windows" {
-		s = filepath.ToSlash(s)
-		vol := filepath.VolumeName(s)
-		s = vol + enc.FromStandardPath(s[len(vol):])
-		s = filepath.FromSlash(s)
-
+		if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
+			s2, err := filepath.Abs(s)
+			if err == nil {
+				s = s2
+			}
+		}
+		s = filepath.ToSlash(s)
+		vol := filepath.VolumeName(s)
+		s = vol + enc.FromStandardPath(s[len(vol):])
+		s = filepath.FromSlash(s)
+
 		if !noUNC {
 			// Convert to UNC
 			s = uncPath(s)
@@ -54,7 +54,7 @@ var testsWindows = [][2]string{
 	{`\\?\UNC\theserver\dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
 	{`//?/UNC/theserver/dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
 	{`c:/temp`, `c:\temp`},
-	{`/temp/file.txt`, `\temp\file.txt`},
+	{`C:/temp/file.txt`, `C:\temp\file.txt`},
 	{`c:\!\"#¤%&/()=;:*^?+-`, `c:\!\"#¤%&\()=;:*^?+-`},
 	{`c:\<>"|?*:&\<>"|?*:&\<>"|?*:&`, `c:\<>"|?*:&\<>"|?*:&\<>"|?*:&`},
 }
@@ -351,8 +351,13 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
 // instead of simply using `drives/driveID/root:/itemPath` because it works for
 // "shared with me" folders in OneDrive Personal (See #2536, #2778)
 // This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480
+//
+// If `relPath` == '', do not append the slash (See #3664)
 func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
-	opts := newOptsCall(normalizedID, "GET", ":/"+withTrailingColon(rest.URLPathEscape(enc.FromStandardPath(relPath))))
+	if relPath != "" {
+		relPath = "/" + withTrailingColon(rest.URLPathEscape(enc.FromStandardPath(relPath)))
+	}
+	opts := newOptsCall(normalizedID, "GET", ":"+relPath)
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
 		return shouldRetry(resp, err)
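The fix matters for the empty relPath case: with the old code an item addressed by ID alone got a stray trailing slash after the colon, while the new code emits just the bare colon. A toy reconstruction of the two path builders (the real withTrailingColon helper's behavior on empty input is assumed here, and the values are illustrative):

```go
package main

import "fmt"

// withTrailingColon mimics the helper's assumed contract: append ":"
// to a non-empty path segment, pass the empty string through.
func withTrailingColon(p string) string {
	if p == "" {
		return ""
	}
	return p + ":"
}

func oldPath(relPath string) string {
	return ":/" + withTrailingColon(relPath)
}

func newPath(relPath string) string {
	if relPath != "" {
		relPath = "/" + withTrailingColon(relPath)
	}
	return ":" + relPath
}

func main() {
	fmt.Println(oldPath("docs/report.txt"), newPath("docs/report.txt")) // same shape
	fmt.Println(oldPath(""), newPath(""))                               // ":/" vs ":"
}
```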
backend/putio/error.go (new file), 43 lines

@@ -0,0 +1,43 @@
+package putio
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/putdotio/go-putio/putio"
+	"github.com/rclone/rclone/fs/fserrors"
+)
+
+func checkStatusCode(resp *http.Response, expected int) error {
+	if resp.StatusCode != expected {
+		return &statusCodeError{response: resp}
+	}
+	return nil
+}
+
+type statusCodeError struct {
+	response *http.Response
+}
+
+func (e *statusCodeError) Error() string {
+	return fmt.Sprintf("unexpected status code (%d) response while doing %s to %s", e.response.StatusCode, e.response.Request.Method, e.response.Request.URL.String())
+}
+
+func (e *statusCodeError) Temporary() bool {
+	return e.response.StatusCode == 429 || e.response.StatusCode >= 500
+}
+
+// shouldRetry returns a boolean as to whether this err deserves to be
+// retried. It returns the err as a convenience
+func shouldRetry(err error) (bool, error) {
+	if err == nil {
+		return false, nil
+	}
+	if perr, ok := err.(*putio.ErrorResponse); ok {
+		err = &statusCodeError{response: perr.Response}
+	}
+	if fserrors.ShouldRetry(err) {
+		return true, err
+	}
+	return false, err
+}
@@ -17,7 +17,6 @@ import (
 	"github.com/putdotio/go-putio/putio"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config/configmap"
-	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/dircache"
 	"github.com/rclone/rclone/lib/oauthutil"

@@ -58,23 +57,6 @@ func (f *Fs) Features() *fs.Features {
 	return f.features
 }

-// shouldRetry returns a boolean as to whether this err deserves to be
-// retried. It returns the err as a convenience
-func shouldRetry(err error) (bool, error) {
-	if err == nil {
-		return false, nil
-	}
-	if fserrors.ShouldRetry(err) {
-		return true, err
-	}
-	if perr, ok := err.(*putio.ErrorResponse); ok {
-		if perr.Response.StatusCode == 429 || perr.Response.StatusCode >= 500 {
-			return true, err
-		}
-	}
-	return false, err
-}
-
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
 	// defer log.Trace(name, "root=%v", root)("f=%+v, err=%v", &f, &err)

@@ -318,66 +300,125 @@ func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID
 }

 func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.Reader) (fileID int64, err error) {
-	// defer log.Trace(f, "location=%v, size=%v", location, size)("fileID=%v, err=%v", fileID, &err)
+	// defer log.Trace(f, "location=%v, size=%v", location, size)("fileID=%v, err=%v", &fileID, &err)
 	if size == 0 {
 		err = f.pacer.Call(func() (bool, error) {
 			fs.Debugf(f, "Sending zero length chunk")
-			fileID, err = f.transferChunk(ctx, location, 0, bytes.NewReader([]byte{}), 0)
+			_, fileID, err = f.transferChunk(ctx, location, 0, bytes.NewReader([]byte{}), 0)
 			return shouldRetry(err)
 		})
 		return
 	}
-	var start int64
+	var clientOffset int64
+	var offsetMismatch bool
 	buf := make([]byte, defaultChunkSize)
-	for start < size {
-		reqSize := size - start
-		if reqSize >= int64(defaultChunkSize) {
-			reqSize = int64(defaultChunkSize)
+	for clientOffset < size {
+		chunkSize := size - clientOffset
+		if chunkSize >= int64(defaultChunkSize) {
+			chunkSize = int64(defaultChunkSize)
 		}
-		chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, reqSize)
+		chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
+		chunkStart := clientOffset
+		reqSize := chunkSize
+		transferOffset := clientOffset
+		fs.Debugf(f, "chunkStart: %d, reqSize: %d", chunkStart, reqSize)

 		// Transfer the chunk
 		err = f.pacer.Call(func() (bool, error) {
-			fs.Debugf(f, "Sending chunk. start: %d length: %d", start, reqSize)
-			// TODO get file offset and seek to the position
-			fileID, err = f.transferChunk(ctx, location, start, chunk, reqSize)
+			if offsetMismatch {
+				// Get file offset and seek to the position
+				offset, err := f.getServerOffset(ctx, location)
+				if err != nil {
+					return shouldRetry(err)
+				}
+				sentBytes := offset - chunkStart
+				fs.Debugf(f, "sentBytes: %d", sentBytes)
+				_, err = chunk.Seek(sentBytes, io.SeekStart)
+				if err != nil {
+					return shouldRetry(err)
+				}
+				transferOffset = offset
+				reqSize = chunkSize - sentBytes
+				offsetMismatch = false
+			}
+			fs.Debugf(f, "Sending chunk. transferOffset: %d length: %d", transferOffset, reqSize)
+			var serverOffset int64
+			serverOffset, fileID, err = f.transferChunk(ctx, location, transferOffset, chunk, reqSize)
+			if cerr, ok := err.(*statusCodeError); ok && cerr.response.StatusCode == 409 {
+				offsetMismatch = true
+				return true, err
+			}
+			if serverOffset != (transferOffset + reqSize) {
+				offsetMismatch = true
+				return true, errors.New("connection broken")
+			}
 			return shouldRetry(err)
 		})
 		if err != nil {
 			return
 		}

-		start += reqSize
+		clientOffset += chunkSize
 	}
 	return
 }

-func (f *Fs) transferChunk(ctx context.Context, location string, start int64, chunk io.ReadSeeker, chunkSize int64) (fileID int64, err error) {
-	// defer log.Trace(f, "location=%v, start=%v, chunkSize=%v", location, start, chunkSize)("fileID=%v, err=%v", fileID, &err)
-	_, _ = chunk.Seek(0, io.SeekStart)
+func (f *Fs) getServerOffset(ctx context.Context, location string) (offset int64, err error) {
+	// defer log.Trace(f, "location=%v", location)("offset=%v, err=%v", &offset, &err)
+	req, err := f.makeUploadHeadRequest(ctx, location)
+	if err != nil {
+		return 0, err
+	}
+	resp, err := f.oAuthClient.Do(req)
+	if err != nil {
+		return 0, err
+	}
+	err = checkStatusCode(resp, 200)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseInt(resp.Header.Get("upload-offset"), 10, 64)
+}
+
+func (f *Fs) transferChunk(ctx context.Context, location string, start int64, chunk io.ReadSeeker, chunkSize int64) (serverOffset, fileID int64, err error) {
+	// defer log.Trace(f, "location=%v, start=%v, chunkSize=%v", location, start, chunkSize)("fileID=%v, err=%v", &fileID, &err)
 	req, err := f.makeUploadPatchRequest(ctx, location, chunk, start, chunkSize)
 	if err != nil {
-		return 0, err
+		return
 	}
 	req = req.WithContext(ctx)
-	res, err := f.oAuthClient.Do(req)
+	resp, err := f.oAuthClient.Do(req)
 	if err != nil {
-		return 0, err
+		return
 	}
 	defer func() {
-		_ = res.Body.Close()
+		_ = resp.Body.Close()
 	}()
-	if res.StatusCode != 204 {
-		return 0, fmt.Errorf("unexpected status code while transferring chunk: %d", res.StatusCode)
+	err = checkStatusCode(resp, 204)
+	if err != nil {
+		return
 	}
-	sfid := res.Header.Get("putio-file-id")
+	serverOffset, err = strconv.ParseInt(resp.Header.Get("upload-offset"), 10, 64)
+	if err != nil {
+		return
+	}
+	sfid := resp.Header.Get("putio-file-id")
 	if sfid != "" {
 		fileID, err = strconv.ParseInt(sfid, 10, 64)
 		if err != nil {
-			return 0, err
+			return
 		}
 	}
-	return fileID, nil
+	return
+}
+
+func (f *Fs) makeUploadHeadRequest(ctx context.Context, location string) (*http.Request, error) {
+	req, err := http.NewRequest("HEAD", location, nil)
+	if err != nil {
+		return nil, err
+	}
+	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
+	req.Header.Set("tus-resumable", "1.0.0")
+	return req, nil
 }

 func (f *Fs) makeUploadPatchRequest(ctx context.Context, location string, in io.Reader, offset, length int64) (*http.Request, error) {
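put.io's upload endpoint speaks the tus resumable-upload protocol: on a 409 or a short write, the client asks the server (via a HEAD request carrying `tus-resumable: 1.0.0`) how many bytes it already has, then re-seeks into the current chunk and resends only the tail. The offset arithmetic in isolation, with made-up numbers:

```go
package main

import "fmt"

// resumePoint computes where to restart inside the current chunk after
// the server reports how many bytes it actually persisted.
func resumePoint(chunkStart, chunkSize, serverOffset int64) (seekTo, reqSize int64) {
	sentBytes := serverOffset - chunkStart // bytes of this chunk already stored
	return sentBytes, chunkSize - sentBytes
}

func main() {
	// Example: a 32 MiB chunk starting at byte 64 MiB; the connection broke
	// after the server had persisted 70 MiB in total.
	const mib = int64(1 << 20)
	seekTo, reqSize := resumePoint(64*mib, 32*mib, 70*mib)
	fmt.Println(seekTo/mib, reqSize/mib) // seek 6 MiB into the chunk, resend 26 MiB
}
```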
@@ -269,7 +269,7 @@ func qsServiceConnection(opt *Options) (*qs.Service, error) {
 	cf.Protocol = protocol
 	cf.Host = host
 	cf.Port = port
-	cf.ConnectionRetries = opt.ConnectionRetries
+	// unsupported in v3.1: cf.ConnectionRetries = opt.ConnectionRetries
 	cf.Connection = fshttp.NewClient(fs.Config)

 	return qs.Init(cf)
@@ -26,6 +26,7 @@ import (
 	"regexp"
 	"strconv"
 	"strings"
+	"sync"
 	"time"

 	"github.com/aws/aws-sdk-go/aws"

@@ -693,16 +694,37 @@ The minimum is 0 and the maximum is 5GB.`,
 		Name: "chunk_size",
 		Help: `Chunk size to use for uploading.

-When uploading files larger than upload_cutoff they will be uploaded
-as multipart uploads using this chunk size.
+When uploading files larger than upload_cutoff or files with unknown
+size (eg from "rclone rcat" or uploaded with "rclone mount" or google
+photos or google docs) they will be uploaded as multipart uploads
+using this chunk size.

 Note that "--s3-upload-concurrency" chunks of this size are buffered
 in memory per transfer.

 If you are transferring large files over high speed links and you have
-enough memory, then increasing this will speed up the transfers.`,
+enough memory, then increasing this will speed up the transfers.
+
+Rclone will automatically increase the chunk size when uploading a
+large file of known size to stay below the 10,000 chunks limit.
+
+Files of unknown size are uploaded with the configured
+chunk_size. Since the default chunk size is 5MB and there can be at
+most 10,000 chunks, this means that by default the maximum size of
+file you can stream upload is 48GB. If you wish to stream upload
+larger files then you will need to increase chunk_size.`,
 		Default:  minChunkSize,
 		Advanced: true,
+	}, {
+		Name: "copy_cutoff",
+		Help: `Cutoff for switching to multipart copy
+
+Any files larger than this that need to be server side copied will be
+copied in chunks of this size.
+
+The minimum is 0 and the maximum is 5GB.`,
+		Default:  fs.SizeSuffix(maxSizeForCopy),
+		Advanced: true,
 	}, {
 		Name: "disable_checksum",
 		Help: "Don't store MD5 checksum with object metadata",

@@ -771,12 +793,11 @@ WARNING: Storing parts of an incomplete multipart upload counts towards space us

 // Constants
 const (
-	metaMtime      = "Mtime"                       // the meta key to store mtime in - eg X-Amz-Meta-Mtime
-	metaMD5Hash    = "Md5chksum"                   // the meta key to store md5hash in
-	listChunkSize  = 1000                          // number of items to read at once
-	maxRetries     = 10                            // number of retries to make of operations
-	maxSizeForCopy = 5 * 1024 * 1024 * 1024        // The maximum size of object we can COPY
-	maxFileSize    = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
+	metaMtime      = "Mtime"                // the meta key to store mtime in - eg X-Amz-Meta-Mtime
+	metaMD5Hash    = "Md5chksum"            // the meta key to store md5hash in
+	listChunkSize  = 1000                   // number of items to read at once
+	maxRetries     = 10                     // number of retries to make of operations
+	maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
 	minChunkSize        = fs.SizeSuffix(s3manager.MinUploadPartSize)
 	defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
 	maxUploadCutoff     = fs.SizeSuffix(5 * 1024 * 1024 * 1024)

@@ -798,6 +819,7 @@ type Options struct {
 	SSEKMSKeyID           string        `config:"sse_kms_key_id"`
 	StorageClass          string        `config:"storage_class"`
 	UploadCutoff          fs.SizeSuffix `config:"upload_cutoff"`
+	CopyCutoff            fs.SizeSuffix `config:"copy_cutoff"`
 	ChunkSize             fs.SizeSuffix `config:"chunk_size"`
 	DisableChecksum       bool          `config:"disable_checksum"`
 	SessionToken          string        `config:"session_token"`

@@ -961,7 +983,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
 			Client: ec2metadata.New(session.New(), &aws.Config{
 				HTTPClient: lowTimeoutClient,
 			}),
-			ExpiryWindow: 3,
+			ExpiryWindow: 3 * time.Minute,
 		},
 	}
 	cred := credentials.NewChainCredentials(providers)
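The ExpiryWindow fix is a classic Go footgun: the field is a time.Duration, so a bare untyped constant 3 means 3 nanoseconds, and the EC2 role credentials were only marked for refresh nanoseconds before they expired. A tiny illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	var w time.Duration = 3 // an untyped 3 converts to 3 nanoseconds
	fmt.Println(w)          // 3ns
	w = 3 * time.Minute     // what was actually intended
	fmt.Println(w)          // 3m0s
}
```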
@@ -1642,7 +1664,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
 		req.StorageClass = &f.opt.StorageClass
 	}

-	if srcSize >= int64(f.opt.UploadCutoff) {
+	if srcSize >= int64(f.opt.CopyCutoff) {
 		return f.copyMultipart(ctx, req, dstBucket, dstPath, srcBucket, srcPath, srcSize)
 	}
 	return f.pacer.Call(func() (bool, error) {

@@ -1655,8 +1677,8 @@ func calculateRange(partSize, partIndex, numParts, totalSize int64) string {
 	start := partIndex * partSize
 	var ends string
 	if partIndex == numParts-1 {
-		if totalSize >= 0 {
-			ends = strconv.FormatInt(totalSize, 10)
+		if totalSize >= 1 {
+			ends = strconv.FormatInt(totalSize-1, 10)
 		}
 	} else {
 		ends = strconv.FormatInt(start+partSize-1, 10)
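The calculateRange fix accounts for HTTP byte ranges being inclusive of both endpoints: the last byte of an object of totalSize bytes is index totalSize-1, so the old code over-read by one byte on the final part. Worked through with small numbers (the bytes=start-end formatting of the tail of the function is assumed here):

```go
package main

import (
	"fmt"
	"strconv"
)

// calculateRange mirrors the fixed logic.
func calculateRange(partSize, partIndex, numParts, totalSize int64) string {
	start := partIndex * partSize
	var ends string
	if partIndex == numParts-1 {
		if totalSize >= 1 {
			ends = strconv.FormatInt(totalSize-1, 10) // inclusive last byte
		}
	} else {
		ends = strconv.FormatInt(start+partSize-1, 10)
	}
	return fmt.Sprintf("bytes=%v-%v", start, ends)
}

func main() {
	// A 12-byte object copied in 5-byte parts -> 3 parts.
	for i := int64(0); i < 3; i++ {
		fmt.Println(calculateRange(5, i, 3, 12))
	}
	// bytes=0-4, bytes=5-9, bytes=10-11 (the old code ended the last part at 12)
}
```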
@@ -1693,7 +1715,7 @@ func (f *Fs) copyMultipart(ctx context.Context, req *s3.CopyObjectInput, dstBuck
|
||||
}
|
||||
}()
|
||||
|
||||
partSize := int64(f.opt.ChunkSize)
|
||||
partSize := int64(f.opt.CopyCutoff)
|
||||
numParts := (srcSize-1)/partSize + 1
|
||||
|
||||
var parts []*s3.CompletedPart
|
||||
@@ -1872,6 +1894,9 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
|
||||
o.etag = aws.StringValue(resp.ETag)
|
||||
o.bytes = size
|
||||
o.meta = resp.Metadata
|
||||
if o.meta == nil {
|
||||
o.meta = map[string]*string{}
|
||||
}
|
||||
o.storageClass = aws.StringValue(resp.StorageClass)
|
||||
if resp.LastModified == nil {
|
||||
fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
|
||||
@@ -1918,11 +1943,6 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
}
|
||||
o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
|
||||
|
||||
if o.bytes >= maxSizeForCopy {
|
||||
fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Can't update metadata here, so return this error to force a recopy
|
||||
if o.storageClass == "GLACIER" || o.storageClass == "DEEP_ARCHIVE" {
|
||||
return fs.ErrorCantSetModTime
|
||||
@@ -1979,6 +1999,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
return resp.Body, nil
|
||||
}
|
||||
|
||||
var warnStreamUpload sync.Once
|
||||
|
||||
// Update the Object from in with modTime and size
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
bucket, bucketPath := o.split()
|
||||
@@ -1998,10 +2020,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
u.S3 = o.fs.c
u.PartSize = int64(o.fs.opt.ChunkSize)

// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
// buffers here (default 5MB). With a maximum number of parts (10,000) this will be a file of
// 48GB which seems like a not too unreasonable limit.
if size == -1 {
// Make parts as small as possible while still being able to upload to the
// S3 file size limit. Rounded up to nearest MB.
u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
warnStreamUpload.Do(func() {
fs.Logf(o.fs, "Streaming uploads using chunk size %v will have maximum file size of %v",
o.fs.opt.ChunkSize, fs.SizeSuffix(u.PartSize*s3manager.MaxUploadParts))
})
return
}
// Adjust PartSize until the number of parts is small enough.
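The replacement arithmetic picks the smallest part size that still lets s3manager.MaxUploadParts (10,000) parts span the largest object S3 accepts, rounded up to a whole MiB. Assuming maxFileSize is S3's 5 TiB object limit (its value is not shown in this hunk), the numbers work out to 525 MiB parts:

const (
	maxFileSize    = 5 * 1024 * 1024 * 1024 * 1024 // assumption: 5 TiB
	maxUploadParts = 10000                         // s3manager.MaxUploadParts
)

// (((maxFileSize / maxUploadParts) >> 20) + 1) << 20
//   = ((549755813 >> 20) + 1) << 20
//   = (524 + 1) << 20
//   = 550502400 bytes = 525 MiB per part
func streamPartSize() int64 {
	return (((maxFileSize / maxUploadParts) >> 20) + 1) << 20
}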
@@ -2020,7 +2046,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// read the md5sum if available for non multipart and if
// disable checksum isn't present.
var md5sum string
if !multipart || !o.fs.opt.DisableChecksum {
if !multipart && !o.fs.opt.DisableChecksum {
hash, err := src.Hash(ctx, hash.MD5)
if err == nil && matchMd5.MatchString(hash) {
hashBytes, err := hex.DecodeString(hash)

@@ -29,15 +29,17 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
sshagent "github.com/xanzy/ssh-agent"
"golang.org/x/crypto/ssh"
"golang.org/x/time/rate"
)

const (
connectionsPerSecond = 10 // don't make more than this many ssh connections/s
hashCommandNotSupported = "none"
minSleep = 100 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)

var (
@@ -86,8 +88,19 @@ requested from the ssh-agent. This allows to avoid ` + "`Too many authentication
when the ssh-agent contains many keys.`,
Default: false,
}, {
Name: "use_insecure_cipher",
Help: "Enable the use of the aes128-cbc cipher and diffie-hellman-group-exchange-sha256, diffie-hellman-group-exchange-sha1 key exchange. Those algorithms are insecure and may allow plaintext data to be recovered by an attacker.",
Name: "use_insecure_cipher",
Help: `Enable the use of insecure ciphers and key exchange methods.

This enables the use of the following insecure ciphers and key exchange methods:

- aes128-cbc
- aes192-cbc
- aes256-cbc
- 3des-cbc
- diffie-hellman-group-exchange-sha256
- diffie-hellman-group-exchange-sha1

Those algorithms are insecure and may allow plaintext data to be recovered by an attacker.`,
Default: false,
Examples: []fs.OptionExample{
{
@@ -143,6 +156,11 @@ Home directory can be found in a shared folder called "home"
Default: "",
Help: "The command used to read sha1 hashes. Leave blank for autodetect.",
Advanced: true,
}, {
Name: "skip_links",
Default: false,
Help: "Set to skip any symlinks and any other non regular files.",
Advanced: true,
}},
}
fs.Register(fsi)
@@ -164,6 +182,7 @@ type Options struct {
SetModTime bool `config:"set_modtime"`
Md5sumCommand string `config:"md5sum_command"`
Sha1sumCommand string `config:"sha1sum_command"`
SkipLinks bool `config:"skip_links"`
}

// Fs stores the interface to the remote SFTP files
@@ -179,7 +198,7 @@ type Fs struct {
cachedHashes *hash.Set
poolMu sync.Mutex
pool []*conn
connLimit *rate.Limiter // for limiting number of connections per second
pacer *fs.Pacer // pacer for operations
}

// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -259,10 +278,6 @@ func (c *conn) closed() error {
// Open a new connection to the SFTP server.
func (f *Fs) sftpConnection() (c *conn, err error) {
// Rate limit rate of new connections
err = f.connLimit.Wait(context.Background())
if err != nil {
return nil, errors.Wrap(err, "limiter failed in connect")
}
c = &conn{
err: make(chan error, 1),
}
@@ -296,7 +311,14 @@ func (f *Fs) getSftpConnection() (c *conn, err error) {
if c != nil {
return c, nil
}
return f.sftpConnection()
err = f.pacer.Call(func() (bool, error) {
c, err = f.sftpConnection()
if err != nil {
return true, err
}
return false, nil
})
return c, err
}

// Return an SFTP connection to the pool
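The pacer's retry contract is visible in the closure: it returns (retry, err), so (true, err) tells the pacer to back off (between the new minSleep and maxSleep constants) and dial again, while (false, nil) ends the loop; the pacer gives up after its configured number of tries. The same shape works for any flaky operation:

// generic use of the (retry bool, err error) contract
err := p.Call(func() (bool, error) {
	err := dialOnce()      // hypothetical flaky operation
	return err != nil, err // true = sleep, then retry
})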
@@ -363,7 +385,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {

if opt.UseInsecureCipher {
sshConfig.Config.SetDefaults()
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc")
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc", "aes192-cbc", "aes256-cbc", "3des-cbc")
sshConfig.Config.KeyExchanges = append(sshConfig.Config.KeyExchanges, "diffie-hellman-group-exchange-sha1", "diffie-hellman-group-exchange-sha256")
}

@@ -454,7 +476,7 @@ func NewFsWithConnection(ctx context.Context, name string, root string, m config
config: sshConfig,
url: "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root,
mkdirLock: newStringLock(),
connLimit: rate.NewLimiter(rate.Limit(connectionsPerSecond), 1),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
@@ -584,12 +606,16 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
remote := path.Join(dir, info.Name())
// If file is a symlink (not a regular file is the best cross platform test we can do), do a stat to
// pick up the size and type of the destination, instead of the size and type of the symlink.
if !info.Mode().IsRegular() {
if !info.Mode().IsRegular() && !info.IsDir() {
if f.opt.SkipLinks {
// skip non regular file if SkipLinks is set
continue
}
oldInfo := info
info, err = f.stat(remote)
if err != nil {
if !os.IsNotExist(err) {
fs.Errorf(remote, "stat of non-regular file/dir failed: %v", err)
fs.Errorf(remote, "stat of non-regular file failed: %v", err)
}
info = oldInfo
}

@@ -7,6 +7,7 @@ import (
"context"
"fmt"
"io"
"net/url"
"path"
"strconv"
"strings"
@@ -530,10 +531,10 @@ type listFn func(remote string, object *swift.Object, isDirectory bool) error
//
// Set recurse to read sub directories
func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer bool, recurse bool, fn listFn) error {
if prefix != "" {
if prefix != "" && !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
if directory != "" {
if directory != "" && !strings.HasSuffix(directory, "/") {
directory += "/"
}
// Options for ObjectsWalk
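The two HasSuffix guards fix a double-slash bug: if a caller already passes a prefix ending in "/", the old unconditional append produced "foo//", a prefix that matches no object names. The guard in isolation:

// append the delimiter only when it is not already there
if prefix != "" && !strings.HasSuffix(prefix, "/") {
	prefix += "/"
}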
@@ -952,6 +953,18 @@ func (o *Object) isStaticLargeObject() (bool, error) {
return o.hasHeader("X-Static-Large-Object")
}

func (o *Object) isInContainerVersioning(container string) (bool, error) {
_, headers, err := o.fs.c.Container(container)
if err != nil {
return false, err
}
xHistoryLocation := headers["X-History-Location"]
if len(xHistoryLocation) > 0 {
return true, nil
}
return false, nil
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.size
@@ -1083,9 +1096,8 @@ func min(x, y int64) int64 {
//
// if except is passed in then segments with that prefix won't be deleted
func (o *Object) removeSegments(except string) error {
container, containerPath := o.split()
segmentsContainer := container + "_segments"
err := o.fs.listContainerRoot(segmentsContainer, containerPath, "", false, true, func(remote string, object *swift.Object, isDirectory bool) error {
segmentsContainer, prefix, err := o.getSegmentsDlo()
err = o.fs.listContainerRoot(segmentsContainer, prefix, "", false, true, func(remote string, object *swift.Object, isDirectory bool) error {
if isDirectory {
return nil
}
@@ -1114,6 +1126,23 @@ func (o *Object) removeSegments(except string) error {
return nil
}

func (o *Object) getSegmentsDlo() (segmentsContainer string, prefix string, err error) {
if err = o.readMetaData(); err != nil {
return
}
dirManifest := o.headers["X-Object-Manifest"]
dirManifest, err = url.PathUnescape(dirManifest)
if err != nil {
return
}
delimiter := strings.Index(dirManifest, "/")
if len(dirManifest) == 0 || delimiter < 0 {
err = errors.New("Missing or wrong structure of manifest of Dynamic large object")
return
}
return dirManifest[:delimiter], dirManifest[delimiter+1:], nil
}

// urlEncode encodes a string so that it is a valid URL
//
// We don't use any of Go's standard methods as we need `/` not
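getSegmentsDlo relies on the format of a dynamic large object's X-Object-Manifest header: "<segments container>/<object prefix>", URL-escaped. A standalone sketch of the split; the header value here is invented for illustration:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	manifest, _ := url.PathUnescape("archive_segments/videos/big.mov") // illustrative value
	i := strings.Index(manifest, "/")
	container, prefix := manifest[:i], manifest[i+1:]
	fmt.Println(container, prefix) // "archive_segments" "videos/big.mov"
}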
@@ -1300,12 +1329,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
func (o *Object) Remove(ctx context.Context) (err error) {
container, containerPath := o.split()
isDynamicLargeObject, err := o.isDynamicLargeObject()
if err != nil {
return err
}

// Remove file/manifest first
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectDelete(container, containerPath)
@@ -1314,12 +1340,22 @@ func (o *Object) Remove(ctx context.Context) error {
if err != nil {
return err
}
isDynamicLargeObject, err := o.isDynamicLargeObject()
if err != nil {
return err
}
// ...then segments if required
if isDynamicLargeObject {
err = o.removeSegments("")
isInContainerVersioning, err := o.isInContainerVersioning(container)
if err != nil {
return err
}
if !isInContainerVersioning {
err = o.removeSegments("")
if err != nil {
return err
}
}
}
return nil
}

@@ -113,7 +113,8 @@ type Fs struct {
canStream bool // set if can stream
useOCMtime bool // set if can use X-OC-Mtime
retryWithZeroDepth bool // some vendors (sharepoint) won't list files when Depth is 1 (our default)
hasChecksums bool // set if can use owncloud style checksums
hasMD5 bool // set if can use owncloud style checksums for MD5
hasSHA1 bool // set if can use owncloud style checksums for SHA1
}

// Object describes a webdav object
@@ -215,7 +216,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
},
NoRedirect: true,
}
if f.hasChecksums {
if f.hasMD5 || f.hasSHA1 {
opts.Body = bytes.NewBuffer(owncloudProps)
}
var result api.Multistatus
@@ -383,7 +384,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// sets the BearerToken up
func (f *Fs) setBearerToken(token string) {
f.opt.BearerToken = token
f.srv.SetHeader("Authorization", "BEARER "+token)
f.srv.SetHeader("Authorization", "Bearer "+token)
}

// fetch the bearer token using the command
@@ -430,11 +431,12 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
f.canStream = true
f.precision = time.Second
f.useOCMtime = true
f.hasChecksums = true
f.hasMD5 = true
f.hasSHA1 = true
case "nextcloud":
f.precision = time.Second
f.useOCMtime = true
f.hasChecksums = true
f.hasSHA1 = true
case "sharepoint":
// To mount sharepoint, two Cookies are required
// They have to be set instead of BasicAuth
@@ -536,7 +538,7 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
"Depth": depth,
},
}
if f.hasChecksums {
if f.hasMD5 || f.hasSHA1 {
opts.Body = bytes.NewBuffer(owncloudProps)
}
var result api.Multistatus
@@ -945,10 +947,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
if f.hasChecksums {
return hash.NewHashSet(hash.MD5, hash.SHA1)
hashes := hash.Set(hash.None)
if f.hasMD5 {
hashes.Add(hash.MD5)
}
return hash.Set(hash.None)
if f.hasSHA1 {
hashes.Add(hash.SHA1)
}
return hashes
}

// About gets quota information
@@ -1015,13 +1021,11 @@ func (o *Object) Remote() string {

// Hash returns the SHA1 or MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if o.fs.hasChecksums {
switch t {
case hash.SHA1:
return o.sha1, nil
case hash.MD5:
return o.md5, nil
}
if t == hash.MD5 && o.fs.hasMD5 {
return o.md5, nil
}
if t == hash.SHA1 && o.fs.hasSHA1 {
return o.sha1, nil
}
return "", hash.ErrUnsupported
}
@@ -1042,10 +1046,14 @@ func (o *Object) setMetaData(info *api.Prop) (err error) {
o.hasMetaData = true
o.size = info.Size
o.modTime = time.Time(info.Modified)
if o.fs.hasChecksums {
if o.fs.hasMD5 || o.fs.hasSHA1 {
hashes := info.Hashes()
o.sha1 = hashes[hash.SHA1]
o.md5 = hashes[hash.MD5]
if o.fs.hasSHA1 {
o.sha1 = hashes[hash.SHA1]
}
if o.fs.hasMD5 {
o.md5 = hashes[hash.MD5]
}
}
return nil
}
@@ -1126,19 +1134,21 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
ContentType: fs.MimeType(ctx, src),
}
if o.fs.useOCMtime || o.fs.hasChecksums {
if o.fs.useOCMtime || o.fs.hasMD5 || o.fs.hasSHA1 {
opts.ExtraHeaders = map[string]string{}
if o.fs.useOCMtime {
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1e9)
}
if o.fs.hasChecksums {
// Set an upload checksum - prefer SHA1
//
// This is used as an upload integrity test. If we set
// only SHA1 here, owncloud will calculate the MD5 too.
// Set one upload checksum
// Owncloud uses one checksum only to check the upload and stores its own SHA1 and MD5
// Nextcloud stores the checksum you supply (SHA1 or MD5) but only stores one
if o.fs.hasSHA1 {
if sha1, _ := src.Hash(ctx, hash.SHA1); sha1 != "" {
opts.ExtraHeaders["OC-Checksum"] = "SHA1:" + sha1
} else if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" {
}
}
if o.fs.hasMD5 && opts.ExtraHeaders["OC-Checksum"] == "" {
if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" {
opts.ExtraHeaders["OC-Checksum"] = "MD5:" + md5
}
}

@@ -9,6 +9,7 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/spf13/cobra"
)

@@ -19,8 +20,9 @@ var (

func init() {
cmd.Root.AddCommand(commandDefinition)
commandDefinition.Flags().BoolVar(&jsonOutput, "json", false, "Format output as JSON")
commandDefinition.Flags().BoolVar(&fullOutput, "full", false, "Full numbers instead of SI units")
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &jsonOutput, "json", "", false, "Format output as JSON")
flags.BoolVarP(cmdFlags, &fullOutput, "full", "", false, "Full numbers instead of SI units")
}

// printValue formats uv to be output

@@ -3,22 +3,32 @@ package authorize
import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/flags"
"github.com/spf13/cobra"
)

var (
noAutoBrowser bool
)

func init() {
cmd.Root.AddCommand(commandDefintion)
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &noAutoBrowser, "auth-no-open-browser", "", false, "Do not automatically open auth link in default browser")
}

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "authorize",
Short: `Remote authorization.`,
Long: `
Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser - use as instructed by
rclone config.`,
rclone config.

Use --auth-no-open-browser to prevent rclone from automatically
opening the auth link in the default browser.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 3, command, args)
config.Authorize(args)
config.Authorize(args, noAutoBrowser)
},
}

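This hunk also shows the recurring rename in this series: the misspelled commandDefintion becomes commandDefinition, and flag registration moves to rclone's flags helpers. With the new flag, a headless authorization keeps the link on stdout instead of spawning a browser; for example (the remote type is illustrative):

rclone authorize "dropbox" --auth-no-open-browser

The printed URL can then be pasted into any browser by hand, which is what you want over SSH where no local browser can be launched.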
@@ -8,6 +8,7 @@ import (
"os"

"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -22,15 +23,16 @@ var (
)

func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().Int64VarP(&head, "head", "", head, "Only print the first N characters.")
commandDefintion.Flags().Int64VarP(&tail, "tail", "", tail, "Only print the last N characters.")
commandDefintion.Flags().Int64VarP(&offset, "offset", "", offset, "Start printing at offset N (or from end if -ve).")
commandDefintion.Flags().Int64VarP(&count, "count", "", count, "Only print N characters.")
commandDefintion.Flags().BoolVarP(&discard, "discard", "", discard, "Discard the output instead of printing.")
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.Int64VarP(cmdFlags, &head, "head", "", head, "Only print the first N characters.")
flags.Int64VarP(cmdFlags, &tail, "tail", "", tail, "Only print the last N characters.")
flags.Int64VarP(cmdFlags, &offset, "offset", "", offset, "Start printing at offset N (or from end if -ve).")
flags.Int64VarP(cmdFlags, &count, "count", "", count, "Only print N characters.")
flags.BoolVarP(cmdFlags, &discard, "discard", "", discard, "Discard the output instead of printing.")
}

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "cat remote:path",
Short: `Concatenates any files and sends them to stdout.`,
Long: `

@@ -4,6 +4,7 @@ import (
"context"

"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -15,12 +16,13 @@ var (
)

func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&download, "download", "", download, "Check by downloading rather than with hash.")
commandDefintion.Flags().BoolVarP(&oneway, "one-way", "", oneway, "Check one way only, source files must exist on remote")
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by downloading rather than with hash.")
flags.BoolVarP(cmdFlags, &oneway, "one-way", "", oneway, "Check one way only, source files must exist on remote")
}

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "check source:path dest:path",
Short: `Checks the files in the source and destination match.`,
Long: `

@@ -9,10 +9,10 @@ import (
)

func init() {
cmd.Root.AddCommand(commandDefintion)
cmd.Root.AddCommand(commandDefinition)
}

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "cleanup remote:path",
Short: `Clean up the remote if possible`,
Long: `

cmd/cmd.go
@@ -82,7 +82,7 @@ func ShowVersion() {
func NewFsFile(remote string) (fs.Fs, string) {
_, _, fsPath, err := fs.ParseRemote(remote)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
log.Fatalf("Failed to create file system for %q: %v", remote, err)
}
f, err := cache.Get(remote)
@@ -92,7 +92,7 @@ func NewFsFile(remote string) (fs.Fs, string) {
case nil:
return f, ""
default:
fs.CountError(err)
err = fs.CountError(err)
log.Fatalf("Failed to create file system for %q: %v", remote, err)
}
return nil, ""
@@ -107,13 +107,13 @@ func newFsFileAddFilter(remote string) (fs.Fs, string) {
if fileName != "" {
if !filter.Active.InActive() {
err := errors.Errorf("Can't limit to single files when using filters: %v", remote)
fs.CountError(err)
err = fs.CountError(err)
log.Fatalf(err.Error())
}
// Limit transfers to this file
err := filter.Active.AddFile(fileName)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
log.Fatalf("Failed to limit to single file %q: %v", remote, err)
}
}
@@ -135,7 +135,7 @@ func NewFsSrc(args []string) fs.Fs {
func newFsDir(remote string) fs.Fs {
f, err := cache.Get(remote)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
log.Fatalf("Failed to create file system for %q: %v", remote, err)
}
return f
@@ -189,11 +189,11 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs
fdst, err := cache.Get(dstRemote)
switch err {
case fs.ErrorIsFile:
fs.CountError(err)
_ = fs.CountError(err)
log.Fatalf("Source doesn't exist or is a directory and destination is a file")
case nil:
default:
fs.CountError(err)
_ = fs.CountError(err)
log.Fatalf("Failed to create file system for destination %q: %v", dstRemote, err)
}
return
@@ -239,7 +239,7 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
SigInfoHandler()
for try := 1; try <= *retries; try++ {
err = f()
fs.CountError(err)
err = fs.CountError(err)
lastErr := accounting.GlobalStats().GetLastError()
if err == nil {
err = lastErr
@@ -386,12 +386,12 @@ func initConfig() {
fs.Infof(nil, "Creating CPU profile %q\n", *cpuProfile)
f, err := os.Create(*cpuProfile)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
log.Fatal(err)
}
err = pprof.StartCPUProfile(f)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
log.Fatal(err)
}
atexit.Register(func() {
@@ -405,17 +405,17 @@ func initConfig() {
fs.Infof(nil, "Saving Memory profile %q\n", *memProfile)
f, err := os.Create(*memProfile)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
log.Fatal(err)
}
err = pprof.WriteHeapProfile(f)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
log.Fatal(err)
}
err = f.Close()
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
log.Fatal(err)
}
})

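All of these call sites change for the same reason: fs.CountError now returns the error it was given, so recording and propagating collapse into one line (and the explicit _ = form records while deliberately discarding the result). A sketch of the assumed new shape - not the real implementation:

// assumed signature: register the error with the stats, then hand it back
func CountError(err error) error {
	if err != nil {
		accounting.GlobalStats().Error(err) // assumption: some such recording call
	}
	return err
}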
@@ -246,7 +246,12 @@ func (fsys *FS) Readdir(dirPath string,
for _, item := range items {
node, ok := item.(vfs.Node)
if ok {
fill(node.Name(), nil, 0)
name := node.Name()
if len(name) > mountlib.MaxLeafSize {
fs.Errorf(dirPath, "Name too long (%d bytes) for FUSE, skipping: %s", len(name), name)
continue
}
fill(name, nil, 0)
}
}
itemsRead = len(items)
@@ -366,7 +371,12 @@ func (fsys *FS) Write(path string, buff []byte, ofst int64, fh uint64) (n int) {
if errc != 0 {
return errc
}
n, err := handle.WriteAt(buff, ofst)
var err error
if fsys.VFS.Opt.CacheMode < vfs.CacheModeWrites || handle.Node().Mode()&os.ModeAppend == 0 {
n, err = handle.WriteAt(buff, ofst)
} else {
n, err = handle.Write(buff)
}
if err != nil {
return translateError(err)
}

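The same append fix appears in both FUSE frontends (here and in the bazil-based mount further down): once the VFS cache can back the handle, a file opened with O_APPEND uses sequential Write instead of positional WriteAt, because with append semantics the kernel-supplied offset is not where the data will land. Plain Go shows the distinction:

package main

import "os"

func main() {
	f, _ := os.OpenFile("out.log", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
	defer f.Close()
	_, _ = f.Write([]byte("appended\n")) // lands at end of file, wherever that is
	// f.WriteAt(...) would be rejected here: Go's os package returns
	// "invalid use of WriteAt on file opened with O_APPEND"
}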
@@ -21,6 +21,7 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
)
@@ -207,7 +208,7 @@ func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, er
// If noModTime is set then it
func Mount(f fs.Fs, mountpoint string) error {
// Mount it
FS, errChan, _, err := mount(f, mountpoint)
FS, errChan, unmount, err := mount(f, mountpoint)
if err != nil {
return errors.Wrap(err, "failed to mount FUSE fs")
}
@@ -217,6 +218,10 @@ func Mount(f fs.Fs, mountpoint string) error {
sigHup := make(chan os.Signal, 1)
signal.Notify(sigHup, syscall.SIGHUP)

atexit.Register(func() {
_ = unmount()
})

if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
return errors.Wrap(err, "failed to notify systemd")
}

@@ -11,6 +11,7 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/rc"
"github.com/spf13/cobra"
)
@@ -271,7 +272,7 @@ var (
)

func init() {
configUserInfoCommand.Flags().BoolVar(&jsonOutput, "json", false, "Format output as JSON")
flags.BoolVarP(configUserInfoCommand.Flags(), &jsonOutput, "json", "", false, "Format output as JSON")
}

var configUserInfoCommand = &cobra.Command{

@@ -4,6 +4,7 @@ import (
"context"

"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/sync"
"github.com/spf13/cobra"
@@ -14,11 +15,12 @@ var (
)

func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after copy")
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after copy")
}

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "copy source:path dest:path",
Short: `Copy files from source to dest, skipping already copied`,
Long: `

@@ -10,10 +10,10 @@ import (
)

func init() {
cmd.Root.AddCommand(commandDefintion)
cmd.Root.AddCommand(commandDefinition)
}

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "copyto source:path dest:path",
Short: `Copy files from source to dest, skipping already copied`,
Long: `

@@ -5,6 +5,7 @@ import (

"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -14,11 +15,12 @@ var (
)

func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&autoFilename, "auto-filename", "a", autoFilename, "Get the file name from the url and use it for destination file path")
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &autoFilename, "auto-filename", "a", autoFilename, "Get the file name from the url and use it for destination file path")
}

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "copyurl https://example.com dest:path",
Short: `Copy url content to dest.`,
Long: `

@@ -7,6 +7,7 @@ import (
"github.com/rclone/rclone/backend/crypt"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
@@ -18,11 +19,12 @@ var (
)

func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&oneway, "one-way", "", oneway, "Check one way only, source files must exist on destination")
cmd.Root.AddCommand(commandDefinition)
cmdFlag := commandDefinition.Flags()
flags.BoolVarP(cmdFlag, &oneway, "one-way", "", oneway, "Check one way only, source files must exist on destination")
}

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "cryptcheck remote:path cryptedremote:path",
Short: `Cryptcheck checks the integrity of a crypted remote.`,
Long: `
@@ -86,7 +88,7 @@ func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
underlyingDst := cryptDst.UnWrap()
underlyingHash, err := underlyingDst.Hash(ctx, hashType)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(dst, "Error reading hash from underlying %v: %v", underlyingDst, err)
return true, false
}
@@ -95,7 +97,7 @@ func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
}
cryptHash, err := fcrypt.ComputeHash(ctx, cryptDst, src, hashType)
if err != nil {
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(dst, "Error computing hash: %v", err)
return true, false
}
@@ -104,7 +106,7 @@ func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
}
if cryptHash != underlyingHash {
err = errors.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
fs.CountError(err)
err = fs.CountError(err)
fs.Errorf(src, err.Error())
return true, false
}

@@ -18,8 +18,8 @@ var (

func init() {
cmd.Root.AddCommand(commandDefinition)
flagSet := commandDefinition.Flags()
flags.BoolVarP(flagSet, &Reverse, "reverse", "", Reverse, "Reverse cryptdecode, encrypts filenames")
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &Reverse, "reverse", "", Reverse, "Reverse cryptdecode, encrypts filenames")
}

var commandDefinition = &cobra.Command{

@@ -12,10 +12,10 @@ import (
)

func init() {
cmd.Root.AddCommand(commandDefintion)
cmd.Root.AddCommand(commandDefinition)
}

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "dbhashsum remote:path",
Short: `Produces a Dropbox hash file for all the objects in the path.`,
Long: `

@@ -5,6 +5,7 @@ import (
"log"

"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -14,11 +15,12 @@ var (
)

func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().VarP(&dedupeMode, "dedupe-mode", "", "Dedupe mode interactive|skip|first|newest|oldest|rename.")
cmd.Root.AddCommand(commandDefinition)
cmdFlag := commandDefinition.Flags()
flags.FVarP(cmdFlag, &dedupeMode, "dedupe-mode", "", "Dedupe mode interactive|skip|first|newest|oldest|rename.")
}

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "dedupe [mode] remote:path",
Short: `Interactively find duplicate files and delete/rename them.`,
Long: `

@@ -9,10 +9,10 @@ import (
)

func init() {
cmd.Root.AddCommand(commandDefintion)
cmd.Root.AddCommand(commandDefinition)
}

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "delete remote:path",
Short: `Remove the contents of path.`,
Long: `

@@ -10,10 +10,10 @@ import (
)

func init() {
cmd.Root.AddCommand(commandDefintion)
cmd.Root.AddCommand(commandDefinition)
}

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "deletefile remote:path",
Short: `Remove a single file from remote.`,
Long: `

@@ -17,7 +17,7 @@ import (
)

func init() {
cmd.Root.AddCommand(commandDefintion)
cmd.Root.AddCommand(commandDefinition)
}

const gendocFrontmatterTemplate = `---
@@ -28,7 +28,7 @@ url: %s
---
`

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "gendocs output_directory",
Short: `Output markdown docs for rclone to the directory supplied.`,
Long: `

@@ -46,10 +46,11 @@ __rclone_custom_func() {
else
__rclone_init_completion -n : || return
fi
local rclone=(command rclone --ask-password=false)
if [[ $cur != *:* ]]; then
local ifs=$IFS
IFS=$'\n'
local remotes=($(command rclone listremotes))
local remotes=($("${rclone[@]}" listremotes 2> /dev/null))
IFS=$ifs
local remote
for remote in "${remotes[@]}"; do
@@ -68,7 +69,7 @@ __rclone_custom_func() {
fi
local ifs=$IFS
IFS=$'\n'
local lines=($(rclone lsf "${cur%%:*}:$prefix" 2>/dev/null))
local lines=($("${rclone[@]}" lsf "${cur%%:*}:$prefix" 2> /dev/null))
IFS=$ifs
local line
for line in "${lines[@]}"; do

@@ -22,6 +22,7 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/info/internal"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/lib/random"
@@ -41,16 +42,17 @@ var (
)

func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().StringVarP(&writeJSON, "write-json", "", "", "Write results to file.")
commandDefintion.Flags().BoolVarP(&checkNormalization, "check-normalization", "", true, "Check UTF-8 Normalization.")
commandDefintion.Flags().BoolVarP(&checkControl, "check-control", "", true, "Check control characters.")
commandDefintion.Flags().DurationVarP(&uploadWait, "upload-wait", "", 0, "Wait after writing a file.")
commandDefintion.Flags().BoolVarP(&checkLength, "check-length", "", true, "Check max filename length.")
commandDefintion.Flags().BoolVarP(&checkStreaming, "check-streaming", "", true, "Check uploads with indeterminate file size.")
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.StringVarP(cmdFlags, &writeJSON, "write-json", "", "", "Write results to file.")
flags.BoolVarP(cmdFlags, &checkNormalization, "check-normalization", "", true, "Check UTF-8 Normalization.")
flags.BoolVarP(cmdFlags, &checkControl, "check-control", "", true, "Check control characters.")
flags.DurationVarP(cmdFlags, &uploadWait, "upload-wait", "", 0, "Wait after writing a file.")
flags.BoolVarP(cmdFlags, &checkLength, "check-length", "", true, "Check max filename length.")
flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", true, "Check uploads with indeterminate file size.")
}

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "info [remote:path]+",
Short: `Discovers file name or other limitations for paths.`,
Long: `rclone info discovers what filenames and upload methods are possible

@@ -10,10 +10,10 @@ import (
)

func init() {
cmd.Root.AddCommand(commandDefintion)
cmd.Root.AddCommand(commandDefinition)
}

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "link remote:path",
Short: `Generate public link to file/folder.`,
Long: `

@@ -6,6 +6,7 @@ import (

"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/flags"
"github.com/spf13/cobra"
)

@@ -15,11 +16,12 @@ var (
)

func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&listLong, "long", "", listLong, "Show the type as well as names.")
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &listLong, "long", "", listLong, "Show the type as well as names.")
}

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "listremotes",
Short: `List all the remotes in the config file.`,
Long: `

@@ -11,10 +11,10 @@ import (
)

func init() {
cmd.Root.AddCommand(commandDefintion)
cmd.Root.AddCommand(commandDefinition)
}

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "ls remote:path",
Short: `List the objects in the path with size and path.`,
Long: `

@@ -7,6 +7,7 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/ls/lshelp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -16,11 +17,12 @@ var (
)

func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&recurse, "recursive", "R", false, "Recurse into the listing.")
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &recurse, "recursive", "R", false, "Recurse into the listing.")
}

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "lsd remote:path",
Short: `List all directories/containers/buckets in the path.`,
Long: `

@@ -10,6 +10,7 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/ls/lshelp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
@@ -28,20 +29,20 @@ var (
)

func init() {
cmd.Root.AddCommand(commandDefintion)
flags := commandDefintion.Flags()
flags.StringVarP(&format, "format", "F", "p", "Output format - see help for details")
flags.StringVarP(&separator, "separator", "s", ";", "Separator for the items in the format.")
flags.BoolVarP(&dirSlash, "dir-slash", "d", true, "Append a slash to directory names.")
flags.VarP(&hashType, "hash", "", "Use this hash when `h` is used in the format MD5|SHA-1|DropboxHash")
flags.BoolVarP(&filesOnly, "files-only", "", false, "Only list files.")
flags.BoolVarP(&dirsOnly, "dirs-only", "", false, "Only list directories.")
flags.BoolVarP(&csv, "csv", "", false, "Output in CSV format.")
flags.BoolVarP(&absolute, "absolute", "", false, "Put a leading / in front of path names.")
commandDefintion.Flags().BoolVarP(&recurse, "recursive", "R", false, "Recurse into the listing.")
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.StringVarP(cmdFlags, &format, "format", "F", "p", "Output format - see help for details")
flags.StringVarP(cmdFlags, &separator, "separator", "s", ";", "Separator for the items in the format.")
flags.BoolVarP(cmdFlags, &dirSlash, "dir-slash", "d", true, "Append a slash to directory names.")
flags.FVarP(cmdFlags, &hashType, "hash", "", "Use this hash when `h` is used in the format MD5|SHA-1|DropboxHash")
flags.BoolVarP(cmdFlags, &filesOnly, "files-only", "", false, "Only list files.")
flags.BoolVarP(cmdFlags, &dirsOnly, "dirs-only", "", false, "Only list directories.")
flags.BoolVarP(cmdFlags, &csv, "csv", "", false, "Output in CSV format.")
flags.BoolVarP(cmdFlags, &absolute, "absolute", "", false, "Put a leading / in front of path names.")
flags.BoolVarP(cmdFlags, &recurse, "recursive", "R", false, "Recurse into the listing.")
}

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "lsf remote:path",
Short: `List directories and objects in remote:path formatted for parsing`,
Long: `

@@ -9,6 +9,7 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/ls/lshelp"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -18,17 +19,18 @@ var (
)

func init() {
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&opt.Recurse, "recursive", "R", false, "Recurse into the listing.")
commandDefintion.Flags().BoolVarP(&opt.ShowHash, "hash", "", false, "Include hashes in the output (may take longer).")
commandDefintion.Flags().BoolVarP(&opt.NoModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up).")
commandDefintion.Flags().BoolVarP(&opt.ShowEncrypted, "encrypted", "M", false, "Show the encrypted names.")
commandDefintion.Flags().BoolVarP(&opt.ShowOrigIDs, "original", "", false, "Show the ID of the underlying Object.")
commandDefintion.Flags().BoolVarP(&opt.FilesOnly, "files-only", "", false, "Show only files in the listing.")
commandDefintion.Flags().BoolVarP(&opt.DirsOnly, "dirs-only", "", false, "Show only directories in the listing.")
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &opt.Recurse, "recursive", "R", false, "Recurse into the listing.")
flags.BoolVarP(cmdFlags, &opt.ShowHash, "hash", "", false, "Include hashes in the output (may take longer).")
flags.BoolVarP(cmdFlags, &opt.NoModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up).")
flags.BoolVarP(cmdFlags, &opt.ShowEncrypted, "encrypted", "M", false, "Show the encrypted names.")
flags.BoolVarP(cmdFlags, &opt.ShowOrigIDs, "original", "", false, "Show the ID of the underlying Object.")
flags.BoolVarP(cmdFlags, &opt.FilesOnly, "files-only", "", false, "Show only files in the listing.")
flags.BoolVarP(cmdFlags, &opt.DirsOnly, "dirs-only", "", false, "Show only directories in the listing.")
}

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "lsjson remote:path",
Short: `List directories and objects in the path in JSON format.`,
Long: `List directories and objects in the path in JSON format.

@@ -11,10 +11,10 @@ import (
)

func init() {
cmd.Root.AddCommand(commandDefintion)
cmd.Root.AddCommand(commandDefinition)
}

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "lsl remote:path",
Short: `List the objects in path with modification time, size and path.`,
Long: `

@@ -10,10 +10,10 @@ import (
)

func init() {
cmd.Root.AddCommand(commandDefintion)
cmd.Root.AddCommand(commandDefinition)
}

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "md5sum remote:path",
Short: `Produces an md5sum file for all the objects in the path.`,
Long: `

@@ -12,10 +12,10 @@ import (
)

func init() {
cmd.Root.AddCommand(commandDefintion)
cmd.Root.AddCommand(commandDefinition)
}

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "memtest remote:path",
Short: `Load all the objects at remote:path and report memory stats.`,
Hidden: true,

@@ -9,10 +9,10 @@ import (
)

func init() {
cmd.Root.AddCommand(commandDefintion)
cmd.Root.AddCommand(commandDefinition)
}

var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: "mkdir remote:path",
Short: `Make the path if it doesn't already exist.`,
Run: func(command *cobra.Command, args []string) {

@@ -11,6 +11,7 @@ import (
fusefs "bazil.org/fuse/fs"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/vfs"
)
@@ -96,10 +97,15 @@ func (d *Dir) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error)
return nil, translateError(err)
}
for _, node := range items {
name := node.Name()
if len(name) >= mountlib.MaxLeafSize {
fs.Errorf(d, "Name too long (%d bytes) for FUSE, skipping: %s", len(name), name)
continue
}
var dirent = fuse.Dirent{
// Inode FIXME ???
Type: fuse.DT_File,
Name: node.Name(),
Name: name,
}
if node.IsDir() {
dirent.Type = fuse.DT_Dir

@@ -5,6 +5,7 @@ package mount
import (
"context"
"io"
"os"

"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
@@ -41,7 +42,12 @@ var _ fusefs.HandleWriter = (*FileHandle)(nil)
// Write data to the file handle
func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
defer log.Trace(fh, "len=%d, offset=%d", len(req.Data), req.Offset)("written=%d, err=%v", &resp.Size, &err)
n, err := fh.Handle.WriteAt(req.Data, req.Offset)
var n int
if fh.Handle.Node().VFS().Opt.CacheMode < vfs.CacheModeWrites || fh.Handle.Node().Mode()&os.ModeAppend == 0 {
n, err = fh.Handle.WriteAt(req.Data, req.Offset)
} else {
n, err = fh.Handle.Write(req.Data)
}
if err != nil {
return translateError(err)
}

@@ -32,12 +32,10 @@ func mountOptions(device string) (options []fuse.MountOption) {
fuse.Subtype("rclone"),
fuse.FSName(device),
fuse.VolumeName(mountlib.VolumeName),
fuse.AsyncRead(),

// Options from benchmarking in the fuse module
//fuse.MaxReadahead(64 * 1024 * 1024),
//fuse.AsyncRead(), - FIXME this causes
// ReadFileHandle.Read error: read /home/files/ISOs/xubuntu-15.10-desktop-amd64.iso: bad file descriptor
// which is probably related to errors people are having
//fuse.WritebackCache(),
}
if mountlib.NoAppleDouble {
@@ -139,6 +137,9 @@ func Mount(f fs.Fs, mountpoint string) error {
sigHup := make(chan os.Signal, 1)
signal.Notify(sigHup, syscall.SIGHUP)
atexit.IgnoreSignals()
atexit.Register(func() {
_ = unmount()
})

if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
return errors.Wrap(err, "failed to notify systemd")

@@ -38,6 +38,11 @@ var (
DaemonTimeout time.Duration // OSXFUSE only
)

// Global constants
const (
MaxLeafSize = 4095 // don't pass file names longer than this
)

func init() {
// DaemonTimeout defaults to non zero for macOS
if runtime.GOOS == "darwin" {
@@ -94,7 +99,7 @@ func checkMountpointOverlap(root, mountpoint string) error {

// NewMountCommand makes a mount command with the given name and Mount function
func NewMountCommand(commandName string, Mount func(f fs.Fs, mountpoint string) error) *cobra.Command {
var commandDefintion = &cobra.Command{
var commandDefinition = &cobra.Command{
Use: commandName + " remote:path /path/to/mountpoint",
Short: `Mount the remote as file system on a mountpoint.`,
Long: `
@@ -295,34 +300,34 @@ be copied to the vfs cache before opening with --vfs-cache-mode full.
}

// Register the command
cmd.Root.AddCommand(commandDefintion)
cmd.Root.AddCommand(commandDefinition)

// Add flags
flagSet := commandDefintion.Flags()
flags.BoolVarP(flagSet, &DebugFUSE, "debug-fuse", "", DebugFUSE, "Debug the FUSE internals - needs -v.")
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &DebugFUSE, "debug-fuse", "", DebugFUSE, "Debug the FUSE internals - needs -v.")
// mount options
flags.BoolVarP(flagSet, &AllowNonEmpty, "allow-non-empty", "", AllowNonEmpty, "Allow mounting over a non-empty directory.")
flags.BoolVarP(flagSet, &AllowRoot, "allow-root", "", AllowRoot, "Allow access to root user.")
flags.BoolVarP(flagSet, &AllowOther, "allow-other", "", AllowOther, "Allow access to other users.")
flags.BoolVarP(flagSet, &DefaultPermissions, "default-permissions", "", DefaultPermissions, "Makes kernel enforce access control based on the file mode.")
flags.BoolVarP(flagSet, &WritebackCache, "write-back-cache", "", WritebackCache, "Makes kernel buffer writes before sending them to rclone. Without this, writethrough caching is used.")
flags.FVarP(flagSet, &MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads.")
flags.DurationVarP(flagSet, &AttrTimeout, "attr-timeout", "", AttrTimeout, "Time for which file/directory attributes are cached.")
flags.StringArrayVarP(flagSet, &ExtraOptions, "option", "o", []string{}, "Option for libfuse/WinFsp. Repeat if required.")
flags.StringArrayVarP(flagSet, &ExtraFlags, "fuse-flag", "", []string{}, "Flags or arguments to be passed direct to libfuse/WinFsp. Repeat if required.")
flags.BoolVarP(flagSet, &Daemon, "daemon", "", Daemon, "Run mount as a daemon (background mode).")
flags.StringVarP(flagSet, &VolumeName, "volname", "", VolumeName, "Set the volume name (not supported by all OSes).")
flags.DurationVarP(flagSet, &DaemonTimeout, "daemon-timeout", "", DaemonTimeout, "Time limit for rclone to respond to kernel (not supported by all OSes).")
flags.BoolVarP(cmdFlags, &AllowNonEmpty, "allow-non-empty", "", AllowNonEmpty, "Allow mounting over a non-empty directory.")
flags.BoolVarP(cmdFlags, &AllowRoot, "allow-root", "", AllowRoot, "Allow access to root user.")
flags.BoolVarP(cmdFlags, &AllowOther, "allow-other", "", AllowOther, "Allow access to other users.")
flags.BoolVarP(cmdFlags, &DefaultPermissions, "default-permissions", "", DefaultPermissions, "Makes kernel enforce access control based on the file mode.")
flags.BoolVarP(cmdFlags, &WritebackCache, "write-back-cache", "", WritebackCache, "Makes kernel buffer writes before sending them to rclone. Without this, writethrough caching is used.")
flags.FVarP(cmdFlags, &MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads.")
flags.DurationVarP(cmdFlags, &AttrTimeout, "attr-timeout", "", AttrTimeout, "Time for which file/directory attributes are cached.")
flags.StringArrayVarP(cmdFlags, &ExtraOptions, "option", "o", []string{}, "Option for libfuse/WinFsp. Repeat if required.")
flags.StringArrayVarP(cmdFlags, &ExtraFlags, "fuse-flag", "", []string{}, "Flags or arguments to be passed direct to libfuse/WinFsp. Repeat if required.")
flags.BoolVarP(cmdFlags, &Daemon, "daemon", "", Daemon, "Run mount as a daemon (background mode).")
flags.StringVarP(cmdFlags, &VolumeName, "volname", "", VolumeName, "Set the volume name (not supported by all OSes).")
flags.DurationVarP(cmdFlags, &DaemonTimeout, "daemon-timeout", "", DaemonTimeout, "Time limit for rclone to respond to kernel (not supported by all OSes).")

if runtime.GOOS == "darwin" {
flags.BoolVarP(flagSet, &NoAppleDouble, "noappledouble", "", NoAppleDouble, "Sets the OSXFUSE option noappledouble.")
flags.BoolVarP(flagSet, &NoAppleXattr, "noapplexattr", "", NoAppleXattr, "Sets the OSXFUSE option noapplexattr.")
flags.BoolVarP(cmdFlags, &NoAppleDouble, "noappledouble", "", NoAppleDouble, "Sets the OSXFUSE option noappledouble.")
flags.BoolVarP(cmdFlags, &NoAppleXattr, "noapplexattr", "", NoAppleXattr, "Sets the OSXFUSE option noapplexattr.")
}

// Add in the generic flags
vfsflags.AddFlags(flagSet)
vfsflags.AddFlags(cmdFlags)

return commandDefintion
return commandDefinition
}

// ClipBlocks clips the blocks pointed to to the OS max

@@ -50,6 +50,8 @@ func TestRenameOpenHandle(t *testing.T) {
err = file.Close()
require.NoError(t, err)

run.waitForWriters()

// verify file was renamed properly
run.checkDir(t, "renamebla 9")

@@ -34,6 +34,11 @@ func osCreate(name string) (*os.File, error) {
return os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
}

// os.Create with append
func osAppend(name string) (*os.File, error) {
return os.OpenFile(name, os.O_WRONLY|os.O_APPEND, 0666)
}

// TestFileModTimeWithOpenWriters tests mod time on open files
func TestFileModTimeWithOpenWriters(t *testing.T) {
run.skipIfNoFUSE(t)

@@ -78,6 +78,7 @@ func RunTests(t *testing.T, fn MountFn) {
t.Run("TestWriteFileDoubleClose", TestWriteFileDoubleClose)
t.Run("TestWriteFileFsync", TestWriteFileFsync)
t.Run("TestWriteFileDup", TestWriteFileDup)
t.Run("TestWriteFileAppend", TestWriteFileAppend)
})
log.Printf("Finished test run with cache mode %v (ok=%v)", cacheMode, ok)
if !ok {

@@ -2,6 +2,7 @@ package mounttest

import (
"os"
+"runtime"
"testing"

"github.com/stretchr/testify/assert"
@@ -130,3 +131,48 @@ func TestWriteFileDup(t *testing.T) {
run.waitForWriters()
run.rm(t, "to be synced")
}
+
+// TestWriteFileAppend tests that O_APPEND works on cache backends >= writes
+func TestWriteFileAppend(t *testing.T) {
+run.skipIfNoFUSE(t)
+
+if run.vfs.Opt.CacheMode < vfs.CacheModeWrites {
+t.Skip("not supported on vfs-cache-mode < writes")
+return
+}
+
+// TODO: Windows needs the v1.5 release of WinFsp to handle O_APPEND properly.
+// Until it gets released, skip this test on Windows.
+if runtime.GOOS == "windows" {
+t.Skip("currently unsupported on Windows")
+}
+
+filepath := run.path("to be synced")
+fh, err := osCreate(filepath)
+require.NoError(t, err)
+
+testData := []byte("0123456789")
+appendData := []byte("10")
+
+_, err = fh.Write(testData)
+require.NoError(t, err)
+
+err = fh.Close()
+require.NoError(t, err)
+
+fh, err = osAppend(filepath)
+require.NoError(t, err)
+
+_, err = fh.Write(appendData)
+require.NoError(t, err)
+
+err = fh.Close()
+require.NoError(t, err)
+
+info, err := os.Stat(filepath)
+require.NoError(t, err)
+require.EqualValues(t, len(testData)+len(appendData), info.Size())
+
+run.waitForWriters()
+run.rm(t, "to be synced")
+}
@@ -4,6 +4,7 @@ import (
"context"

"github.com/rclone/rclone/cmd"
+"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/sync"
"github.com/spf13/cobra"
@@ -16,12 +17,13 @@ var (
)

func init() {
-cmd.Root.AddCommand(commandDefintion)
-commandDefintion.Flags().BoolVarP(&deleteEmptySrcDirs, "delete-empty-src-dirs", "", deleteEmptySrcDirs, "Delete empty source dirs after move")
-commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after move")
+cmd.Root.AddCommand(commandDefinition)
+cmdFlags := commandDefinition.Flags()
+flags.BoolVarP(cmdFlags, &deleteEmptySrcDirs, "delete-empty-src-dirs", "", deleteEmptySrcDirs, "Delete empty source dirs after move")
+flags.BoolVarP(cmdFlags, &createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after move")
}

-var commandDefintion = &cobra.Command{
+var commandDefinition = &cobra.Command{
Use: "move source:path dest:path",
Short: `Move files from source to dest.`,
Long: `
@@ -10,10 +10,10 @@ import (
)

func init() {
-cmd.Root.AddCommand(commandDefintion)
+cmd.Root.AddCommand(commandDefinition)
}

-var commandDefintion = &cobra.Command{
+var commandDefinition = &cobra.Command{
Use: "moveto source:path dest:path",
Short: `Move file or directory from source to dest.`,
Long: `

@@ -24,10 +24,10 @@ import (
)

func init() {
-cmd.Root.AddCommand(commandDefintion)
+cmd.Root.AddCommand(commandDefinition)
}

-var commandDefintion = &cobra.Command{
+var commandDefinition = &cobra.Command{
Use: "ncdu remote:path",
Short: `Explore a remote with a text based user interface.`,
Long: `

@@ -9,10 +9,10 @@ import (
)

func init() {
-cmd.Root.AddCommand(commandDefintion)
+cmd.Root.AddCommand(commandDefinition)
}

-var commandDefintion = &cobra.Command{
+var commandDefinition = &cobra.Command{
Use: "obscure password",
Short: `Obscure password for use in the rclone.conf`,
Run: func(command *cobra.Command, args []string) {

@@ -9,10 +9,10 @@ import (
)

func init() {
-cmd.Root.AddCommand(commandDefintion)
+cmd.Root.AddCommand(commandDefinition)
}

-var commandDefintion = &cobra.Command{
+var commandDefinition = &cobra.Command{
Use: "purge remote:path",
Short: `Remove the path and all of its contents.`,
Long: `
cmd/rc/rc.go
@@ -13,6 +13,7 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
+"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/rc"
"github.com/spf13/cobra"
@@ -29,16 +30,17 @@ var (
)

func init() {
-cmd.Root.AddCommand(commandDefintion)
-commandDefintion.Flags().BoolVarP(&noOutput, "no-output", "", noOutput, "If set don't output the JSON result.")
-commandDefintion.Flags().StringVarP(&url, "url", "", url, "URL to connect to rclone remote control.")
-commandDefintion.Flags().StringVarP(&jsonInput, "json", "", jsonInput, "Input JSON - use instead of key=value args.")
-commandDefintion.Flags().StringVarP(&authUser, "user", "", "", "Username to use to rclone remote control.")
-commandDefintion.Flags().StringVarP(&authPass, "pass", "", "", "Password to use to connect to rclone remote control.")
-commandDefintion.Flags().BoolVarP(&loopback, "loopback", "", false, "If set connect to this rclone instance not via HTTP.")
+cmd.Root.AddCommand(commandDefinition)
+cmdFlags := commandDefinition.Flags()
+flags.BoolVarP(cmdFlags, &noOutput, "no-output", "", noOutput, "If set don't output the JSON result.")
+flags.StringVarP(cmdFlags, &url, "url", "", url, "URL to connect to rclone remote control.")
+flags.StringVarP(cmdFlags, &jsonInput, "json", "", jsonInput, "Input JSON - use instead of key=value args.")
+flags.StringVarP(cmdFlags, &authUser, "user", "", "", "Username to use to rclone remote control.")
+flags.StringVarP(cmdFlags, &authPass, "pass", "", "", "Password to use to connect to rclone remote control.")
+flags.BoolVarP(cmdFlags, &loopback, "loopback", "", false, "If set connect to this rclone instance not via HTTP.")
}

-var commandDefintion = &cobra.Command{
+var commandDefinition = &cobra.Command{
Use: "rc commands parameter",
Short: `Run a command against a running rclone.`,
Long: `
@@ -12,10 +12,10 @@ import (
)

func init() {
-cmd.Root.AddCommand(commandDefintion)
+cmd.Root.AddCommand(commandDefinition)
}

-var commandDefintion = &cobra.Command{
+var commandDefinition = &cobra.Command{
Use: "rcat remote:path",
Short: `Copies standard input to file on remote.`,
Long: `

@@ -9,10 +9,10 @@ import (
)

func init() {
-cmd.Root.AddCommand(commandDefintion)
+cmd.Root.AddCommand(commandDefinition)
}

-var commandDefintion = &cobra.Command{
+var commandDefinition = &cobra.Command{
Use: "reveal password",
Short: `Reveal obscured password from rclone.conf`,
Run: func(command *cobra.Command, args []string) {

@@ -9,10 +9,10 @@ import (
)

func init() {
-cmd.Root.AddCommand(commandDefintion)
+cmd.Root.AddCommand(commandDefinition)
}

-var commandDefintion = &cobra.Command{
+var commandDefinition = &cobra.Command{
Use: "rmdir remote:path",
Short: `Remove the path if empty.`,
Long: `
@@ -214,7 +214,7 @@ func withHeader(name string, value string, next http.Handler) http.Handler {

// serveError returns an http.StatusInternalServerError and logs the error
func serveError(what interface{}, w http.ResponseWriter, text string, err error) {
-fs.CountError(err)
+err = fs.CountError(err)
fs.Errorf(what, "%s: %v", text, err)
http.Error(w, text+".", http.StatusInternalServerError)
}
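The one-character-looking change above (`fs.CountError(err)` becoming `err = fs.CountError(err)`) implies that CountError now returns the error it was handed, so counting and propagation happen in a single statement. A hedged sketch of that call shape, with hypothetical counter mechanics (not rclone's actual implementation):

```
package example

import "sync/atomic"

var errorCount int64

// countError mirrors the updated fs.CountError call shape: record the
// error, then hand it back unchanged so callers can chain it as in
// `err = countError(err)`.
func countError(err error) error {
	if err != nil {
		atomic.AddInt64(&errorCount, 1)
	}
	return err
}
```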
@@ -15,7 +15,6 @@ import (
"strconv"
"sync"

-ftp "github.com/goftp/server"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve/proxy"
@@ -29,6 +28,7 @@ import (
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
+ftp "goftp.io/server"
)
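These paired import hunks are the whole migration for this file: the FTP server dependency moves from github.com/goftp/server to its goftp.io/server vanity path while keeping the same `ftp` alias, so call sites such as `ftp.NewServer(ftpopt)` below compile unchanged. A minimal sketch of the swap:

```
package example

// Only the import path changes; the alias stays the same.
import (
	ftp "goftp.io/server" // previously: ftp "github.com/goftp/server"
)

// Referencing the alias the way the server code does keeps this compiling.
var _ = ftp.NewServer
```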

// Options contains options for the http Server
@@ -155,7 +155,7 @@ func newServer(f fs.Fs, opt *Options) (*server, error) {
PassivePorts: opt.PassivePorts,
Auth: s, // implemented by CheckPasswd method
Logger: &Logger{},
-//TODO implement a maximum of https://godoc.org/github.com/goftp/server#ServerOpts
+//TODO implement a maximum of https://godoc.org/goftp.io/server#ServerOpts
}
s.srv = ftp.NewServer(ftpopt)
return s, nil
@@ -210,8 +210,8 @@ func (l *Logger) PrintResponse(sessionID string, code int, message string) {
// CheckPassword is called with the connection.
func findID(callerName []byte) (string, error) {
// Dump the stack in this format
-// github.com/rclone/rclone/vendor/github.com/goftp/server.(*Conn).Serve(0xc0000b2680)
-// /home/ncw/go/src/github.com/rclone/rclone/vendor/github.com/goftp/server/conn.go:116 +0x11d
+// github.com/rclone/rclone/vendor/goftp.io/server.(*Conn).Serve(0xc0000b2680)
+// /home/ncw/go/src/github.com/rclone/rclone/vendor/goftp.io/server/conn.go:116 +0x11d
buf := make([]byte, 4096)
n := runtime.Stack(buf, false)
buf = buf[:n]
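findID captures the current goroutine's stack purely to fish the per-connection `*Conn` pointer (the `0xc0000b2680` in the comment above) out of a frame, for use as a session ID. A hypothetical sketch of the extraction step that would follow `buf = buf[:n]`; the regexp and helper name are illustrative, not rclone's actual code:

```
package example

import (
	"errors"
	"regexp"
)

// connIDRe matches a frame like "goftp.io/server.(*Conn).Serve(0xc0000b2680)"
// and captures the pointer value, which is unique per connection.
var connIDRe = regexp.MustCompile(`\(\*Conn\)\.Serve\((0x[0-9a-f]+)\)`)

// extractID is a hypothetical helper returning the captured pointer as a
// session ID, or an error when the frame is not on the stack.
func extractID(stack []byte) (string, error) {
	m := connIDRe.FindSubmatch(stack)
	if m == nil {
		return "", errors.New("connection ID not found in stack")
	}
	return string(m[1]), nil
}
```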

@@ -11,7 +11,6 @@ import (
"fmt"
"testing"

-ftp "github.com/goftp/server"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
@@ -19,6 +18,7 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ftp "goftp.io/server"
)

const (
@@ -68,7 +68,7 @@ func (d *Directory) AddEntry(remote string, isDir bool) {

// Error logs the error and if a ResponseWriter is given it writes a http.StatusInternalServerError
func Error(what interface{}, w http.ResponseWriter, text string, err error) {
-fs.CountError(err)
+err = fs.CountError(err)
fs.Errorf(what, "%s: %v", text, err)
if w != nil {
http.Error(w, text+".", http.StatusInternalServerError)
@@ -208,7 +208,10 @@ func (p *Proxy) call(user, pass string, passwordBytes []byte) (value interface{}
if err != nil {
return nil, false, err
}
-pwHash, err := bcrypt.GenerateFromPassword(passwordBytes, bcrypt.DefaultCost)
+// The bcrypt cost is a compromise between security and speed. The password is looked up on every
+// transaction for WebDAV so we store it lightly hashed. An attacker would find it easier to go after
+// the unencrypted password in memory most likely.
+pwHash, err := bcrypt.GenerateFromPassword(passwordBytes, bcrypt.MinCost)
if err != nil {
return nil, false, err
}
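The added comment spells out the trade-off: the password is re-hashed and checked on every WebDAV transaction, so the diff drops from bcrypt.DefaultCost (10) to bcrypt.MinCost (4), and each step of bcrypt cost roughly doubles hashing time. A small sketch of the two calls involved, assuming the standard golang.org/x/crypto/bcrypt API:

```
package example

import "golang.org/x/crypto/bcrypt"

// hashAndCheck stores the password lightly hashed (MinCost) and shows the
// matching verification call used on later lookups.
func hashAndCheck(password []byte) error {
	pwHash, err := bcrypt.GenerateFromPassword(password, bcrypt.MinCost)
	if err != nil {
		return err
	}
	// CompareHashAndPassword reads the cost out of the hash itself, so
	// verification works whatever cost was used to generate it.
	return bcrypt.CompareHashAndPassword(pwHash, password)
}
```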

@@ -271,7 +271,7 @@ func (s *server) postObject(w http.ResponseWriter, r *http.Request, remote strin

_, err := operations.RcatSize(r.Context(), s.f, remote, r.Body, r.ContentLength, time.Now())
if err != nil {
-accounting.Stats(r.Context()).Error(err)
+err = accounting.Stats(r.Context()).Error(err)
fs.Errorf(remote, "Post request rcat error: %v", err)
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)

@@ -10,10 +10,10 @@ import (
)

func init() {
-cmd.Root.AddCommand(commandDefintion)
+cmd.Root.AddCommand(commandDefinition)
}

-var commandDefintion = &cobra.Command{
+var commandDefinition = &cobra.Command{
Use: "settier tier remote:path",
Short: `Changes storage class/tier of objects in remote.`,
Long: `
@@ -10,10 +10,10 @@ import (
)

func init() {
-cmd.Root.AddCommand(commandDefintion)
+cmd.Root.AddCommand(commandDefinition)
}

-var commandDefintion = &cobra.Command{
+var commandDefinition = &cobra.Command{
Use: "sha1sum remote:path",
Short: `Produces an sha1sum file for all the objects in the path.`,
Long: `

@@ -8,6 +8,7 @@ import (

"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
+"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -16,7 +17,8 @@ var jsonOutput bool

func init() {
cmd.Root.AddCommand(commandDefinition)
-commandDefinition.Flags().BoolVar(&jsonOutput, "json", false, "format output as JSON")
+cmdFlags := commandDefinition.Flags()
+flags.BoolVarP(cmdFlags, &jsonOutput, "json", "", false, "format output as JSON")
}

var commandDefinition = &cobra.Command{
@@ -4,6 +4,7 @@ import (
"context"

"github.com/rclone/rclone/cmd"
+"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/sync"
"github.com/spf13/cobra"
@@ -14,11 +15,12 @@ var (
)

func init() {
-cmd.Root.AddCommand(commandDefintion)
-commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after sync")
+cmd.Root.AddCommand(commandDefinition)
+cmdFlags := commandDefinition.Flags()
+flags.BoolVarP(cmdFlags, &createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after sync")
}

-var commandDefintion = &cobra.Command{
+var commandDefinition = &cobra.Command{
Use: "sync source:path dest:path",
Short: `Make source and dest identical, modifying destination only.`,
Long: `
@@ -8,6 +8,7 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
+"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/object"
"github.com/spf13/cobra"
)
@@ -21,13 +22,13 @@ const defaultLayout string = "060102"
const layoutDateWithTime = "2006-01-02T15:04:05"

func init() {
-cmd.Root.AddCommand(commandDefintion)
-flags := commandDefintion.Flags()
-flags.BoolVarP(&notCreateNewFile, "no-create", "C", false, "Do not create the file if it does not exist.")
-flags.StringVarP(&timeAsArgument, "timestamp", "t", "", "Change the modification times to the specified time instead of the current time of day. The argument is of the form 'YYMMDD' (ex. 17.10.30) or 'YYYY-MM-DDTHH:MM:SS' (ex. 2006-01-02T15:04:05)")
+cmd.Root.AddCommand(commandDefinition)
+cmdFlags := commandDefinition.Flags()
+flags.BoolVarP(cmdFlags, &notCreateNewFile, "no-create", "C", false, "Do not create the file if it does not exist.")
+flags.StringVarP(cmdFlags, &timeAsArgument, "timestamp", "t", "", "Change the modification times to the specified time instead of the current time of day. The argument is of the form 'YYMMDD' (ex. 17.10.30) or 'YYYY-MM-DDTHH:MM:SS' (ex. 2006-01-02T15:04:05)")
}

-var commandDefintion = &cobra.Command{
+var commandDefinition = &cobra.Command{
Use: "touch remote:path",
Short: `Create new file or change file modification time.`,
Run: func(command *cobra.Command, args []string) {
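The touch hunk above declares two Go reference-time layouts matching the `--timestamp` forms named in its help text: `defaultLayout` ("060102", i.e. YYMMDD) and `layoutDateWithTime` ("2006-01-02T15:04:05"). A quick sketch of how such an argument could be parsed; the fallback ordering is an assumption, not necessarily touch's exact logic:

```
package example

import "time"

const defaultLayout string = "060102"            // 'YYMMDD', e.g. "171030"
const layoutDateWithTime = "2006-01-02T15:04:05" // 'YYYY-MM-DDTHH:MM:SS'

// parseTimestamp tries the short date-only layout first, then falls back
// to the full date-time layout.
func parseTimestamp(s string) (time.Time, error) {
	if t, err := time.Parse(defaultLayout, s); err == nil {
		return t, nil
	}
	return time.Parse(layoutDateWithTime, s)
}
```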
@@ -14,6 +14,7 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
+"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/walk"
@@ -28,43 +29,43 @@ var (
)

func init() {
-cmd.Root.AddCommand(commandDefintion)
-flags := commandDefintion.Flags()
+cmd.Root.AddCommand(commandDefinition)
+cmdFlags := commandDefinition.Flags()
// List
-flags.BoolVarP(&opts.All, "all", "a", false, "All files are listed (list . files too).")
-flags.BoolVarP(&opts.DirsOnly, "dirs-only", "d", false, "List directories only.")
-flags.BoolVarP(&opts.FullPath, "full-path", "", false, "Print the full path prefix for each file.")
-//flags.BoolVarP(&opts.IgnoreCase, "ignore-case", "", false, "Ignore case when pattern matching.")
-flags.BoolVarP(&noReport, "noreport", "", false, "Turn off file/directory count at end of tree listing.")
-// flags.BoolVarP(&opts.FollowLink, "follow", "l", false, "Follow symbolic links like directories.")
-flags.IntVarP(&opts.DeepLevel, "level", "", 0, "Descend only level directories deep.")
-// flags.StringVarP(&opts.Pattern, "pattern", "P", "", "List only those files that match the pattern given.")
-// flags.StringVarP(&opts.IPattern, "exclude", "", "", "Do not list files that match the given pattern.")
-flags.StringVarP(&outFileName, "output", "o", "", "Output to file instead of stdout.")
+flags.BoolVarP(cmdFlags, &opts.All, "all", "a", false, "All files are listed (list . files too).")
+flags.BoolVarP(cmdFlags, &opts.DirsOnly, "dirs-only", "d", false, "List directories only.")
+flags.BoolVarP(cmdFlags, &opts.FullPath, "full-path", "", false, "Print the full path prefix for each file.")
+//flags.BoolVarP(cmdFlags, &opts.IgnoreCase, "ignore-case", "", false, "Ignore case when pattern matching.")
+flags.BoolVarP(cmdFlags, &noReport, "noreport", "", false, "Turn off file/directory count at end of tree listing.")
+// flags.BoolVarP(cmdFlags, &opts.FollowLink, "follow", "l", false, "Follow symbolic links like directories.")
+flags.IntVarP(cmdFlags, &opts.DeepLevel, "level", "", 0, "Descend only level directories deep.")
+// flags.StringVarP(cmdFlags, &opts.Pattern, "pattern", "P", "", "List only those files that match the pattern given.")
+// flags.StringVarP(cmdFlags, &opts.IPattern, "exclude", "", "", "Do not list files that match the given pattern.")
+flags.StringVarP(cmdFlags, &outFileName, "output", "o", "", "Output to file instead of stdout.")
// Files
-flags.BoolVarP(&opts.ByteSize, "size", "s", false, "Print the size in bytes of each file.")
-flags.BoolVarP(&opts.UnitSize, "human", "", false, "Print the size in a more human readable way.")
-flags.BoolVarP(&opts.FileMode, "protections", "p", false, "Print the protections for each file.")
-// flags.BoolVarP(&opts.ShowUid, "uid", "", false, "Displays file owner or UID number.")
-// flags.BoolVarP(&opts.ShowGid, "gid", "", false, "Displays file group owner or GID number.")
-flags.BoolVarP(&opts.Quotes, "quote", "Q", false, "Quote filenames with double quotes.")
-flags.BoolVarP(&opts.LastMod, "modtime", "D", false, "Print the date of last modification.")
-// flags.BoolVarP(&opts.Inodes, "inodes", "", false, "Print inode number of each file.")
-// flags.BoolVarP(&opts.Device, "device", "", false, "Print device ID number to which each file belongs.")
+flags.BoolVarP(cmdFlags, &opts.ByteSize, "size", "s", false, "Print the size in bytes of each file.")
+flags.BoolVarP(cmdFlags, &opts.UnitSize, "human", "", false, "Print the size in a more human readable way.")
+flags.BoolVarP(cmdFlags, &opts.FileMode, "protections", "p", false, "Print the protections for each file.")
+// flags.BoolVarP(cmdFlags, &opts.ShowUid, "uid", "", false, "Displays file owner or UID number.")
+// flags.BoolVarP(cmdFlags, &opts.ShowGid, "gid", "", false, "Displays file group owner or GID number.")
+flags.BoolVarP(cmdFlags, &opts.Quotes, "quote", "Q", false, "Quote filenames with double quotes.")
+flags.BoolVarP(cmdFlags, &opts.LastMod, "modtime", "D", false, "Print the date of last modification.")
+// flags.BoolVarP(cmdFlags, &opts.Inodes, "inodes", "", false, "Print inode number of each file.")
+// flags.BoolVarP(cmdFlags, &opts.Device, "device", "", false, "Print device ID number to which each file belongs.")
// Sort
-flags.BoolVarP(&opts.NoSort, "unsorted", "U", false, "Leave files unsorted.")
-flags.BoolVarP(&opts.VerSort, "version", "", false, "Sort files alphanumerically by version.")
-flags.BoolVarP(&opts.ModSort, "sort-modtime", "t", false, "Sort files by last modification time.")
-flags.BoolVarP(&opts.CTimeSort, "sort-ctime", "", false, "Sort files by last status change time.")
-flags.BoolVarP(&opts.ReverSort, "sort-reverse", "r", false, "Reverse the order of the sort.")
-flags.BoolVarP(&opts.DirSort, "dirsfirst", "", false, "List directories before files (-U disables).")
-flags.StringVarP(&sort, "sort", "", "", "Select sort: name,version,size,mtime,ctime.")
+flags.BoolVarP(cmdFlags, &opts.NoSort, "unsorted", "U", false, "Leave files unsorted.")
+flags.BoolVarP(cmdFlags, &opts.VerSort, "version", "", false, "Sort files alphanumerically by version.")
+flags.BoolVarP(cmdFlags, &opts.ModSort, "sort-modtime", "t", false, "Sort files by last modification time.")
+flags.BoolVarP(cmdFlags, &opts.CTimeSort, "sort-ctime", "", false, "Sort files by last status change time.")
+flags.BoolVarP(cmdFlags, &opts.ReverSort, "sort-reverse", "r", false, "Reverse the order of the sort.")
+flags.BoolVarP(cmdFlags, &opts.DirSort, "dirsfirst", "", false, "List directories before files (-U disables).")
+flags.StringVarP(cmdFlags, &sort, "sort", "", "", "Select sort: name,version,size,mtime,ctime.")
// Graphics
-flags.BoolVarP(&opts.NoIndent, "noindent", "i", false, "Don't print indentation lines.")
-flags.BoolVarP(&opts.Colorize, "color", "C", false, "Turn colorization on always.")
+flags.BoolVarP(cmdFlags, &opts.NoIndent, "noindent", "i", false, "Don't print indentation lines.")
+flags.BoolVarP(cmdFlags, &opts.Colorize, "color", "C", false, "Turn colorization on always.")
}

-var commandDefintion = &cobra.Command{
+var commandDefinition = &cobra.Command{
Use: "tree remote:path",
Short: `List the contents of the remote in a tree like fashion.`,
Long: `
@@ -10,6 +10,7 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
+"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/version"
"github.com/spf13/cobra"
)
@@ -20,8 +21,8 @@ var (

func init() {
cmd.Root.AddCommand(commandDefinition)
-flags := commandDefinition.Flags()
-flags.BoolVarP(&check, "check", "", false, "Check for new version.")
+cmdFlags := commandDefinition.Flags()
+flags.BoolVarP(cmdFlags, &check, "check", "", false, "Check for new version.")
}

var commandDefinition = &cobra.Command{
@@ -192,7 +192,7 @@ Contributors
* Sheldon Rupp <me@shel.io>
* albertony <12441419+albertony@users.noreply.github.com>
* cron410 <cron410@gmail.com>
-* Anagh Kumar Baranwal <anaghk.dos@gmail.com>
+* Anagh Kumar Baranwal <anaghk.dos@gmail.com> <6824881+darthShadow@users.noreply.github.com>
* Felix Brucker <felix@felixbrucker.com>
* Santiago Rodríguez <scollazo@users.noreply.github.com>
* Craig Miskell <craig.miskell@fluxfederation.com>
@@ -263,7 +263,7 @@ Contributors
* garry415 <garry.415@gmail.com>
* forgems <forgems@gmail.com>
* Florian Apolloner <florian@apolloner.eu>
-* Aleksandar Jankovic <office@ajankovic.com>
+* Aleksandar Janković <office@ajankovic.com> <ajankovic@users.noreply.github.com>
* Maran <maran@protonmail.com>
* nguyenhuuluan434 <nguyenhuuluan434@gmail.com>
* Laura Hausmann <zotan@zotan.pw> <laura@hausmann.dev>
@@ -301,3 +301,18 @@ Contributors
* Vighnesh SK <booterror99@gmail.com>
* Arijit Biswas <dibbyo456@gmail.com>
* Michele Caci <michele.caci@gmail.com>
+* AlexandrBoltris <ua2fgb@gmail.com>
+* Bryce Larson <blarson@saltstack.com>
+* Carlos Ferreyra <crypticmind@gmail.com>
+* Saksham Khanna <sakshamkhanna@outlook.com>
+* dausruddin <5763466+dausruddin@users.noreply.github.com>
+* zero-24 <zero-24@users.noreply.github.com>
+* Xiaoxing Ye <ye@xiaoxing.us>
+* Barry Muldrey <barry@muldrey.net>
+* Sebastian Brandt <sebastian.brandt@friday.de>
+* Marco Molteni <marco.molteni@mailbox.org>
+* Ankur Gupta <ankur0493@gmail.com>
+* Maciej Zimnoch <maciej@scylladb.com>
+* anuar45 <serdaliyev.anuar@gmail.com>
+* Fernando <ferferga@users.noreply.github.com>
+* David Cole <david.cole@sohonet.com>
@@ -252,6 +252,30 @@ Leave blank normally.
- Type: string
- Default: ""
+
+#### --box-box-config-file
+
+Box App config.json location
+Leave blank normally.
+
+- Config: box_config_file
+- Env Var: RCLONE_BOX_BOX_CONFIG_FILE
+- Type: string
+- Default: ""
+
+#### --box-box-sub-type
+
+
+
+- Config: box_sub_type
+- Env Var: RCLONE_BOX_BOX_SUB_TYPE
+- Type: string
+- Default: "user"
+- Examples:
+    - "user"
+        - Rclone should act on behalf of a user
+    - "enterprise"
+        - Rclone should act on behalf of a service account

### Advanced Options

Here are the advanced options specific to box (Box).
@@ -1,11 +1,137 @@
---
title: "Documentation"
description: "Rclone Changelog"
-date: "2019-10-05"
+date: "2019-11-19"
---

# Changelog

+## v1.50.2 - 2019-11-19
+
+* Bug Fixes
+    * accounting: Fix memory leak on retries operations (Nick Craig-Wood)
+* Drive
+    * Fix listing of the root directory with drive.files scope (Nick Craig-Wood)
+    * Fix --drive-root-folder-id with team/shared drives (Nick Craig-Wood)
+
+## v1.50.1 - 2019-11-02
+
+* Bug Fixes
+    * hash: Fix accidentally changed hash names for `DropboxHash` and `CRC-32` (Nick Craig-Wood)
+    * fshttp: Fix error reporting on tpslimit token bucket errors (Nick Craig-Wood)
+    * fshttp: Don't print token bucket errors on context cancelled (Nick Craig-Wood)
+* Local
+    * Fix listings of . on Windows (Nick Craig-Wood)
+* Onedrive
+    * Fix DirMove/Move after Onedrive change (Xiaoxing Ye)
+
+## v1.50.0 - 2019-10-26
+
+* New backends
+    * [Citrix Sharefile](/sharefile) (Nick Craig-Wood)
+    * [Chunker](/chunker) - an overlay backend to split files into smaller parts (Ivan Andreev)
+    * [Mail.ru Cloud](/mailru) (Ivan Andreev)
+* New Features
+    * encodings (Fabian Möller & Nick Craig-Wood)
+        * All backends now use file name encoding to ensure any file name can be written to any backend.
+        * See the [restricted file name docs](/overview/#restricted-filenames) for more info and the [local backend docs](/local/#filenames).
+        * Some file names may look different in rclone if you are using any control characters in names or [unicode FULLWIDTH symbols](https://en.wikipedia.org/wiki/Halfwidth_and_Fullwidth_Forms_(Unicode_block)).
+    * build
+        * Update to use go1.13 for the build (Nick Craig-Wood)
+        * Drop support for go1.9 (Nick Craig-Wood)
+        * Build rclone with GitHub actions (Nick Craig-Wood)
+        * Convert python scripts to python3 (Nick Craig-Wood)
+        * Swap Azure/go-ansiterm for mattn/go-colorable (Nick Craig-Wood)
+        * Dockerfile fixes (Matei David)
+        * Add [plugin support](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#writing-a-plugin) for backends and commands (Richard Patel)
+    * config
+        * Use alternating Red/Green in config to make more obvious (Nick Craig-Wood)
+    * contrib
+        * Add sample DLNA server Docker Compose manifest. (pataquets)
+        * Add sample WebDAV server Docker Compose manifest. (pataquets)
+    * copyurl
+        * Add `--auto-filename` flag for using file name from URL in destination path (Denis)
+    * serve dlna:
+        * Many compatability improvements (Dan Walters)
+        * Support for external srt subtitles (Dan Walters)
+    * rc
+        * Added command core/quit (Saksham Khanna)
+* Bug Fixes
+    * sync
+        * Make `--update`/`-u` not transfer files that haven't changed (Nick Craig-Wood)
+        * Free objects after they come out of the transfer pipe to save memory (Nick Craig-Wood)
+        * Fix `--files-from without --no-traverse` doing a recursive scan (Nick Craig-Wood)
+    * operations
+        * Fix accounting for server side copies (Nick Craig-Wood)
+        * Display 'All duplicates removed' only if dedupe successful (Sezal Agrawal)
+        * Display 'Deleted X extra copies' only if dedupe successful (Sezal Agrawal)
+    * accounting
+        * Only allow up to 100 completed transfers in the accounting list to save memory (Nick Craig-Wood)
+        * Cull the old time ranges when possible to save memory (Nick Craig-Wood)
+        * Fix panic due to server-side copy fallback (Ivan Andreev)
+        * Fix memory leak noticeable for transfers of large numbers of objects (Nick Craig-Wood)
+        * Fix total duration calculation (Nick Craig-Wood)
+    * cmd
+        * Fix environment variables not setting command line flags (Nick Craig-Wood)
+        * Make autocomplete compatible with bash's posix mode for macOS (Danil Semelenov)
+        * Make `--progress` work in git bash on Windows (Nick Craig-Wood)
+        * Fix 'compopt: command not found' on autocomplete on macOS (Danil Semelenov)
+    * config
+        * Fix setting of non top level flags from environment variables (Nick Craig-Wood)
+        * Check config names more carefully and report errors (Nick Craig-Wood)
+        * Remove error: can't use `--size-only` and `--ignore-size` together. (Nick Craig-Wood)
+    * filter: Prevent mixing options when `--files-from` is in use (Michele Caci)
+    * serve sftp: Fix crash on unsupported operations (eg Readlink) (Nick Craig-Wood)
+* Mount
+    * Allow files of unkown size to be read properly (Nick Craig-Wood)
+    * Skip tests on <= 2 CPUs to avoid lockup (Nick Craig-Wood)
+    * Fix panic on File.Open (Nick Craig-Wood)
+    * Fix "mount_fusefs: -o timeout=: option not supported" on FreeBSD (Nick Craig-Wood)
+    * Don't pass huge filenames (>4k) to FUSE as it can't cope (Nick Craig-Wood)
+* VFS
+    * Add flag `--vfs-case-insensitive` for windows/macOS mounts (Ivan Andreev)
+    * Make objects of unknown size readable through the VFS (Nick Craig-Wood)
+    * Move writeback of dirty data out of close() method into its own method (FlushWrites) and remove close() call from Flush() (Brett Dutro)
+    * Stop empty dirs disappearing when renamed on bucket based remotes (Nick Craig-Wood)
+    * Stop change notify polling clearing so much of the directory cache (Nick Craig-Wood)
+* Azure Blob
+    * Disable logging to the Windows event log (Nick Craig-Wood)
+* B2
+    * Remove `unverified:` prefix on sha1 to improve interop (eg with CyberDuck) (Nick Craig-Wood)
+* Box
+    * Add options to get access token via JWT auth (David)
+* Drive
+    * Disable HTTP/2 by default to work around INTERNAL_ERROR problems (Nick Craig-Wood)
+    * Make sure that drive root ID is always canonical (Nick Craig-Wood)
+    * Fix `--drive-shared-with-me` from the root with lsand `--fast-list` (Nick Craig-Wood)
+    * Fix ChangeNotify polling for shared drives (Nick Craig-Wood)
+    * Fix change notify polling when using appDataFolder (Nick Craig-Wood)
+* Dropbox
+    * Make disallowed filenames errors not retry (Nick Craig-Wood)
+    * Fix nil pointer exception on restricted files (Nick Craig-Wood)
+* Fichier
+    * Fix accessing files > 2GB on 32 bit systems (Nick Craig-Wood)
+* FTP
+    * Allow disabling EPSV mode (Jon Fautley)
+* HTTP
+    * HEAD directory entries in parallel to speedup (Nick Craig-Wood)
+    * Add `--http-no-head` to stop rclone doing HEAD in listings (Nick Craig-Wood)
+* Putio
+    * Add ability to resume uploads (Cenk Alti)
+* S3
+    * Fix signature v2_auth headers (Anthony Rusdi)
+    * Fix encoding for control characters (Nick Craig-Wood)
+    * Only ask for URL encoded directory listings if we need them on Ceph (Nick Craig-Wood)
+    * Add option for multipart failiure behaviour (Aleksandar Jankovic)
+    * Support for multipart copy (庄天翼)
+    * Fix nil pointer reference if no metadata returned for object (Nick Craig-Wood)
+* SFTP
+    * Fix `--sftp-ask-password` trying to contact the ssh agent (Nick Craig-Wood)
+    * Fix hashes of files with backslashes (Nick Craig-Wood)
+    * Include more ciphers with `--sftp-use-insecure-cipher` (Carlos Ferreyra)
+* WebDAV
+    * Parse and return Sharepoint error response (Henning Surmeier)
+
## v1.49.5 - 2019-10-05

* Bug Fixes
@@ -36,17 +162,15 @@ date: "2019-10-05"
* New Features
    * build: Add Docker workflow support (Alfonso Montero)
* Bug Fixes
-    * accounting: Fix locking in Transfer to avoid deadlock with --progress (Nick Craig-Wood)
+    * accounting: Fix locking in Transfer to avoid deadlock with `--progress` (Nick Craig-Wood)
    * docs: Fix template argument for mktemp in install.sh (Cnly)
-    * operations: Fix -u/--update with google photos / files of unknown size (Nick Craig-Wood)
+    * operations: Fix `-u`/`--update` with google photos / files of unknown size (Nick Craig-Wood)
    * rc: Fix docs for config/create /update /password (Nick Craig-Wood)
* Google Cloud Storage
    * Fix need for elevated permissions on SetModTime (Nick Craig-Wood)

## v1.49.1 - 2019-08-28

Point release to fix config bug and google photos backend.

* Bug Fixes
    * config: Fix generated passwords being stored as empty password (Nick Craig-Wood)
    * rcd: Added missing parameter for web-gui info logs. (Chaitanya)
@@ -293,7 +293,7 @@ in the same directory).

<!--- autogenerated options start - DO NOT EDIT, instead edit fs.RegInfo in backend/chunker/chunker.go then run make backenddocs -->
### Standard Options

-Here are the standard options specific to chunker.
+Here are the standard options specific to chunker (Transparently chunk/split large files).

#### --chunker-remote

@@ -341,7 +341,7 @@ Choose how chunker handles hash sums. All modes but "none" require metadata.

### Advanced Options

-Here are the advanced options specific to chunker.
+Here are the advanced options specific to chunker (Transparently chunk/split large files).

#### --chunker-name-format
@@ -1,5 +1,5 @@
---
-date: 2019-08-26T15:19:45+01:00
+date: 2019-10-26T11:04:03+01:00
title: "rclone"
slug: rclone
url: /commands/rclone/

@@ -1,5 +1,5 @@
---
-date: 2019-08-26T15:19:45+01:00
+date: 2019-10-26T11:04:03+01:00
title: "rclone about"
slug: rclone_about
url: /commands/rclone_about/

@@ -1,5 +1,5 @@
---
-date: 2019-08-26T15:19:45+01:00
+date: 2019-10-26T11:04:03+01:00
title: "rclone authorize"
slug: rclone_authorize
url: /commands/rclone_authorize/
@@ -22,7 +22,8 @@ rclone authorize [flags]
### Options

```
-  -h, --help   help for authorize
+      --auth-no-open-browser   Do not automatically open auth link in default browser
+  -h, --help                   help for authorize
```

See the [global flags page](/flags/) for global options not listed here.

@@ -1,5 +1,5 @@
---
-date: 2019-08-26T15:19:45+01:00
+date: 2019-10-26T11:04:03+01:00
title: "rclone cachestats"
slug: rclone_cachestats
url: /commands/rclone_cachestats/

@@ -1,5 +1,5 @@
---
-date: 2019-08-26T15:19:45+01:00
+date: 2019-10-26T11:04:03+01:00
title: "rclone cat"
slug: rclone_cat
url: /commands/rclone_cat/

@@ -1,5 +1,5 @@
---
-date: 2019-08-26T15:19:45+01:00
+date: 2019-10-26T11:04:03+01:00
title: "rclone check"
slug: rclone_check
url: /commands/rclone_check/

@@ -1,5 +1,5 @@
---
-date: 2019-08-26T15:19:45+01:00
+date: 2019-10-26T11:04:03+01:00
title: "rclone cleanup"
slug: rclone_cleanup
url: /commands/rclone_cleanup/

@@ -1,5 +1,5 @@
---
-date: 2019-08-26T15:19:45+01:00
+date: 2019-10-26T11:04:03+01:00
title: "rclone config"
slug: rclone_config
url: /commands/rclone_config/
Some files were not shown because too many files have changed in this diff.