mirror of https://github.com/rclone/rclone.git synced 2026-01-23 04:43:21 +00:00

Compare commits


1 commit

708 changed files with 31411 additions and 54601 deletions

View File

@@ -102,7 +102,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v1
uses: actions/checkout@master
with:
path: ./src/github.com/${{ github.repository }}
@@ -211,7 +211,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v1
uses: actions/checkout@master
with:
path: ./src/github.com/${{ github.repository }}

View File

@@ -12,7 +12,6 @@ Current active maintainers of rclone are:
| Alex Chen | @Cnly | onedrive backend |
| Sandeep Ummadi | @sandeepkru | azureblob backend |
| Sebastian Bünger | @buengese | jottacloud & yandex backends |
| Ivan Andreev | @ivandeex | chunker & mailru backends |
**This is a work in progress Draft**

MANUAL.html (generated): 14050 changed lines

File diff suppressed because one or more lines are too long

MANUAL.md (generated): 1999 changed lines

File diff suppressed because it is too large

MANUAL.txt (generated): 13667 changed lines

File diff suppressed because it is too large

View File

@@ -46,8 +46,7 @@ endif
rclone:
go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
mkdir -p `go env GOPATH`/bin/
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
cp -av rclone`go env GOEXE` `go env GOPATH`/bin/
test_all:
go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
@@ -97,11 +96,6 @@ update:
GO111MODULE=on go mod tidy
GO111MODULE=on go mod vendor
# Tidy the module dependencies
tidy:
GO111MODULE=on go mod tidy
GO111MODULE=on go mod vendor
doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
rclone.1: MANUAL.md

View File

@@ -15,7 +15,6 @@ This file describes how to make the various kinds of releases
* make test # see integration test server or run locally
* make tag
* edit docs/content/changelog.md
* make tidy
* make doc
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
@@ -89,7 +88,7 @@ Now
* make TAG=${NEW_TAG} upload_github
* NB this overwrites the current beta so we need to do this
* git co master
* make VERSION=${NEW_TAG} startdev
* make LAST_TAG=${NEW_TAG} startdev
* # cherry pick the changes to the changelog and VERSION
* git checkout ${BASE_TAG}-fixes VERSION docs/content/changelog.md
* git commit --amend

View File

@@ -1 +1 @@
v1.50.2
v1.49.5

View File

@@ -312,9 +312,6 @@ func httpClientFactory(client *http.Client) pipeline.Factory {
//
// this code was copied from azblob.NewPipeline
func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline.Pipeline {
// Don't log stuff to syslog/Windows Event log
pipeline.SetForceLogEnabled(false)
// Closest to API goes first; closest to the wire goes last
factories := []pipeline.Factory{
azblob.NewTelemetryPolicyFactory(o.Telemetry),

View File

@@ -1375,12 +1375,6 @@ func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp
if o.sha1 == "" || o.sha1 == "none" {
o.sha1 = Info[sha1Key]
}
// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
// Some tools (eg Cyberduck) use this
const unverified = "unverified:"
if strings.HasPrefix(o.sha1, unverified) {
o.sha1 = o.sha1[len(unverified):]
}
o.size = Size
// Use the UploadTimestamp if can't get file info
o.modTime = time.Time(UploadTimestamp)
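Editor's note: the lines removed above strip Backblaze's "unverified:" prefix, which some B2 clients (e.g. Cyberduck) prepend to a large file's SHA1, before rclone uses the hash. A minimal standalone sketch of the same normalisation, using only the standard library (the hash values are arbitrary examples):

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeSHA1 strips the "unverified:" prefix that some B2 clients
// prepend to the large-file SHA1 checksum.
// See https://www.backblaze.com/b2/docs/uploading.html
func normalizeSHA1(sha1 string) string {
	const unverified = "unverified:"
	return strings.TrimPrefix(sha1, unverified)
}

func main() {
	fmt.Println(normalizeSHA1("unverified:356a192b7913b04c54574d18c28d46e6395428ab"))
	fmt.Println(normalizeSHA1("356a192b7913b04c54574d18c28d46e6395428ab"))
}
```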

View File

@@ -16,7 +16,7 @@ import (
"sync"
"time"
bolt "github.com/etcd-io/bbolt"
bolt "github.com/coreos/bbolt"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"

View File

@@ -63,7 +63,6 @@ func init() {
Name: "password",
Help: "Password or pass phrase for encryption.",
IsPassword: true,
Required: true,
}, {
Name: "password2",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",

View File

@@ -10,7 +10,6 @@ package drive
import (
"bytes"
"context"
"crypto/tls"
"fmt"
"io"
"io/ioutil"
@@ -214,15 +213,7 @@ func init() {
}},
}, {
Name: "root_folder_id",
Help: `ID of the root folder
Leave blank normally.
Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point.
Note that if this is blank, the first time rclone runs it will fill it
in with the ID of the root folder.
`,
Help: "ID of the root folder\nLeave blank normally.\nFill in to access \"Computers\" folders. (see docs).",
}, {
Name: "service_account_file",
Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
@@ -326,17 +317,6 @@ Photos folder" option in your google drive settings. You can then copy
or move the photos locally and use the date the image was taken
(created) set as the modification date.`,
Advanced: true,
}, {
Name: "use_shared_date",
Default: false,
Help: `Use date file was shared instead of modified date.
Note that, as with "--drive-use-created-date", this flag may have
unexpected consequences when uploading/downloading files.
If both this flag and "--drive-use-created-date" are set, the created
date is used.`,
Advanced: true,
}, {
Name: "list_chunk",
Default: 1000,
@@ -422,23 +402,9 @@ older versions that have been set to keep forever.`,
This can be useful if you wish to do a server side copy between two
different Google drives. Note that this isn't enabled by default
because it isn't easy to tell if it will work between any two
because it isn't easy to tell if it will work beween any two
configurations.`,
Advanced: true,
}, {
Name: "disable_http2",
Default: true,
Help: `Disable drive using http2
There is currently an unsolved issue with the google drive backend and
HTTP/2. HTTP/2 is therefore disabled by default for the drive backend
but can be re-enabled here. When the issue is solved this flag will
be removed.
See: https://github.com/rclone/rclone/issues/3631
`,
Advanced: true,
}},
})
@@ -474,7 +440,6 @@ type Options struct {
ImportExtensions string `config:"import_formats"`
AllowImportNameChange bool `config:"allow_import_name_change"`
UseCreatedDate bool `config:"use_created_date"`
UseSharedDate bool `config:"use_shared_date"`
ListChunk int64 `config:"list_chunk"`
Impersonate string `config:"impersonate"`
AlternateExport bool `config:"alternate_export"`
@@ -487,7 +452,6 @@ type Options struct {
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
PacerBurst int `config:"pacer_burst"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
DisableHTTP2 bool `config:"disable_http2"`
}
// Fs represents a remote drive server
@@ -601,23 +565,6 @@ func containsString(slice []string, s string) bool {
return false
}
// getRootID returns the canonical ID for the "root" ID
func (f *Fs) getRootID() (string, error) {
var info *drive.File
var err error
err = f.pacer.CallNoRetry(func() (bool, error) {
info, err = f.svc.Files.Get("root").
Fields("id").
SupportsAllDrives(true).
Do()
return shouldRetry(err)
})
if err != nil {
return "", errors.Wrap(err, "couldn't find root directory ID")
}
return info.Id, nil
}
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
@@ -706,9 +653,6 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
if f.opt.AuthOwnerOnly {
fields += ",owners"
}
if f.opt.UseSharedDate {
fields += ",sharedWithMeTime"
}
if f.opt.SkipChecksumGphotos {
fields += ",spaces"
}
@@ -845,7 +789,7 @@ func configTeamDrive(ctx context.Context, opt *Options, m configmap.Mapper, name
} else {
fmt.Printf("Change current team drive ID %q?\n", opt.TeamDriveID)
}
if !config.Confirm(false) {
if !config.Confirm() {
return nil
}
client, err := createOAuthClient(opt, name, m)
@@ -896,18 +840,6 @@ func newPacer(opt *Options) *fs.Pacer {
return fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst)))
}
// getClient makes an http client according to the options
func getClient(opt *Options) *http.Client {
t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) {
if opt.DisableHTTP2 {
t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
}
})
return &http.Client{
Transport: t,
}
}
func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
scopes := driveScopes(opt.Scope)
conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
@@ -917,7 +849,7 @@ func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client
if opt.Impersonate != "" {
conf.Subject = opt.Impersonate
}
ctxWithSpecialClient := oauthutil.Context(getClient(opt))
ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}
@@ -939,7 +871,7 @@ func createOAuthClient(opt *Options, name string, m configmap.Mapper) (*http.Cli
return nil, errors.Wrap(err, "failed to create oauth client from service account")
}
} else {
oAuthClient, _, err = oauthutil.NewClientWithBaseClient(name, m, driveConfig, getClient(opt))
oAuthClient, _, err = oauthutil.NewClient(name, m, driveConfig)
if err != nil {
return nil, errors.Wrap(err, "failed to create oauth client")
}
@@ -1036,25 +968,15 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
}
// set root folder for a team drive or query the user root folder
if opt.RootFolderID != "" {
// override root folder if set or cached in the config
f.rootFolderID = opt.RootFolderID
} else if f.isTeamDrive {
if f.isTeamDrive {
f.rootFolderID = f.opt.TeamDriveID
} else {
// Look up the root ID and cache it in the config
rootID, err := f.getRootID()
if err != nil {
if gerr, ok := errors.Cause(err).(*googleapi.Error); ok && gerr.Code == 404 {
// 404 means that this scope does not have permission to get the
// root so just use "root"
rootID = "root"
} else {
return nil, err
}
}
f.rootFolderID = rootID
m.Set("root_folder_id", rootID)
f.rootFolderID = "root"
}
// override root folder if set in the config
if opt.RootFolderID != "" {
f.rootFolderID = opt.RootFolderID
}
f.dirCache = dircache.New(root, f.rootFolderID, f)
@@ -1110,8 +1032,6 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
modifiedDate := info.ModifiedTime
if f.opt.UseCreatedDate {
modifiedDate = info.CreatedTime
} else if f.opt.UseSharedDate && info.SharedWithMeTime != "" {
modifiedDate = info.SharedWithMeTime
}
size := info.Size
if f.opt.SizeAsQuota {
@@ -1480,14 +1400,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if iErr != nil {
return nil, iErr
}
// If listing the root of a teamdrive and got no entries,
// double check we have access
if f.isTeamDrive && len(entries) == 0 && f.root == "" && dir == "" {
err = f.teamDriveOK(ctx)
if err != nil {
return nil, err
}
}
return entries, nil
}
@@ -1546,10 +1458,6 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan list
listRSlices{dirs, paths}.Sort()
var iErr error
_, err := f.list(ctx, dirs, "", false, false, false, func(item *drive.File) bool {
// shared with me items have no parents when at the root
if f.opt.SharedWithMe && len(item.Parents) == 0 && len(paths) == 1 && paths[0] == "" {
item.Parents = dirs
}
for _, parent := range item.Parents {
// only handle parents that are in the requested dirs list
i := sort.SearchStrings(dirs, parent)
@@ -1618,6 +1526,20 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
if err != nil {
return err
}
if directoryID == "root" {
var info *drive.File
err = f.pacer.CallNoRetry(func() (bool, error) {
info, err = f.svc.Files.Get("root").
Fields("id").
SupportsAllDrives(true).
Do()
return shouldRetry(err)
})
if err != nil {
return err
}
directoryID = info.Id
}
mu := sync.Mutex{} // protects in and overflow
wg := sync.WaitGroup{}
@@ -1625,7 +1547,6 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
out := make(chan error, fs.Config.Checkers)
list := walk.NewListRHelper(callback)
overflow := []listREntry{}
listed := 0
cb := func(entry fs.DirEntry) error {
mu.Lock()
@@ -1638,7 +1559,6 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
overflow = append(overflow, listREntry{d.ID(), d.Remote()})
}
}
listed++
return list.Add(entry)
}
@@ -1695,21 +1615,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return err
}
err = list.Flush()
if err != nil {
return err
}
// If listing the root of a teamdrive and got no entries,
// double check we have access
if f.isTeamDrive && listed == 0 && f.root == "" && dir == "" {
err = f.teamDriveOK(ctx)
if err != nil {
return err
}
}
return nil
return list.Flush()
}
// itemToDirEntry converts a drive.File to a fs.DirEntry.
@@ -2082,30 +1988,9 @@ func (f *Fs) CleanUp(ctx context.Context) error {
return nil
}
// teamDriveOK checks to see if we can access the team drive
func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
if !f.isTeamDrive {
return nil
}
var td *drive.Drive
err = f.pacer.Call(func() (bool, error) {
td, err = f.svc.Drives.Get(f.opt.TeamDriveID).Fields("name,id,capabilities,createdTime,restrictions").Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "failed to get Team/Shared Drive info")
}
fs.Debugf(f, "read info from team drive %q", td.Name)
return err
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if f.isTeamDrive {
err := f.teamDriveOK(ctx)
if err != nil {
return nil, err
}
// Teamdrives don't appear to have a usage API so just return empty
return &fs.Usage{}, nil
}
@@ -2382,11 +2267,9 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
func (f *Fs) changeNotifyStartPageToken() (pageToken string, err error) {
var startPageToken *drive.StartPageToken
err = f.pacer.Call(func() (bool, error) {
changes := f.svc.Changes.GetStartPageToken().SupportsAllDrives(true)
if f.isTeamDrive {
changes.DriveId(f.opt.TeamDriveID)
}
startPageToken, err = changes.Do()
startPageToken, err = f.svc.Changes.GetStartPageToken().
SupportsAllDrives(true).
Do()
return shouldRetry(err)
})
if err != nil {
@@ -2409,11 +2292,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
changesCall.SupportsAllDrives(true)
changesCall.IncludeItemsFromAllDrives(true)
if f.isTeamDrive {
changesCall.DriveId(f.opt.TeamDriveID)
}
// If using appDataFolder then need to add Spaces
if f.rootFolderID == "appDataFolder" {
changesCall.Spaces("appDataFolder")
changesCall.TeamDriveId(f.opt.TeamDriveID)
}
changeList, err = changesCall.Context(ctx).Do()
return shouldRetry(err)
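Editor's note: among the v1.50 additions removed above is getClient, which disables HTTP/2 for the drive backend by installing an empty, non-nil TLSNextProto map on the transport; that is the documented net/http mechanism for opting out of the automatic HTTP/2 upgrade (the workaround for rclone issue 3631). A self-contained sketch of the technique, without rclone's fshttp helpers (the target URL is just an example):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

// newHTTP1Client returns an *http.Client that never negotiates HTTP/2:
// a non-nil empty TLSNextProto map tells net/http not to upgrade.
func newHTTP1Client() *http.Client {
	t := &http.Transport{
		TLSNextProto: map[string]func(string, *tls.Conn) http.RoundTripper{},
	}
	return &http.Client{Transport: t}
}

func main() {
	resp, err := newHTTP1Client().Get("https://www.googleapis.com/")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Proto) // HTTP/1.1, never HTTP/2.0
}
```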

View File

@@ -113,7 +113,7 @@ var (
// Register with Fs
func init() {
DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
DbHashType = hash.RegisterHash("Dropbox", 64, dbhash.New)
fs.Register(&fs.RegInfo{
Name: "dropbox",
Description: "Dropbox",

View File

@@ -46,26 +46,13 @@ func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// APIString returns Time string in Jottacloud API format
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
// LoginToken is struct representing the login token generated in the WebUI
type LoginToken struct {
Username string `json:"username"`
Realm string `json:"realm"`
WellKnownLink string `json:"well_known_link"`
AuthToken string `json:"auth_token"`
}
// TokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type TokenJSON struct {
AccessToken string `json:"access_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
RefreshExpiresIn int32 `json:"refresh_expires_in"`
RefreshToken string `json:"refresh_token"`
TokenType string `json:"token_type"`
IDToken string `json:"id_token"`
NotBeforePolicy int32 `json:"not-before-policy"`
SessionState string `json:"session_state"`
Scope string `json:"scope"`
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
}
// JSON structures returned by new API
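Editor's note: the revert above swaps the richer Keycloak-style TokenJSON (and the LoginToken struct) back to the older four-field token response. A quick sketch of how such a response decodes into the struct; the JSON values are made up for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// TokenJSON mirrors the OAuth2 token response fields in the diff above.
type TokenJSON struct {
	AccessToken      string `json:"access_token"`
	ExpiresIn        int32  `json:"expires_in"`
	RefreshExpiresIn int32  `json:"refresh_expires_in"`
	RefreshToken     string `json:"refresh_token"`
	TokenType        string `json:"token_type"`
}

func main() {
	// A made-up response body shaped like the one the struct decodes.
	body := []byte(`{"access_token":"abc","expires_in":3600,` +
		`"refresh_expires_in":0,"refresh_token":"def","token_type":"bearer"}`)
	var tok TokenJSON
	if err := json.Unmarshal(body, &tok); err != nil {
		panic(err)
	}
	fmt.Printf("%s token, expires in %ds\n", tok.TokenType, tok.ExpiresIn)
}
```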

View File

@@ -4,13 +4,12 @@ import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/url"
"os"
@@ -26,6 +25,7 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
@@ -41,25 +41,29 @@ const enc = encodings.JottaCloud
// Globals
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta"
defaultMountpoint = "Archive"
rootURL = "https://www.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/"
baseURL = "https://www.jottacloud.com/"
tokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
cachePrefix = "rclone-jcmd5-"
configDevice = "device"
configMountpoint = "mountpoint"
configVersion = 1
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta"
defaultMountpoint = "Archive"
rootURL = "https://www.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/"
baseURL = "https://www.jottacloud.com/"
tokenURL = "https://api.jottacloud.com/auth/v1/token"
registerURL = "https://api.jottacloud.com/auth/v1/register"
cachePrefix = "rclone-jcmd5-"
rcloneClientID = "nibfk8biu12ju7hpqomr8b1e40"
rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
configClientID = "client_id"
configClientSecret = "client_secret"
configDevice = "device"
configMountpoint = "mountpoint"
charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
)
var (
// Description of how to auth for this app for a personal account
oauthConfig = &oauth2.Config{
ClientID: "jottacli",
Endpoint: oauth2.Endpoint{
AuthURL: tokenURL,
TokenURL: tokenURL,
@@ -77,39 +81,43 @@ func init() {
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm() {
return
}
}
refresh := false
if version, ok := m.Get("configVersion"); ok {
ver, err := strconv.Atoi(version)
srv := rest.NewClient(fshttp.NewClient(fs.Config))
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
if config.Confirm() {
deviceRegistration, err := registerDevice(ctx, srv)
if err != nil {
log.Fatalf("Failed to parse config version - corrupted config")
log.Fatalf("Failed to register device: %v", err)
}
refresh = ver != configVersion
} else {
refresh = true
m.Set(configClientID, deviceRegistration.ClientID)
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
}
if refresh {
fmt.Printf("Config outdated - refreshing\n")
} else {
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm(false) {
return
}
}
clientID, ok := m.Get(configClientID)
if !ok {
clientID = rcloneClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = rcloneEncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
clientConfig := *fs.Config
clientConfig.UserAgent = "JottaCli 0.6.18626 windows-amd64"
srv := rest.NewClient(fshttp.NewClient(&clientConfig))
fmt.Printf("Username> ")
username := config.ReadLine()
password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")
fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
fmt.Printf("Login Token> ")
loginToken := config.ReadLine()
token, err := doAuth(ctx, srv, loginToken)
token, err := doAuth(ctx, srv, username, password)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
}
@@ -119,7 +127,7 @@ func init() {
}
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
if config.Confirm() {
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
@@ -135,8 +143,6 @@ func init() {
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
m.Set("configVersion", strconv.Itoa(configVersion))
},
Options: []fs.Option{{
Name: "md5_memory_limit",
@@ -243,51 +249,67 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// doAuth runs the actual token request
func doAuth(ctx context.Context, srv *rest.Client, loginTokenBase64 string) (token oauth2.Token, err error) {
loginTokenBytes, err := base64.StdEncoding.DecodeString(loginTokenBase64)
if err != nil {
return token, err
// registerDevice register a new device for use with the jottacloud API
func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
// random generator to generate random device names
seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
randonDeviceNamePartLength := 21
randomDeviceNamePart := make([]byte, randonDeviceNamePartLength)
for i := range randomDeviceNamePart {
randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
}
randomDeviceName := "rclone-" + string(randomDeviceNamePart)
fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)
var loginToken api.LoginToken
decoder := json.NewDecoder(bytes.NewReader(loginTokenBytes))
err = decoder.Decode(&loginToken)
if err != nil {
return token, err
}
values := url.Values{}
values.Set("device_id", randomDeviceName)
// we don't seem to need any data from this link but the API is not happy if skip it
opts := rest.Opts{
Method: "GET",
RootURL: loginToken.WellKnownLink,
NoResponse: true,
}
_, err = srv.Call(ctx, &opts)
if err != nil {
return token, err
Method: "POST",
RootURL: registerURL,
ContentType: "application/x-www-form-urlencoded",
ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
Parameters: values,
}
var deviceRegistration *api.DeviceRegistrationResponse
_, err = srv.CallJSON(ctx, &opts, nil, &deviceRegistration)
return deviceRegistration, err
}
// doAuth runs the actual token request
func doAuth(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
// prepare out token request with username and password
values := url.Values{}
values.Set("client_id", "jottacli")
values.Set("grant_type", "password")
values.Set("password", loginToken.AuthToken)
values.Set("scope", "offline_access+openid")
values.Set("username", loginToken.Username)
values.Encode()
opts = rest.Opts{
values.Set("grant_type", "PASSWORD")
values.Set("password", password)
values.Set("username", username)
values.Set("client_id", oauthConfig.ClientID)
values.Set("client_secret", oauthConfig.ClientSecret)
opts := rest.Opts{
Method: "POST",
RootURL: oauthConfig.Endpoint.AuthURL,
ContentType: "application/x-www-form-urlencoded",
Body: strings.NewReader(values.Encode()),
Parameters: values,
}
// do the first request
var jsonToken api.TokenJSON
_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
resp, err := srv.CallJSON(ctx, &opts, nil, &jsonToken)
if err != nil {
return token, err
// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
if resp != nil {
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
fmt.Printf("Enter verification code> ")
authCode := config.ReadLine()
authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
resp, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
}
}
}
token.AccessToken = jsonToken.AccessToken
@@ -449,6 +471,29 @@ func (f *Fs) filePath(file string) string {
return urlPathEscape(f.filePathRaw(file))
}
// Jottacloud requires the grant_type 'refresh_token' string
// to be uppercase and throws a 400 Bad Request if we use the
// lower case used by the oauth2 module
//
// This filter catches all refresh requests, reads the body,
// changes the case and then sends it on
func grantTypeFilter(req *http.Request) {
if tokenURL == req.URL.String() {
// read the entire body
refreshBody, err := ioutil.ReadAll(req.Body)
if err != nil {
return
}
_ = req.Body.Close()
// make the refresh token upper case
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
// set the new ReadCloser (with a dummy Close())
req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
}
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
@@ -459,23 +504,30 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
var ok bool
var version string
if version, ok = m.Get("configVersion"); ok {
ver, err := strconv.Atoi(version)
if err != nil {
return nil, errors.New("Failed to parse config version")
}
ok = ver == configVersion
}
if !ok {
return nil, errors.New("Outdated config - please reconfigure this backend")
}
rootIsDir := strings.HasSuffix(root, "/")
root = parsePath(root)
clientID, ok := m.Get(configClientID)
if !ok {
clientID = rcloneClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = rcloneEncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
// the oauth client for the api servers needs
// a filter to fix the grant_type issues (see above)
baseClient := fshttp.NewClient(fs.Config)
if do, ok := baseClient.Transport.(interface {
SetRequestFilter(f func(req *http.Request))
}); ok {
do.SetRequestFilter(grantTypeFilter)
} else {
fs.Debugf(name+":", "Couldn't add request filter - uploads will fail")
}
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")
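Editor's note: the removed grantTypeFilter above works around Jottacloud rejecting the lowercase grant_type=refresh_token that the oauth2 package sends, by rewriting the request body to REFRESH_TOKEN on its way out. Rclone hooks the filter in through its transport's SetRequestFilter; the standalone sketch below expresses the same idea as a wrapping http.RoundTripper instead, which is an assumption made purely so the example is self-contained (mutating the request in RoundTrip is also a simplification):

```go
package main

import (
	"bytes"
	"io/ioutil"
	"net/http"
	"strings"
)

// grantTypeRewriter upper-cases grant_type=refresh_token in request
// bodies sent to tokenURL, for servers that demand REFRESH_TOKEN.
type grantTypeRewriter struct {
	next     http.RoundTripper
	tokenURL string
}

func (g grantTypeRewriter) RoundTrip(req *http.Request) (*http.Response, error) {
	if req.URL.String() == g.tokenURL && req.Body != nil {
		body, err := ioutil.ReadAll(req.Body)
		if err != nil {
			return nil, err
		}
		_ = req.Body.Close()
		body = []byte(strings.Replace(string(body),
			"grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
		// Replace the consumed body with the rewritten one.
		req.Body = ioutil.NopCloser(bytes.NewReader(body))
		req.ContentLength = int64(len(body))
	}
	return g.next.RoundTrip(req)
}

func main() {
	client := &http.Client{Transport: grantTypeRewriter{
		next:     http.DefaultTransport,
		tokenURL: "https://api.jottacloud.com/auth/v1/token",
	}}
	_ = client // use this client for token refreshes
}
```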

View File

@@ -16,7 +16,6 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
httpclient "github.com/koofr/go-httpclient"
@@ -260,9 +259,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
if err != nil {
return nil, err
}
httpClient := httpclient.New()
httpClient.Client = fshttp.NewClient(fs.Config)
client := koofrclient.NewKoofrClientWithHTTPClient(opt.Endpoint, httpClient)
client := koofrclient.NewKoofrClient(opt.Endpoint, false)
basicAuth := fmt.Sprintf("Basic %s",
base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
client.HTTPClient.Headers.Set("Authorization", basicAuth)

View File

@@ -350,7 +350,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
err = errors.Wrapf(err, "failed to open directory %q", dir)
fs.Errorf(dir, "%v", err)
if isPerm {
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
err = nil // ignore error but fail sync
}
return nil, err
@@ -386,7 +386,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if fierr != nil {
err = errors.Wrapf(err, "failed to read directory %q", namepath)
fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue
}
fis = append(fis, fi)
@@ -409,7 +409,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Skip bad symlinks
err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
fs.Errorf(newRemote, "Listing error: %v", err)
err = accounting.Stats(ctx).Error(err)
accounting.Stats(ctx).Error(err)
continue
}
if err != nil {
@@ -820,10 +820,10 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
return 0, errors.Wrap(err, "can't read status of source file while transferring")
}
if file.o.size != fi.Size() {
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size()))
return 0, errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size())
}
if !file.o.modTime.Equal(fi.ModTime()) {
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime()))
return 0, errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime())
}
}
@@ -1084,17 +1084,17 @@ func (o *Object) Remove(ctx context.Context) error {
func cleanRootPath(s string, noUNC bool) string {
if runtime.GOOS == "windows" {
s = filepath.ToSlash(s)
vol := filepath.VolumeName(s)
s = vol + enc.FromStandardPath(s[len(vol):])
s = filepath.FromSlash(s)
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
s = filepath.ToSlash(s)
vol := filepath.VolumeName(s)
s = vol + enc.FromStandardPath(s[len(vol):])
s = filepath.FromSlash(s)
if !noUNC {
// Convert to UNC
s = uncPath(s)
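Editor's note: the localOpenFile.Read hunk above guards against a source file being modified mid-transfer by re-statting it and comparing size and modification time (the v1.50 code additionally wrapped the failure as a no-low-level-retry error). A minimal sketch of that staleness check on its own; the path used in main is arbitrary:

```go
package main

import (
	"fmt"
	"os"
	"time"
)

// unchanged reports whether path still has the given size and modTime,
// the same check Read applies while a copy is in flight.
func unchanged(path string, size int64, modTime time.Time) (bool, error) {
	fi, err := os.Stat(path)
	if err != nil {
		return false, err
	}
	return fi.Size() == size && fi.ModTime().Equal(modTime), nil
}

func main() {
	fi, err := os.Stat("/etc/hosts")
	if err != nil {
		fmt.Println(err)
		return
	}
	ok, err := unchanged("/etc/hosts", fi.Size(), fi.ModTime())
	fmt.Println(ok, err)
}
```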

View File

@@ -54,7 +54,7 @@ var testsWindows = [][2]string{
{`\\?\UNC\theserver\dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
{`//?/UNC/theserver/dir\file.txt`, `\\?\UNC\theserver\dir\file.txt`},
{`c:/temp`, `c:\temp`},
{`C:/temp/file.txt`, `C:\temp\file.txt`},
{`/temp/file.txt`, `\temp\file.txt`},
{`c:\!\"#¤%&/()=;:*^?+-`, `c:\!\#¤%&\()=;^+-`},
{`c:\<>"|?*:&\<>"|?*:&\<>"|?*:&`, `c:\&\&\&`},
}

View File

@@ -351,13 +351,8 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
// instead of simply using `drives/driveID/root:/itemPath` because it works for
// "shared with me" folders in OneDrive Personal (See #2536, #2778)
// This path pattern comes from https://github.com/OneDrive/onedrive-api-docs/issues/908#issuecomment-417488480
//
// If `relPath` == '', do not append the slash (See #3664)
func (f *Fs) readMetaDataForPathRelativeToID(ctx context.Context, normalizedID string, relPath string) (info *api.Item, resp *http.Response, err error) {
if relPath != "" {
relPath = "/" + withTrailingColon(rest.URLPathEscape(enc.FromStandardPath(relPath)))
}
opts := newOptsCall(normalizedID, "GET", ":"+relPath)
opts := newOptsCall(normalizedID, "GET", ":/"+withTrailingColon(rest.URLPathEscape(enc.FromStandardPath(relPath))))
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &info)
return shouldRetry(resp, err)

View File

@@ -1,43 +0,0 @@
package putio
import (
"fmt"
"net/http"
"github.com/putdotio/go-putio/putio"
"github.com/rclone/rclone/fs/fserrors"
)
func checkStatusCode(resp *http.Response, expected int) error {
if resp.StatusCode != expected {
return &statusCodeError{response: resp}
}
return nil
}
type statusCodeError struct {
response *http.Response
}
func (e *statusCodeError) Error() string {
return fmt.Sprintf("unexpected status code (%d) response while doing %s to %s", e.response.StatusCode, e.response.Request.Method, e.response.Request.URL.String())
}
func (e *statusCodeError) Temporary() bool {
return e.response.StatusCode == 429 || e.response.StatusCode >= 500
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(err error) (bool, error) {
if err == nil {
return false, nil
}
if perr, ok := err.(*putio.ErrorResponse); ok {
err = &statusCodeError{response: perr.Response}
}
if fserrors.ShouldRetry(err) {
return true, err
}
return false, err
}

View File

@@ -17,6 +17,7 @@ import (
"github.com/putdotio/go-putio/putio"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/oauthutil"
@@ -57,6 +58,23 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(err error) (bool, error) {
if err == nil {
return false, nil
}
if fserrors.ShouldRetry(err) {
return true, err
}
if perr, ok := err.(*putio.ErrorResponse); ok {
if perr.Response.StatusCode == 429 || perr.Response.StatusCode >= 500 {
return true, err
}
}
return false, err
}
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) {
// defer log.Trace(name, "root=%v", root)("f=%+v, err=%v", &f, &err)
@@ -300,125 +318,66 @@ func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID
}
func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.Reader) (fileID int64, err error) {
// defer log.Trace(f, "location=%v, size=%v", location, size)("fileID=%v, err=%v", &fileID, &err)
// defer log.Trace(f, "location=%v, size=%v", location, size)("fileID=%v, err=%v", fileID, &err)
if size == 0 {
err = f.pacer.Call(func() (bool, error) {
fs.Debugf(f, "Sending zero length chunk")
_, fileID, err = f.transferChunk(ctx, location, 0, bytes.NewReader([]byte{}), 0)
fileID, err = f.transferChunk(ctx, location, 0, bytes.NewReader([]byte{}), 0)
return shouldRetry(err)
})
return
}
var clientOffset int64
var offsetMismatch bool
var start int64
buf := make([]byte, defaultChunkSize)
for clientOffset < size {
chunkSize := size - clientOffset
if chunkSize >= int64(defaultChunkSize) {
chunkSize = int64(defaultChunkSize)
for start < size {
reqSize := size - start
if reqSize >= int64(defaultChunkSize) {
reqSize = int64(defaultChunkSize)
}
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
chunkStart := clientOffset
reqSize := chunkSize
transferOffset := clientOffset
fs.Debugf(f, "chunkStart: %d, reqSize: %d", chunkStart, reqSize)
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, reqSize)
// Transfer the chunk
err = f.pacer.Call(func() (bool, error) {
if offsetMismatch {
// Get file offset and seek to the position
offset, err := f.getServerOffset(ctx, location)
if err != nil {
return shouldRetry(err)
}
sentBytes := offset - chunkStart
fs.Debugf(f, "sentBytes: %d", sentBytes)
_, err = chunk.Seek(sentBytes, io.SeekStart)
if err != nil {
return shouldRetry(err)
}
transferOffset = offset
reqSize = chunkSize - sentBytes
offsetMismatch = false
}
fs.Debugf(f, "Sending chunk. transferOffset: %d length: %d", transferOffset, reqSize)
var serverOffset int64
serverOffset, fileID, err = f.transferChunk(ctx, location, transferOffset, chunk, reqSize)
if cerr, ok := err.(*statusCodeError); ok && cerr.response.StatusCode == 409 {
offsetMismatch = true
return true, err
}
if serverOffset != (transferOffset + reqSize) {
offsetMismatch = true
return true, errors.New("connection broken")
}
fs.Debugf(f, "Sending chunk. start: %d length: %d", start, reqSize)
// TODO get file offset and seek to the position
fileID, err = f.transferChunk(ctx, location, start, chunk, reqSize)
return shouldRetry(err)
})
if err != nil {
return
}
clientOffset += chunkSize
start += reqSize
}
return
}
func (f *Fs) getServerOffset(ctx context.Context, location string) (offset int64, err error) {
// defer log.Trace(f, "location=%v", location)("offset=%v, err=%v", &offset, &err)
req, err := f.makeUploadHeadRequest(ctx, location)
if err != nil {
return 0, err
}
resp, err := f.oAuthClient.Do(req)
if err != nil {
return 0, err
}
err = checkStatusCode(resp, 200)
if err != nil {
return 0, err
}
return strconv.ParseInt(resp.Header.Get("upload-offset"), 10, 64)
}
func (f *Fs) transferChunk(ctx context.Context, location string, start int64, chunk io.ReadSeeker, chunkSize int64) (serverOffset, fileID int64, err error) {
// defer log.Trace(f, "location=%v, start=%v, chunkSize=%v", location, start, chunkSize)("fileID=%v, err=%v", &fileID, &err)
func (f *Fs) transferChunk(ctx context.Context, location string, start int64, chunk io.ReadSeeker, chunkSize int64) (fileID int64, err error) {
// defer log.Trace(f, "location=%v, start=%v, chunkSize=%v", location, start, chunkSize)("fileID=%v, err=%v", fileID, &err)
_, _ = chunk.Seek(0, io.SeekStart)
req, err := f.makeUploadPatchRequest(ctx, location, chunk, start, chunkSize)
if err != nil {
return
return 0, err
}
resp, err := f.oAuthClient.Do(req)
req = req.WithContext(ctx)
res, err := f.oAuthClient.Do(req)
if err != nil {
return
return 0, err
}
defer func() {
_ = resp.Body.Close()
_ = res.Body.Close()
}()
err = checkStatusCode(resp, 204)
if err != nil {
return
if res.StatusCode != 204 {
return 0, fmt.Errorf("unexpected status code while transferring chunk: %d", res.StatusCode)
}
serverOffset, err = strconv.ParseInt(resp.Header.Get("upload-offset"), 10, 64)
if err != nil {
return
}
sfid := resp.Header.Get("putio-file-id")
sfid := res.Header.Get("putio-file-id")
if sfid != "" {
fileID, err = strconv.ParseInt(sfid, 10, 64)
if err != nil {
return
return 0, err
}
}
return
}
func (f *Fs) makeUploadHeadRequest(ctx context.Context, location string) (*http.Request, error) {
req, err := http.NewRequest("HEAD", location, nil)
if err != nil {
return nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
req.Header.Set("tus-resumable", "1.0.0")
return req, nil
return fileID, nil
}
func (f *Fs) makeUploadPatchRequest(ctx context.Context, location string, in io.Reader, offset, length int64) (*http.Request, error) {
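Editor's note: the removed getServerOffset above is the tus-protocol resync step: after a 409 offset mismatch, a HEAD request with tus-resumable: 1.0.0 returns the server's byte count in the upload-offset header, and the client seeks its chunk to match. A standalone sketch of that query (the location URL in main is hypothetical):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"strconv"
)

// serverOffset asks a tus upload endpoint how many bytes it has stored,
// mirroring the getServerOffset helper removed above. location is the
// URL returned when the upload was created.
func serverOffset(ctx context.Context, client *http.Client, location string) (int64, error) {
	req, err := http.NewRequest("HEAD", location, nil)
	if err != nil {
		return 0, err
	}
	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
	req.Header.Set("tus-resumable", "1.0.0")
	resp, err := client.Do(req)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return 0, fmt.Errorf("unexpected status %d from HEAD %s", resp.StatusCode, location)
	}
	return strconv.ParseInt(resp.Header.Get("upload-offset"), 10, 64)
}

func main() {
	offset, err := serverOffset(context.Background(), http.DefaultClient,
		"https://upload.example.com/files/hypothetical")
	fmt.Println(offset, err)
}
```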

View File

@@ -269,7 +269,7 @@ func qsServiceConnection(opt *Options) (*qs.Service, error) {
cf.Protocol = protocol
cf.Host = host
cf.Port = port
// unsupported in v3.1: cf.ConnectionRetries = opt.ConnectionRetries
cf.ConnectionRetries = opt.ConnectionRetries
cf.Connection = fshttp.NewClient(fs.Config)
return qs.Init(cf)

View File

@@ -26,7 +26,6 @@ import (
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
@@ -694,37 +693,16 @@ The minimum is 0 and the maximum is 5GB.`,
Name: "chunk_size",
Help: `Chunk size to use for uploading.
When uploading files larger than upload_cutoff or files with unknown
size (eg from "rclone rcat" or uploaded with "rclone mount" or google
photos or google docs) they will be uploaded as multipart uploads
using this chunk size.
When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.
Note that "--s3-upload-concurrency" chunks of this size are buffered
in memory per transfer.
If you are transferring large files over high speed links and you have
enough memory, then increasing this will speed up the transfers.
Rclone will automatically increase the chunk size when uploading a
large file of known size to stay below the 10,000 chunks limit.
Files of unknown size are uploaded with the configured
chunk_size. Since the default chunk size is 5MB and there can be at
most 10,000 chunks, this means that by default the maximum size of
file you can stream upload is 48GB. If you wish to stream upload
larger files then you will need to increase chunk_size.`,
enough memory, then increasing this will speed up the transfers.`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy
Any files larger than this that need to be server side copied will be
copied in chunks of this size.
The minimum is 0 and the maximum is 5GB.`,
Default: fs.SizeSuffix(maxSizeForCopy),
Advanced: true,
}, {
Name: "disable_checksum",
Help: "Don't store MD5 checksum with object metadata",
@@ -793,11 +771,12 @@ WARNING: Storing parts of an incomplete multipart upload counts towards space us
// Constants
const (
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
listChunkSize = 1000 // number of items to read at once
maxRetries = 10 // number of retries to make of operations
maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
listChunkSize = 1000 // number of items to read at once
maxRetries = 10 // number of retries to make of operations
maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
minChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
@@ -819,7 +798,6 @@ type Options struct {
SSEKMSKeyID string `config:"sse_kms_key_id"`
StorageClass string `config:"storage_class"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DisableChecksum bool `config:"disable_checksum"`
SessionToken string `config:"session_token"`
@@ -983,7 +961,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
Client: ec2metadata.New(session.New(), &aws.Config{
HTTPClient: lowTimeoutClient,
}),
ExpiryWindow: 3 * time.Minute,
ExpiryWindow: 3,
},
}
cred := credentials.NewChainCredentials(providers)
@@ -1664,7 +1642,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
req.StorageClass = &f.opt.StorageClass
}
if srcSize >= int64(f.opt.CopyCutoff) {
if srcSize >= int64(f.opt.UploadCutoff) {
return f.copyMultipart(ctx, req, dstBucket, dstPath, srcBucket, srcPath, srcSize)
}
return f.pacer.Call(func() (bool, error) {
@@ -1677,8 +1655,8 @@ func calculateRange(partSize, partIndex, numParts, totalSize int64) string {
start := partIndex * partSize
var ends string
if partIndex == numParts-1 {
if totalSize >= 1 {
ends = strconv.FormatInt(totalSize-1, 10)
if totalSize >= 0 {
ends = strconv.FormatInt(totalSize, 10)
}
} else {
ends = strconv.FormatInt(start+partSize-1, 10)
@@ -1715,7 +1693,7 @@ func (f *Fs) copyMultipart(ctx context.Context, req *s3.CopyObjectInput, dstBuck
}
}()
partSize := int64(f.opt.CopyCutoff)
partSize := int64(f.opt.ChunkSize)
numParts := (srcSize-1)/partSize + 1
var parts []*s3.CompletedPart
@@ -1894,9 +1872,6 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
o.etag = aws.StringValue(resp.ETag)
o.bytes = size
o.meta = resp.Metadata
if o.meta == nil {
o.meta = map[string]*string{}
}
o.storageClass = aws.StringValue(resp.StorageClass)
if resp.LastModified == nil {
fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
@@ -1943,6 +1918,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
}
o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
if o.bytes >= maxSizeForCopy {
fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
return nil
}
// Can't update metadata here, so return this error to force a recopy
if o.storageClass == "GLACIER" || o.storageClass == "DEEP_ARCHIVE" {
return fs.ErrorCantSetModTime
@@ -1999,8 +1979,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return resp.Body, nil
}
var warnStreamUpload sync.Once
// Update the Object from in with modTime and size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
bucket, bucketPath := o.split()
@@ -2020,14 +1998,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
u.S3 = o.fs.c
u.PartSize = int64(o.fs.opt.ChunkSize)
// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
// buffers here (default 5MB). With a maximum number of parts (10,000) this will be a file of
// 48GB which seems like a not too unreasonable limit.
if size == -1 {
warnStreamUpload.Do(func() {
fs.Logf(o.fs, "Streaming uploads using chunk size %v will have maximum file size of %v",
o.fs.opt.ChunkSize, fs.SizeSuffix(u.PartSize*s3manager.MaxUploadParts))
})
// Make parts as small as possible while still being able to upload to the
// S3 file size limit. Rounded up to nearest MB.
u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
return
}
// Adjust PartSize until the number of parts is small enough.
@@ -2046,7 +2020,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// read the md5sum if available for non multpart and if
// disable checksum isn't present.
var md5sum string
if !multipart && !o.fs.opt.DisableChecksum {
if !multipart || !o.fs.opt.DisableChecksum {
hash, err := src.Hash(ctx, hash.MD5)
if err == nil && matchMd5.MatchString(hash) {
hashBytes, err := hex.DecodeString(hash)
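Editor's note: the removed help text and Update logic above encode two size limits worth spelling out. Streamed uploads of unknown size use the configured chunk size for every part, so with the 5 MB default and S3's 10,000-part cap the maximum streamable file is about 48 GB; the restored code instead sizes parts for the 5 TB S3 object limit, rounding up to the next MiB. A quick check of both calculations:

```go
package main

import "fmt"

func main() {
	const (
		maxUploadParts   = 10000                                // S3 part count limit
		defaultChunkSize = int64(5 * 1024 * 1024)               // 5 MiB default chunk
		maxFileSize      = int64(5 * 1024 * 1024 * 1024 * 1024) // 5 TiB S3 object limit
	)

	// Unknown-size uploads use the configured chunk size for every part,
	// so the default caps them at 10,000 x 5 MiB.
	fmt.Printf("stream upload limit: %d GiB\n",
		maxUploadParts*defaultChunkSize/(1024*1024*1024)) // prints 48

	// The restored code instead sizes parts for the S3 maximum: divide
	// the 5 TiB limit by the part count and round up to the next MiB.
	partSize := (((maxFileSize / maxUploadParts) >> 20) + 1) << 20
	fmt.Printf("part size for unknown-size uploads: %d MiB\n", partSize>>20) // prints 525
}
```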

View File

@@ -29,17 +29,15 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
sshagent "github.com/xanzy/ssh-agent"
"golang.org/x/crypto/ssh"
"golang.org/x/time/rate"
)
const (
connectionsPerSecond = 10 // don't make more than this many ssh connections/s
hashCommandNotSupported = "none"
minSleep = 100 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
var (
@@ -88,19 +86,8 @@ requested from the ssh-agent. This allows to avoid ` + "`Too many authentication
when the ssh-agent contains many keys.`,
Default: false,
}, {
Name: "use_insecure_cipher",
Help: `Enable the use of insecure ciphers and key exchange methods.
This enables the use of the the following insecure ciphers and key exchange methods:
- aes128-cbc
- aes192-cbc
- aes256-cbc
- 3des-cbc
- diffie-hellman-group-exchange-sha256
- diffie-hellman-group-exchange-sha1
Those algorithms are insecure and may allow plaintext data to be recovered by an attacker.`,
Name: "use_insecure_cipher",
Help: "Enable the use of the aes128-cbc cipher and diffie-hellman-group-exchange-sha256, diffie-hellman-group-exchange-sha1 key exchange. Those algorithms are insecure and may allow plaintext data to be recovered by an attacker.",
Default: false,
Examples: []fs.OptionExample{
{
@@ -156,11 +143,6 @@ Home directory can be found in a shared folder called "home"
Default: "",
Help: "The command used to read sha1 hashes. Leave blank for autodetect.",
Advanced: true,
}, {
Name: "skip_links",
Default: false,
Help: "Set to skip any symlinks and any other non regular files.",
Advanced: true,
}},
}
fs.Register(fsi)
@@ -182,7 +164,6 @@ type Options struct {
SetModTime bool `config:"set_modtime"`
Md5sumCommand string `config:"md5sum_command"`
Sha1sumCommand string `config:"sha1sum_command"`
SkipLinks bool `config:"skip_links"`
}
// Fs stores the interface to the remote SFTP files
@@ -198,7 +179,7 @@ type Fs struct {
cachedHashes *hash.Set
poolMu sync.Mutex
pool []*conn
pacer *fs.Pacer // pacer for operations
connLimit *rate.Limiter // for limiting number of connections per second
}
// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -278,6 +259,10 @@ func (c *conn) closed() error {
// Open a new connection to the SFTP server.
func (f *Fs) sftpConnection() (c *conn, err error) {
// Rate limit rate of new connections
err = f.connLimit.Wait(context.Background())
if err != nil {
return nil, errors.Wrap(err, "limiter failed in connect")
}
c = &conn{
err: make(chan error, 1),
}
@@ -311,14 +296,7 @@ func (f *Fs) getSftpConnection() (c *conn, err error) {
if c != nil {
return c, nil
}
err = f.pacer.Call(func() (bool, error) {
c, err = f.sftpConnection()
if err != nil {
return true, err
}
return false, nil
})
return c, err
return f.sftpConnection()
}
// Return an SFTP connection to the pool
@@ -385,7 +363,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if opt.UseInsecureCipher {
sshConfig.Config.SetDefaults()
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc", "aes192-cbc", "aes256-cbc", "3des-cbc")
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc")
sshConfig.Config.KeyExchanges = append(sshConfig.Config.KeyExchanges, "diffie-hellman-group-exchange-sha1", "diffie-hellman-group-exchange-sha256")
}
@@ -476,7 +454,7 @@ func NewFsWithConnection(ctx context.Context, name string, root string, m config
config: sshConfig,
url: "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root,
mkdirLock: newStringLock(),
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
connLimit: rate.NewLimiter(rate.Limit(connectionsPerSecond), 1),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
@@ -606,16 +584,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
remote := path.Join(dir, info.Name())
// If file is a symlink (not a regular file is the best cross platform test we can do), do a stat to
// pick up the size and type of the destination, instead of the size and type of the symlink.
if !info.Mode().IsRegular() && !info.IsDir() {
if f.opt.SkipLinks {
// skip non regular file if SkipLinks is set
continue
}
if !info.Mode().IsRegular() {
oldInfo := info
info, err = f.stat(remote)
if err != nil {
if !os.IsNotExist(err) {
fs.Errorf(remote, "stat of non-regular file failed: %v", err)
fs.Errorf(remote, "stat of non-regular file/dir failed: %v", err)
}
info = oldInfo
}
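Editor's note: the revert above replaces the generic pacer with golang.org/x/time/rate, capping new SSH connections at connectionsPerSecond (10) with a burst of 1, so connection attempts are strictly paced rather than retried with backoff. A small sketch of that limiter pattern; the dial step is elided:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

const connectionsPerSecond = 10 // matches the constant in the diff above

func main() {
	// Burst of 1: each Wait releases at most one connection attempt.
	limiter := rate.NewLimiter(rate.Limit(connectionsPerSecond), 1)

	start := time.Now()
	for i := 0; i < 5; i++ {
		if err := limiter.Wait(context.Background()); err != nil {
			fmt.Println("limiter failed in connect:", err)
			return
		}
		// ... dial the SSH connection here ...
		fmt.Printf("connection %d at %v\n", i+1,
			time.Since(start).Round(10*time.Millisecond))
	}
}
```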

View File

@@ -7,7 +7,6 @@ import (
"context"
"fmt"
"io"
"net/url"
"path"
"strconv"
"strings"
@@ -531,10 +530,10 @@ type listFn func(remote string, object *swift.Object, isDirectory bool) error
//
// Set recurse to read sub directories
func (f *Fs) listContainerRoot(container, directory, prefix string, addContainer bool, recurse bool, fn listFn) error {
if prefix != "" && !strings.HasSuffix(prefix, "/") {
if prefix != "" {
prefix += "/"
}
if directory != "" && !strings.HasSuffix(directory, "/") {
if directory != "" {
directory += "/"
}
// Options for ObjectsWalk
@@ -953,18 +952,6 @@ func (o *Object) isStaticLargeObject() (bool, error) {
return o.hasHeader("X-Static-Large-Object")
}
func (o *Object) isInContainerVersioning(container string) (bool, error) {
_, headers, err := o.fs.c.Container(container)
if err != nil {
return false, err
}
xHistoryLocation := headers["X-History-Location"]
if len(xHistoryLocation) > 0 {
return true, nil
}
return false, nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.size
@@ -1096,8 +1083,9 @@ func min(x, y int64) int64 {
//
// if except is passed in then segments with that prefix won't be deleted
func (o *Object) removeSegments(except string) error {
segmentsContainer, prefix, err := o.getSegmentsDlo()
err = o.fs.listContainerRoot(segmentsContainer, prefix, "", false, true, func(remote string, object *swift.Object, isDirectory bool) error {
container, containerPath := o.split()
segmentsContainer := container + "_segments"
err := o.fs.listContainerRoot(segmentsContainer, containerPath, "", false, true, func(remote string, object *swift.Object, isDirectory bool) error {
if isDirectory {
return nil
}
@@ -1126,23 +1114,6 @@ func (o *Object) removeSegments(except string) error {
return nil
}
func (o *Object) getSegmentsDlo() (segmentsContainer string, prefix string, err error) {
if err = o.readMetaData(); err != nil {
return
}
dirManifest := o.headers["X-Object-Manifest"]
dirManifest, err = url.PathUnescape(dirManifest)
if err != nil {
return
}
delimiter := strings.Index(dirManifest, "/")
if len(dirManifest) == 0 || delimiter < 0 {
err = errors.New("Missing or wrong structure of manifest of Dynamic large object")
return
}
return dirManifest[:delimiter], dirManifest[delimiter+1:], nil
}
// urlEncode encodes a string so that it is a valid URL
//
// We don't use any of Go's standard methods as we need `/` not
@@ -1329,9 +1300,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
func (o *Object) Remove(ctx context.Context) error {
container, containerPath := o.split()
isDynamicLargeObject, err := o.isDynamicLargeObject()
if err != nil {
return err
}
// Remove file/manifest first
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectDelete(container, containerPath)
@@ -1340,22 +1314,12 @@ func (o *Object) Remove(ctx context.Context) (err error) {
if err != nil {
return err
}
isDynamicLargeObject, err := o.isDynamicLargeObject()
if err != nil {
return err
}
// ...then segments if required
if isDynamicLargeObject {
isInContainerVersioning, err := o.isInContainerVersioning(container)
err = o.removeSegments("")
if err != nil {
return err
}
if !isInContainerVersioning {
err = o.removeSegments("")
if err != nil {
return err
}
}
}
return nil
}
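Editor's note: the removed getSegmentsDlo above located a Dynamic Large Object's segments by parsing the X-Object-Manifest header, which holds a URL-escaped "segments-container/object-prefix" value; the restored code instead assumes the segments live in "<container>_segments". A sketch of the manifest parsing, with an example header value:

```go
package main

import (
	"errors"
	"fmt"
	"net/url"
	"strings"
)

// parseManifest splits an X-Object-Manifest value of the form
// "segments-container/object-prefix", unescaping it first, as the
// removed getSegmentsDlo helper did.
func parseManifest(manifest string) (container, prefix string, err error) {
	manifest, err = url.PathUnescape(manifest)
	if err != nil {
		return "", "", err
	}
	i := strings.Index(manifest, "/")
	if manifest == "" || i < 0 {
		return "", "", errors.New("missing or malformed Dynamic Large Object manifest")
	}
	return manifest[:i], manifest[i+1:], nil
}

func main() {
	c, p, err := parseManifest("mybucket_segments/videos/big.mp4/")
	fmt.Println(c, p, err)
}
```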

View File

@@ -113,8 +113,7 @@ type Fs struct {
canStream bool // set if can stream
useOCMtime bool // set if can use X-OC-Mtime
retryWithZeroDepth bool // some vendors (sharepoint) won't list files when Depth is 1 (our default)
hasMD5 bool // set if can use owncloud style checksums for MD5
hasSHA1 bool // set if can use owncloud style checksums for SHA1
hasChecksums bool // set if can use owncloud style checksums
}
// Object describes a webdav object
@@ -216,7 +215,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
},
NoRedirect: true,
}
if f.hasMD5 || f.hasSHA1 {
if f.hasChecksums {
opts.Body = bytes.NewBuffer(owncloudProps)
}
var result api.Multistatus
@@ -384,7 +383,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// sets the BearerToken up
func (f *Fs) setBearerToken(token string) {
f.opt.BearerToken = token
f.srv.SetHeader("Authorization", "Bearer "+token)
f.srv.SetHeader("Authorization", "BEARER "+token)
}
// fetch the bearer token using the command
@@ -431,12 +430,11 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
f.canStream = true
f.precision = time.Second
f.useOCMtime = true
f.hasMD5 = true
f.hasSHA1 = true
f.hasChecksums = true
case "nextcloud":
f.precision = time.Second
f.useOCMtime = true
f.hasSHA1 = true
f.hasChecksums = true
case "sharepoint":
// To mount sharepoint, two Cookies are required
// They have to be set instead of BasicAuth
@@ -538,7 +536,7 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
"Depth": depth,
},
}
if f.hasMD5 || f.hasSHA1 {
if f.hasChecksums {
opts.Body = bytes.NewBuffer(owncloudProps)
}
var result api.Multistatus
@@ -947,14 +945,10 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
hashes := hash.Set(hash.None)
if f.hasMD5 {
hashes.Add(hash.MD5)
if f.hasChecksums {
return hash.NewHashSet(hash.MD5, hash.SHA1)
}
if f.hasSHA1 {
hashes.Add(hash.SHA1)
}
return hashes
return hash.Set(hash.None)
}
// About gets quota information
@@ -1021,11 +1015,13 @@ func (o *Object) Remote() string {
// Hash returns the SHA1 or MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t == hash.MD5 && o.fs.hasMD5 {
return o.md5, nil
}
if t == hash.SHA1 && o.fs.hasSHA1 {
return o.sha1, nil
if o.fs.hasChecksums {
switch t {
case hash.SHA1:
return o.sha1, nil
case hash.MD5:
return o.md5, nil
}
}
return "", hash.ErrUnsupported
}
@@ -1046,14 +1042,10 @@ func (o *Object) setMetaData(info *api.Prop) (err error) {
o.hasMetaData = true
o.size = info.Size
o.modTime = time.Time(info.Modified)
if o.fs.hasMD5 || o.fs.hasSHA1 {
if o.fs.hasChecksums {
hashes := info.Hashes()
if o.fs.hasSHA1 {
o.sha1 = hashes[hash.SHA1]
}
if o.fs.hasMD5 {
o.md5 = hashes[hash.MD5]
}
o.sha1 = hashes[hash.SHA1]
o.md5 = hashes[hash.MD5]
}
return nil
}
@@ -1134,21 +1126,19 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
ContentType: fs.MimeType(ctx, src),
}
if o.fs.useOCMtime || o.fs.hasMD5 || o.fs.hasSHA1 {
if o.fs.useOCMtime || o.fs.hasChecksums {
opts.ExtraHeaders = map[string]string{}
if o.fs.useOCMtime {
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1e9)
}
// Set one upload checksum
// Owncloud uses one checksum only to check the upload and stores its own SHA1 and MD5
// Nextcloud stores the checksum you supply (SHA1 or MD5) but only stores one
if o.fs.hasSHA1 {
if o.fs.hasChecksums {
// Set an upload checksum - prefer SHA1
//
// This is used as an upload integrity test. If we set
// only SHA1 here, owncloud will calculate the MD5 too.
if sha1, _ := src.Hash(ctx, hash.SHA1); sha1 != "" {
opts.ExtraHeaders["OC-Checksum"] = "SHA1:" + sha1
}
}
if o.fs.hasMD5 && opts.ExtraHeaders["OC-Checksum"] == "" {
if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" {
} else if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" {
opts.ExtraHeaders["OC-Checksum"] = "MD5:" + md5
}
}
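A sketch of the checksum selection above, assuming hypothetical sha1Of/md5Of helpers that return an empty string when the source can't supply that hash:

// ocChecksum picks a single upload checksum, preferring SHA1:
// owncloud verifies the upload with whichever hash is sent and
// computes the other one itself, while nextcloud stores only the
// one supplied.
func ocChecksum(hasSHA1, hasMD5 bool, sha1Of, md5Of func() string) map[string]string {
	headers := map[string]string{}
	if hasSHA1 {
		if sum := sha1Of(); sum != "" {
			headers["OC-Checksum"] = "SHA1:" + sum
		}
	}
	if hasMD5 && headers["OC-Checksum"] == "" {
		if sum := md5Of(); sum != "" {
			headers["OC-Checksum"] = "MD5:" + sum
		}
	}
	return headers
}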

View File

@@ -9,7 +9,6 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/spf13/cobra"
)
@@ -20,9 +19,8 @@ var (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &jsonOutput, "json", "", false, "Format output as JSON")
flags.BoolVarP(cmdFlags, &fullOutput, "full", "", false, "Full numbers instead of SI units")
commandDefinition.Flags().BoolVar(&jsonOutput, "json", false, "Format output as JSON")
commandDefinition.Flags().BoolVar(&fullOutput, "full", false, "Full numbers instead of SI units")
}
// printValue formats uv to be output

View File

@@ -3,32 +3,22 @@ package authorize
import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/flags"
"github.com/spf13/cobra"
)
var (
noAutoBrowser bool
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &noAutoBrowser, "auth-no-open-browser", "", false, "Do not automatically open auth link in default browser")
cmd.Root.AddCommand(commandDefintion)
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "authorize",
Short: `Remote authorization.`,
Long: `
Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser - use as instructed by
rclone config.
Use --auth-no-open-browser to prevent rclone from automatically
opening the auth link in your default browser.`,
rclone config.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 3, command, args)
config.Authorize(args, noAutoBrowser)
config.Authorize(args)
},
}

View File

@@ -8,7 +8,6 @@ import (
"os"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -23,16 +22,15 @@ var (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.Int64VarP(cmdFlags, &head, "head", "", head, "Only print the first N characters.")
flags.Int64VarP(cmdFlags, &tail, "tail", "", tail, "Only print the last N characters.")
flags.Int64VarP(cmdFlags, &offset, "offset", "", offset, "Start printing at offset N (or from end if -ve).")
flags.Int64VarP(cmdFlags, &count, "count", "", count, "Only print N characters.")
flags.BoolVarP(cmdFlags, &discard, "discard", "", discard, "Discard the output instead of printing.")
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().Int64VarP(&head, "head", "", head, "Only print the first N characters.")
commandDefintion.Flags().Int64VarP(&tail, "tail", "", tail, "Only print the last N characters.")
commandDefintion.Flags().Int64VarP(&offset, "offset", "", offset, "Start printing at offset N (or from end if -ve).")
commandDefintion.Flags().Int64VarP(&count, "count", "", count, "Only print N characters.")
commandDefintion.Flags().BoolVarP(&discard, "discard", "", discard, "Discard the output instead of printing.")
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "cat remote:path",
Short: `Concatenates any files and sends them to stdout.`,
Long: `

View File

@@ -4,7 +4,6 @@ import (
"context"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -16,13 +15,12 @@ var (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by downloading rather than with hash.")
flags.BoolVarP(cmdFlags, &oneway, "one-way", "", oneway, "Check one way only, source files must exist on remote")
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&download, "download", "", download, "Check by downloading rather than with hash.")
commandDefintion.Flags().BoolVarP(&oneway, "one-way", "", oneway, "Check one way only, source files must exist on remote")
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "check source:path dest:path",
Short: `Checks the files in the source and destination match.`,
Long: `

View File

@@ -9,10 +9,10 @@ import (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefintion)
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "cleanup remote:path",
Short: `Clean up the remote if possible`,
Long: `

View File

@@ -82,7 +82,7 @@ func ShowVersion() {
func NewFsFile(remote string) (fs.Fs, string) {
_, _, fsPath, err := fs.ParseRemote(remote)
if err != nil {
err = fs.CountError(err)
fs.CountError(err)
log.Fatalf("Failed to create file system for %q: %v", remote, err)
}
f, err := cache.Get(remote)
@@ -92,7 +92,7 @@ func NewFsFile(remote string) (fs.Fs, string) {
case nil:
return f, ""
default:
err = fs.CountError(err)
fs.CountError(err)
log.Fatalf("Failed to create file system for %q: %v", remote, err)
}
return nil, ""
@@ -107,13 +107,13 @@ func newFsFileAddFilter(remote string) (fs.Fs, string) {
if fileName != "" {
if !filter.Active.InActive() {
err := errors.Errorf("Can't limit to single files when using filters: %v", remote)
err = fs.CountError(err)
fs.CountError(err)
log.Fatalf(err.Error())
}
// Limit transfers to this file
err := filter.Active.AddFile(fileName)
if err != nil {
err = fs.CountError(err)
fs.CountError(err)
log.Fatalf("Failed to limit to single file %q: %v", remote, err)
}
}
@@ -135,7 +135,7 @@ func NewFsSrc(args []string) fs.Fs {
func newFsDir(remote string) fs.Fs {
f, err := cache.Get(remote)
if err != nil {
err = fs.CountError(err)
fs.CountError(err)
log.Fatalf("Failed to create file system for %q: %v", remote, err)
}
return f
@@ -189,11 +189,11 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs
fdst, err := cache.Get(dstRemote)
switch err {
case fs.ErrorIsFile:
_ = fs.CountError(err)
fs.CountError(err)
log.Fatalf("Source doesn't exist or is a directory and destination is a file")
case nil:
default:
_ = fs.CountError(err)
fs.CountError(err)
log.Fatalf("Failed to create file system for destination %q: %v", dstRemote, err)
}
return
@@ -239,7 +239,7 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
SigInfoHandler()
for try := 1; try <= *retries; try++ {
err = f()
err = fs.CountError(err)
fs.CountError(err)
lastErr := accounting.GlobalStats().GetLastError()
if err == nil {
err = lastErr
@@ -386,12 +386,12 @@ func initConfig() {
fs.Infof(nil, "Creating CPU profile %q\n", *cpuProfile)
f, err := os.Create(*cpuProfile)
if err != nil {
err = fs.CountError(err)
fs.CountError(err)
log.Fatal(err)
}
err = pprof.StartCPUProfile(f)
if err != nil {
err = fs.CountError(err)
fs.CountError(err)
log.Fatal(err)
}
atexit.Register(func() {
@@ -405,17 +405,17 @@ func initConfig() {
fs.Infof(nil, "Saving Memory profile %q\n", *memProfile)
f, err := os.Create(*memProfile)
if err != nil {
err = fs.CountError(err)
fs.CountError(err)
log.Fatal(err)
}
err = pprof.WriteHeapProfile(f)
if err != nil {
err = fs.CountError(err)
fs.CountError(err)
log.Fatal(err)
}
err = f.Close()
if err != nil {
err = fs.CountError(err)
fs.CountError(err)
log.Fatal(err)
}
})
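The repeated `err = fs.CountError(err)` / `fs.CountError(err)` churn in this file comes down to whether the counter returns its argument. A hypothetical minimal version of the returning form, which is what lets call sites chain it on one line (the real fs.CountError also records the error in the accounting stats):

// countError increments a counter for non-nil errors and hands the
// error back unchanged, so callers can write
//	err = countError(err)
// without an extra statement.
var errorCount int

func countError(err error) error {
	if err != nil {
		errorCount++
	}
	return err
}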

View File

@@ -246,12 +246,7 @@ func (fsys *FS) Readdir(dirPath string,
for _, item := range items {
node, ok := item.(vfs.Node)
if ok {
name := node.Name()
if len(name) > mountlib.MaxLeafSize {
fs.Errorf(dirPath, "Name too long (%d bytes) for FUSE, skipping: %s", len(name), name)
continue
}
fill(name, nil, 0)
fill(node.Name(), nil, 0)
}
}
itemsRead = len(items)
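A small sketch of the guard in the hunk above; 4095 mirrors the mountlib.MaxLeafSize constant that appears later in this diff, since FUSE cannot return longer directory entry names and one bad name would otherwise break the whole listing:

// filterLeafNames drops names FUSE cannot represent, as Readdir
// above does.
func filterLeafNames(names []string) []string {
	const maxLeafSize = 4095
	kept := names[:0]
	for _, name := range names {
		if len(name) > maxLeafSize {
			continue // skip: too long for FUSE
		}
		kept = append(kept, name)
	}
	return kept
}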
@@ -371,12 +366,7 @@ func (fsys *FS) Write(path string, buff []byte, ofst int64, fh uint64) (n int) {
if errc != 0 {
return errc
}
var err error
if fsys.VFS.Opt.CacheMode < vfs.CacheModeWrites || handle.Node().Mode()&os.ModeAppend == 0 {
n, err = handle.WriteAt(buff, ofst)
} else {
n, err = handle.Write(buff)
}
n, err := handle.WriteAt(buff, ofst)
if err != nil {
return translateError(err)
}
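The write-path change above in isolation: positional writes go through WriteAt, but once the handle is append-mode and the cache can back it, sequential Write must be used so the kernel-supplied offset doesn't clobber the append position. A sketch with an illustrative minimal interface and constant:

type writeHandle interface {
	Write(p []byte) (n int, err error)
	WriteAt(p []byte, off int64) (n int, err error)
}

const cacheModeWrites = 2 // stand-in for vfs.CacheModeWrites

func appendAwareWrite(h writeHandle, cacheMode int, isAppend bool, p []byte, off int64) (int, error) {
	// Below CacheModeWrites there is no cache file to append to, so
	// the positional write is the only safe option.
	if cacheMode < cacheModeWrites || !isAppend {
		return h.WriteAt(p, off)
	}
	return h.Write(p)
}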

View File

@@ -21,7 +21,6 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/vfs"
"github.com/rclone/rclone/vfs/vfsflags"
)
@@ -208,7 +207,7 @@ func mount(f fs.Fs, mountpoint string) (*vfs.VFS, <-chan error, func() error, er
// If noModTime is set then it
func Mount(f fs.Fs, mountpoint string) error {
// Mount it
FS, errChan, unmount, err := mount(f, mountpoint)
FS, errChan, _, err := mount(f, mountpoint)
if err != nil {
return errors.Wrap(err, "failed to mount FUSE fs")
}
@@ -218,10 +217,6 @@ func Mount(f fs.Fs, mountpoint string) error {
sigHup := make(chan os.Signal, 1)
signal.Notify(sigHup, syscall.SIGHUP)
atexit.Register(func() {
_ = unmount()
})
if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
return errors.Wrap(err, "failed to notify systemd")
}

View File

@@ -11,7 +11,6 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/rc"
"github.com/spf13/cobra"
)
@@ -272,7 +271,7 @@ var (
)
func init() {
flags.BoolVarP(configUserInfoCommand.Flags(), &jsonOutput, "json", "", false, "Format output as JSON")
configUserInfoCommand.Flags().BoolVar(&jsonOutput, "json", false, "Format output as JSON")
}
var configUserInfoCommand = &cobra.Command{

View File

@@ -4,7 +4,6 @@ import (
"context"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/sync"
"github.com/spf13/cobra"
@@ -15,12 +14,11 @@ var (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after copy")
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after copy")
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "copy source:path dest:path",
Short: `Copy files from source to dest, skipping already copied`,
Long: `

View File

@@ -10,10 +10,10 @@ import (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefintion)
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "copyto source:path dest:path",
Short: `Copy files from source to dest, skipping already copied`,
Long: `

View File

@@ -5,7 +5,6 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -15,12 +14,11 @@ var (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &autoFilename, "auto-filename", "a", autoFilename, "Get the file name from the url and use it for destination file path")
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&autoFilename, "auto-filename", "a", autoFilename, "Get the file name from the url and use it for destination file path")
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "copyurl https://example.com dest:path",
Short: `Copy url content to dest.`,
Long: `

View File

@@ -7,7 +7,6 @@ import (
"github.com/rclone/rclone/backend/crypt"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
@@ -19,12 +18,11 @@ var (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlag := commandDefinition.Flags()
flags.BoolVarP(cmdFlag, &oneway, "one-way", "", oneway, "Check one way only, source files must exist on destination")
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&oneway, "one-way", "", oneway, "Check one way only, source files must exist on destination")
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "cryptcheck remote:path cryptedremote:path",
Short: `Cryptcheck checks the integrity of a crypted remote.`,
Long: `
@@ -88,7 +86,7 @@ func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
underlyingDst := cryptDst.UnWrap()
underlyingHash, err := underlyingDst.Hash(ctx, hashType)
if err != nil {
err = fs.CountError(err)
fs.CountError(err)
fs.Errorf(dst, "Error reading hash from underlying %v: %v", underlyingDst, err)
return true, false
}
@@ -97,7 +95,7 @@ func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
}
cryptHash, err := fcrypt.ComputeHash(ctx, cryptDst, src, hashType)
if err != nil {
err = fs.CountError(err)
fs.CountError(err)
fs.Errorf(dst, "Error computing hash: %v", err)
return true, false
}
@@ -106,7 +104,7 @@ func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
}
if cryptHash != underlyingHash {
err = errors.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
err = fs.CountError(err)
fs.CountError(err)
fs.Errorf(src, err.Error())
return true, false
}
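The comparison above in outline: cryptcheck reads the hash the wrapped destination reports, encrypts-and-hashes the plaintext source through the crypt layer, and flags any mismatch. The core decision, as a minimal illustrative helper:

// hashesDiffer reports whether two hex hashes disagree; an empty
// hash means the backend couldn't supply one and is treated as a
// pass, as in cryptCheck above.
func hashesDiffer(underlyingHash, cryptHash string) bool {
	if underlyingHash == "" || cryptHash == "" {
		return false
	}
	return cryptHash != underlyingHash
}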

View File

@@ -18,8 +18,8 @@ var (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &Reverse, "reverse", "", Reverse, "Reverse cryptdecode, encrypts filenames")
flagSet := commandDefinition.Flags()
flags.BoolVarP(flagSet, &Reverse, "reverse", "", Reverse, "Reverse cryptdecode, encrypts filenames")
}
var commandDefinition = &cobra.Command{

View File

@@ -12,10 +12,10 @@ import (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefintion)
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "dbhashsum remote:path",
Short: `Produces a Dropbox hash file for all the objects in the path.`,
Long: `

View File

@@ -5,7 +5,6 @@ import (
"log"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -15,12 +14,11 @@ var (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlag := commandDefinition.Flags()
flags.FVarP(cmdFlag, &dedupeMode, "dedupe-mode", "", "Dedupe mode interactive|skip|first|newest|oldest|rename.")
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().VarP(&dedupeMode, "dedupe-mode", "", "Dedupe mode interactive|skip|first|newest|oldest|rename.")
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "dedupe [mode] remote:path",
Short: `Interactively find duplicate files and delete/rename them.`,
Long: `

View File

@@ -9,10 +9,10 @@ import (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefintion)
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "delete remote:path",
Short: `Remove the contents of path.`,
Long: `

View File

@@ -10,10 +10,10 @@ import (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefintion)
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "deletefile remote:path",
Short: `Remove a single file from remote.`,
Long: `

View File

@@ -17,7 +17,7 @@ import (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefintion)
}
const gendocFrontmatterTemplate = `---
@@ -28,7 +28,7 @@ url: %s
---
`
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "gendocs output_directory",
Short: `Output markdown docs for rclone to the directory supplied.`,
Long: `

View File

@@ -46,11 +46,10 @@ __rclone_custom_func() {
else
__rclone_init_completion -n : || return
fi
local rclone=(command rclone --ask-password=false)
if [[ $cur != *:* ]]; then
local ifs=$IFS
IFS=$'\n'
local remotes=($("${rclone[@]}" listremotes 2> /dev/null))
local remotes=($(command rclone listremotes))
IFS=$ifs
local remote
for remote in "${remotes[@]}"; do
@@ -69,7 +68,7 @@ __rclone_custom_func() {
fi
local ifs=$IFS
IFS=$'\n'
local lines=($("${rclone[@]}" lsf "${cur%%:*}:$prefix" 2> /dev/null))
local lines=($(rclone lsf "${cur%%:*}:$prefix" 2>/dev/null))
IFS=$ifs
local line
for line in "${lines[@]}"; do

View File

@@ -22,7 +22,6 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/info/internal"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/lib/random"
@@ -42,17 +41,16 @@ var (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.StringVarP(cmdFlags, &writeJSON, "write-json", "", "", "Write results to file.")
flags.BoolVarP(cmdFlags, &checkNormalization, "check-normalization", "", true, "Check UTF-8 Normalization.")
flags.BoolVarP(cmdFlags, &checkControl, "check-control", "", true, "Check control characters.")
flags.DurationVarP(cmdFlags, &uploadWait, "upload-wait", "", 0, "Wait after writing a file.")
flags.BoolVarP(cmdFlags, &checkLength, "check-length", "", true, "Check max filename length.")
flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", true, "Check uploadxs with indeterminate file size.")
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().StringVarP(&writeJSON, "write-json", "", "", "Write results to file.")
commandDefintion.Flags().BoolVarP(&checkNormalization, "check-normalization", "", true, "Check UTF-8 Normalization.")
commandDefintion.Flags().BoolVarP(&checkControl, "check-control", "", true, "Check control characters.")
commandDefintion.Flags().DurationVarP(&uploadWait, "upload-wait", "", 0, "Wait after writing a file.")
commandDefintion.Flags().BoolVarP(&checkLength, "check-length", "", true, "Check max filename length.")
commandDefintion.Flags().BoolVarP(&checkStreaming, "check-streaming", "", true, "Check uploads with indeterminate file size.")
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "info [remote:path]+",
Short: `Discovers file name or other limitations for paths.`,
Long: `rclone info discovers what filenames and upload methods are possible

View File

@@ -10,10 +10,10 @@ import (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefintion)
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "link remote:path",
Short: `Generate public link to file/folder.`,
Long: `

View File

@@ -6,7 +6,6 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/flags"
"github.com/spf13/cobra"
)
@@ -16,12 +15,11 @@ var (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &listLong, "long", "", listLong, "Show the type as well as names.")
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&listLong, "long", "", listLong, "Show the type as well as names.")
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "listremotes",
Short: `List all the remotes in the config file.`,
Long: `

View File

@@ -11,10 +11,10 @@ import (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefintion)
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "ls remote:path",
Short: `List the objects in the path with size and path.`,
Long: `

View File

@@ -7,7 +7,6 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/ls/lshelp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -17,12 +16,11 @@ var (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &recurse, "recursive", "R", false, "Recurse into the listing.")
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&recurse, "recursive", "R", false, "Recurse into the listing.")
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "lsd remote:path",
Short: `List all directories/containers/buckets in the path.`,
Long: `

View File

@@ -10,7 +10,6 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/ls/lshelp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
@@ -29,20 +28,20 @@ var (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.StringVarP(cmdFlags, &format, "format", "F", "p", "Output format - see help for details")
flags.StringVarP(cmdFlags, &separator, "separator", "s", ";", "Separator for the items in the format.")
flags.BoolVarP(cmdFlags, &dirSlash, "dir-slash", "d", true, "Append a slash to directory names.")
flags.FVarP(cmdFlags, &hashType, "hash", "", "Use this hash when `h` is used in the format MD5|SHA-1|DropboxHash")
flags.BoolVarP(cmdFlags, &filesOnly, "files-only", "", false, "Only list files.")
flags.BoolVarP(cmdFlags, &dirsOnly, "dirs-only", "", false, "Only list directories.")
flags.BoolVarP(cmdFlags, &csv, "csv", "", false, "Output in CSV format.")
flags.BoolVarP(cmdFlags, &absolute, "absolute", "", false, "Put a leading / in front of path names.")
flags.BoolVarP(cmdFlags, &recurse, "recursive", "R", false, "Recurse into the listing.")
cmd.Root.AddCommand(commandDefintion)
flags := commandDefintion.Flags()
flags.StringVarP(&format, "format", "F", "p", "Output format - see help for details")
flags.StringVarP(&separator, "separator", "s", ";", "Separator for the items in the format.")
flags.BoolVarP(&dirSlash, "dir-slash", "d", true, "Append a slash to directory names.")
flags.VarP(&hashType, "hash", "", "Use this hash when `h` is used in the format MD5|SHA-1|DropboxHash")
flags.BoolVarP(&filesOnly, "files-only", "", false, "Only list files.")
flags.BoolVarP(&dirsOnly, "dirs-only", "", false, "Only list directories.")
flags.BoolVarP(&csv, "csv", "", false, "Output in CSV format.")
flags.BoolVarP(&absolute, "absolute", "", false, "Put a leading / in front of path names.")
commandDefintion.Flags().BoolVarP(&recurse, "recursive", "R", false, "Recurse into the listing.")
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "lsf remote:path",
Short: `List directories and objects in remote:path formatted for parsing`,
Long: `

View File

@@ -9,7 +9,6 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/ls/lshelp"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -19,18 +18,17 @@ var (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &opt.Recurse, "recursive", "R", false, "Recurse into the listing.")
flags.BoolVarP(cmdFlags, &opt.ShowHash, "hash", "", false, "Include hashes in the output (may take longer).")
flags.BoolVarP(cmdFlags, &opt.NoModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up).")
flags.BoolVarP(cmdFlags, &opt.ShowEncrypted, "encrypted", "M", false, "Show the encrypted names.")
flags.BoolVarP(cmdFlags, &opt.ShowOrigIDs, "original", "", false, "Show the ID of the underlying Object.")
flags.BoolVarP(cmdFlags, &opt.FilesOnly, "files-only", "", false, "Show only files in the listing.")
flags.BoolVarP(cmdFlags, &opt.DirsOnly, "dirs-only", "", false, "Show only directories in the listing.")
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&opt.Recurse, "recursive", "R", false, "Recurse into the listing.")
commandDefintion.Flags().BoolVarP(&opt.ShowHash, "hash", "", false, "Include hashes in the output (may take longer).")
commandDefintion.Flags().BoolVarP(&opt.NoModTime, "no-modtime", "", false, "Don't read the modification time (can speed things up).")
commandDefintion.Flags().BoolVarP(&opt.ShowEncrypted, "encrypted", "M", false, "Show the encrypted names.")
commandDefintion.Flags().BoolVarP(&opt.ShowOrigIDs, "original", "", false, "Show the ID of the underlying Object.")
commandDefintion.Flags().BoolVarP(&opt.FilesOnly, "files-only", "", false, "Show only files in the listing.")
commandDefintion.Flags().BoolVarP(&opt.DirsOnly, "dirs-only", "", false, "Show only directories in the listing.")
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "lsjson remote:path",
Short: `List directories and objects in the path in JSON format.`,
Long: `List directories and objects in the path in JSON format.

View File

@@ -11,10 +11,10 @@ import (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefintion)
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "lsl remote:path",
Short: `List the objects in path with modification time, size and path.`,
Long: `

View File

@@ -10,10 +10,10 @@ import (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefintion)
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "md5sum remote:path",
Short: `Produces an md5sum file for all the objects in the path.`,
Long: `

View File

@@ -12,10 +12,10 @@ import (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefintion)
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "memtest remote:path",
Short: `Load all the objects at remote:path and report memory stats.`,
Hidden: true,

View File

@@ -9,10 +9,10 @@ import (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefintion)
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "mkdir remote:path",
Short: `Make the path if it doesn't already exist.`,
Run: func(command *cobra.Command, args []string) {

View File

@@ -11,7 +11,6 @@ import (
fusefs "bazil.org/fuse/fs"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/vfs"
)
@@ -97,15 +96,10 @@ func (d *Dir) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error)
return nil, translateError(err)
}
for _, node := range items {
name := node.Name()
if len(name) >= mountlib.MaxLeafSize {
fs.Errorf(d, "Name too long (%d bytes) for FUSE, skipping: %s", len(name), name)
continue
}
var dirent = fuse.Dirent{
// Inode FIXME ???
Type: fuse.DT_File,
Name: name,
Name: node.Name(),
}
if node.IsDir() {
dirent.Type = fuse.DT_Dir

View File

@@ -5,7 +5,6 @@ package mount
import (
"context"
"io"
"os"
"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
@@ -42,12 +41,7 @@ var _ fusefs.HandleWriter = (*FileHandle)(nil)
// Write data to the file handle
func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
defer log.Trace(fh, "len=%d, offset=%d", len(req.Data), req.Offset)("written=%d, err=%v", &resp.Size, &err)
var n int
if fh.Handle.Node().VFS().Opt.CacheMode < vfs.CacheModeWrites || fh.Handle.Node().Mode()&os.ModeAppend == 0 {
n, err = fh.Handle.WriteAt(req.Data, req.Offset)
} else {
n, err = fh.Handle.Write(req.Data)
}
n, err := fh.Handle.WriteAt(req.Data, req.Offset)
if err != nil {
return translateError(err)
}

View File

@@ -32,10 +32,12 @@ func mountOptions(device string) (options []fuse.MountOption) {
fuse.Subtype("rclone"),
fuse.FSName(device),
fuse.VolumeName(mountlib.VolumeName),
fuse.AsyncRead(),
// Options from benchmarking in the fuse module
//fuse.MaxReadahead(64 * 1024 * 1024),
//fuse.AsyncRead(), - FIXME this causes
// ReadFileHandle.Read error: read /home/files/ISOs/xubuntu-15.10-desktop-amd64.iso: bad file descriptor
// which is probably related to errors people are having
//fuse.WritebackCache(),
}
if mountlib.NoAppleDouble {
@@ -137,9 +139,6 @@ func Mount(f fs.Fs, mountpoint string) error {
sigHup := make(chan os.Signal, 1)
signal.Notify(sigHup, syscall.SIGHUP)
atexit.IgnoreSignals()
atexit.Register(func() {
_ = unmount()
})
if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
return errors.Wrap(err, "failed to notify systemd")

View File

@@ -38,11 +38,6 @@ var (
DaemonTimeout time.Duration // OSXFUSE only
)
// Global constants
const (
MaxLeafSize = 4095 // don't pass file names longer than this
)
func init() {
// DaemonTimeout defaults to non zero for macOS
if runtime.GOOS == "darwin" {
@@ -99,7 +94,7 @@ func checkMountpointOverlap(root, mountpoint string) error {
// NewMountCommand makes a mount command with the given name and Mount function
func NewMountCommand(commandName string, Mount func(f fs.Fs, mountpoint string) error) *cobra.Command {
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: commandName + " remote:path /path/to/mountpoint",
Short: `Mount the remote as file system on a mountpoint.`,
Long: `
@@ -300,34 +295,34 @@ be copied to the vfs cache before opening with --vfs-cache-mode full.
}
// Register the command
cmd.Root.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefintion)
// Add flags
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &DebugFUSE, "debug-fuse", "", DebugFUSE, "Debug the FUSE internals - needs -v.")
flagSet := commandDefintion.Flags()
flags.BoolVarP(flagSet, &DebugFUSE, "debug-fuse", "", DebugFUSE, "Debug the FUSE internals - needs -v.")
// mount options
flags.BoolVarP(cmdFlags, &AllowNonEmpty, "allow-non-empty", "", AllowNonEmpty, "Allow mounting over a non-empty directory.")
flags.BoolVarP(cmdFlags, &AllowRoot, "allow-root", "", AllowRoot, "Allow access to root user.")
flags.BoolVarP(cmdFlags, &AllowOther, "allow-other", "", AllowOther, "Allow access to other users.")
flags.BoolVarP(cmdFlags, &DefaultPermissions, "default-permissions", "", DefaultPermissions, "Makes kernel enforce access control based on the file mode.")
flags.BoolVarP(cmdFlags, &WritebackCache, "write-back-cache", "", WritebackCache, "Makes kernel buffer writes before sending them to rclone. Without this, writethrough caching is used.")
flags.FVarP(cmdFlags, &MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads.")
flags.DurationVarP(cmdFlags, &AttrTimeout, "attr-timeout", "", AttrTimeout, "Time for which file/directory attributes are cached.")
flags.StringArrayVarP(cmdFlags, &ExtraOptions, "option", "o", []string{}, "Option for libfuse/WinFsp. Repeat if required.")
flags.StringArrayVarP(cmdFlags, &ExtraFlags, "fuse-flag", "", []string{}, "Flags or arguments to be passed direct to libfuse/WinFsp. Repeat if required.")
flags.BoolVarP(cmdFlags, &Daemon, "daemon", "", Daemon, "Run mount as a daemon (background mode).")
flags.StringVarP(cmdFlags, &VolumeName, "volname", "", VolumeName, "Set the volume name (not supported by all OSes).")
flags.DurationVarP(cmdFlags, &DaemonTimeout, "daemon-timeout", "", DaemonTimeout, "Time limit for rclone to respond to kernel (not supported by all OSes).")
flags.BoolVarP(flagSet, &AllowNonEmpty, "allow-non-empty", "", AllowNonEmpty, "Allow mounting over a non-empty directory.")
flags.BoolVarP(flagSet, &AllowRoot, "allow-root", "", AllowRoot, "Allow access to root user.")
flags.BoolVarP(flagSet, &AllowOther, "allow-other", "", AllowOther, "Allow access to other users.")
flags.BoolVarP(flagSet, &DefaultPermissions, "default-permissions", "", DefaultPermissions, "Makes kernel enforce access control based on the file mode.")
flags.BoolVarP(flagSet, &WritebackCache, "write-back-cache", "", WritebackCache, "Makes kernel buffer writes before sending them to rclone. Without this, writethrough caching is used.")
flags.FVarP(flagSet, &MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads.")
flags.DurationVarP(flagSet, &AttrTimeout, "attr-timeout", "", AttrTimeout, "Time for which file/directory attributes are cached.")
flags.StringArrayVarP(flagSet, &ExtraOptions, "option", "o", []string{}, "Option for libfuse/WinFsp. Repeat if required.")
flags.StringArrayVarP(flagSet, &ExtraFlags, "fuse-flag", "", []string{}, "Flags or arguments to be passed direct to libfuse/WinFsp. Repeat if required.")
flags.BoolVarP(flagSet, &Daemon, "daemon", "", Daemon, "Run mount as a daemon (background mode).")
flags.StringVarP(flagSet, &VolumeName, "volname", "", VolumeName, "Set the volume name (not supported by all OSes).")
flags.DurationVarP(flagSet, &DaemonTimeout, "daemon-timeout", "", DaemonTimeout, "Time limit for rclone to respond to kernel (not supported by all OSes).")
if runtime.GOOS == "darwin" {
flags.BoolVarP(cmdFlags, &NoAppleDouble, "noappledouble", "", NoAppleDouble, "Sets the OSXFUSE option noappledouble.")
flags.BoolVarP(cmdFlags, &NoAppleXattr, "noapplexattr", "", NoAppleXattr, "Sets the OSXFUSE option noapplexattr.")
flags.BoolVarP(flagSet, &NoAppleDouble, "noappledouble", "", NoAppleDouble, "Sets the OSXFUSE option noappledouble.")
flags.BoolVarP(flagSet, &NoAppleXattr, "noapplexattr", "", NoAppleXattr, "Sets the OSXFUSE option noapplexattr.")
}
// Add in the generic flags
vfsflags.AddFlags(cmdFlags)
vfsflags.AddFlags(flagSet)
return commandDefinition
return commandDefintion
}
// ClipBlocks clips the blocks pointed to to the OS max

View File

@@ -50,8 +50,6 @@ func TestRenameOpenHandle(t *testing.T) {
err = file.Close()
require.NoError(t, err)
run.waitForWriters()
// verify file was renamed properly
run.checkDir(t, "renamebla 9")

View File

@@ -34,11 +34,6 @@ func osCreate(name string) (*os.File, error) {
return os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
}
// os.Create with append
func osAppend(name string) (*os.File, error) {
return os.OpenFile(name, os.O_WRONLY|os.O_APPEND, 0666)
}
// TestFileModTimeWithOpenWriters tests mod time on open files
func TestFileModTimeWithOpenWriters(t *testing.T) {
run.skipIfNoFUSE(t)

View File

@@ -78,7 +78,6 @@ func RunTests(t *testing.T, fn MountFn) {
t.Run("TestWriteFileDoubleClose", TestWriteFileDoubleClose)
t.Run("TestWriteFileFsync", TestWriteFileFsync)
t.Run("TestWriteFileDup", TestWriteFileDup)
t.Run("TestWriteFileAppend", TestWriteFileAppend)
})
log.Printf("Finished test run with cache mode %v (ok=%v)", cacheMode, ok)
if !ok {

View File

@@ -2,7 +2,6 @@ package mounttest
import (
"os"
"runtime"
"testing"
"github.com/stretchr/testify/assert"
@@ -131,48 +130,3 @@ func TestWriteFileDup(t *testing.T) {
run.waitForWriters()
run.rm(t, "to be synced")
}
// TestWriteFileAppend tests that O_APPEND works on cache backends >= writes
func TestWriteFileAppend(t *testing.T) {
run.skipIfNoFUSE(t)
if run.vfs.Opt.CacheMode < vfs.CacheModeWrites {
t.Skip("not supported on vfs-cache-mode < writes")
return
}
// TODO: Windows needs the v1.5 release of WinFsp to handle O_APPEND properly.
// Until it gets released, skip this test on Windows.
if runtime.GOOS == "windows" {
t.Skip("currently unsupported on Windows")
}
filepath := run.path("to be synced")
fh, err := osCreate(filepath)
require.NoError(t, err)
testData := []byte("0123456789")
appendData := []byte("10")
_, err = fh.Write(testData)
require.NoError(t, err)
err = fh.Close()
require.NoError(t, err)
fh, err = osAppend(filepath)
require.NoError(t, err)
_, err = fh.Write(appendData)
require.NoError(t, err)
err = fh.Close()
require.NoError(t, err)
info, err := os.Stat(filepath)
require.NoError(t, err)
require.EqualValues(t, len(testData)+len(appendData), info.Size())
run.waitForWriters()
run.rm(t, "to be synced")
}

View File

@@ -4,7 +4,6 @@ import (
"context"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/sync"
"github.com/spf13/cobra"
@@ -17,13 +16,12 @@ var (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &deleteEmptySrcDirs, "delete-empty-src-dirs", "", deleteEmptySrcDirs, "Delete empty source dirs after move")
flags.BoolVarP(cmdFlags, &createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after move")
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&deleteEmptySrcDirs, "delete-empty-src-dirs", "", deleteEmptySrcDirs, "Delete empty source dirs after move")
commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after move")
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "move source:path dest:path",
Short: `Move files from source to dest.`,
Long: `

View File

@@ -10,10 +10,10 @@ import (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefintion)
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "moveto source:path dest:path",
Short: `Move file or directory from source to dest.`,
Long: `

View File

@@ -24,10 +24,10 @@ import (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefintion)
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "ncdu remote:path",
Short: `Explore a remote with a text based user interface.`,
Long: `

View File

@@ -9,10 +9,10 @@ import (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefintion)
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "obscure password",
Short: `Obscure password for use in the rclone.conf`,
Run: func(command *cobra.Command, args []string) {

View File

@@ -9,10 +9,10 @@ import (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefintion)
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "purge remote:path",
Short: `Remove the path and all of its contents.`,
Long: `

View File

@@ -13,7 +13,6 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/rc"
"github.com/spf13/cobra"
@@ -30,17 +29,16 @@ var (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &noOutput, "no-output", "", noOutput, "If set don't output the JSON result.")
flags.StringVarP(cmdFlags, &url, "url", "", url, "URL to connect to rclone remote control.")
flags.StringVarP(cmdFlags, &jsonInput, "json", "", jsonInput, "Input JSON - use instead of key=value args.")
flags.StringVarP(cmdFlags, &authUser, "user", "", "", "Username to use to rclone remote control.")
flags.StringVarP(cmdFlags, &authPass, "pass", "", "", "Password to use to connect to rclone remote control.")
flags.BoolVarP(cmdFlags, &loopback, "loopback", "", false, "If set connect to this rclone instance not via HTTP.")
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&noOutput, "no-output", "", noOutput, "If set don't output the JSON result.")
commandDefintion.Flags().StringVarP(&url, "url", "", url, "URL to connect to rclone remote control.")
commandDefintion.Flags().StringVarP(&jsonInput, "json", "", jsonInput, "Input JSON - use instead of key=value args.")
commandDefintion.Flags().StringVarP(&authUser, "user", "", "", "Username to use to rclone remote control.")
commandDefintion.Flags().StringVarP(&authPass, "pass", "", "", "Password to use to connect to rclone remote control.")
commandDefintion.Flags().BoolVarP(&loopback, "loopback", "", false, "If set connect to this rclone instance not via HTTP.")
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "rc commands parameter",
Short: `Run a command against a running rclone.`,
Long: `

View File

@@ -12,10 +12,10 @@ import (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefintion)
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "rcat remote:path",
Short: `Copies standard input to file on remote.`,
Long: `

View File

@@ -9,10 +9,10 @@ import (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefintion)
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "reveal password",
Short: `Reveal obscured password from rclone.conf`,
Run: func(command *cobra.Command, args []string) {

View File

@@ -9,10 +9,10 @@ import (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefintion)
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "rmdir remote:path",
Short: `Remove the path if empty.`,
Long: `

View File

@@ -214,7 +214,7 @@ func withHeader(name string, value string, next http.Handler) http.Handler {
// serveError returns an http.StatusInternalServerError and logs the error
func serveError(what interface{}, w http.ResponseWriter, text string, err error) {
err = fs.CountError(err)
fs.CountError(err)
fs.Errorf(what, "%s: %v", text, err)
http.Error(w, text+".", http.StatusInternalServerError)
}

View File

@@ -15,6 +15,7 @@ import (
"strconv"
"sync"
ftp "github.com/goftp/server"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/cmd/serve/proxy"
@@ -28,7 +29,6 @@ import (
"github.com/rclone/rclone/vfs/vfsflags"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
ftp "goftp.io/server"
)
// Options contains options for the http Server
@@ -155,7 +155,7 @@ func newServer(f fs.Fs, opt *Options) (*server, error) {
PassivePorts: opt.PassivePorts,
Auth: s, // implemented by CheckPasswd method
Logger: &Logger{},
//TODO implement a maximum of https://godoc.org/goftp.io/server#ServerOpts
//TODO implement a maximum of https://godoc.org/github.com/goftp/server#ServerOpts
}
s.srv = ftp.NewServer(ftpopt)
return s, nil
@@ -210,8 +210,8 @@ func (l *Logger) PrintResponse(sessionID string, code int, message string) {
// CheckPassword is called with the connection.
func findID(callerName []byte) (string, error) {
// Dump the stack in this format
// github.com/rclone/rclone/vendor/goftp.io/server.(*Conn).Serve(0xc0000b2680)
// /home/ncw/go/src/github.com/rclone/rclone/vendor/goftp.io/server/conn.go:116 +0x11d
// github.com/rclone/rclone/vendor/github.com/goftp/server.(*Conn).Serve(0xc0000b2680)
// /home/ncw/go/src/github.com/rclone/rclone/vendor/github.com/goftp/server/conn.go:116 +0x11d
buf := make([]byte, 4096)
n := runtime.Stack(buf, false)
buf = buf[:n]
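The stack-dump trick above, runnable in isolation: capture the current goroutine's stack and inspect the first frames, which is where findID fishes out the "(*Conn).Serve(0xc0000b2680)" receiver pointer to use as a session identifier:

package main

import (
	"fmt"
	"runtime"
	"strings"
)

func main() {
	buf := make([]byte, 4096)
	n := runtime.Stack(buf, false) // false: current goroutine only
	// When called from the FTP server the frame lines carry the
	// (*Conn) pointer; here they just show main.main.
	for _, line := range strings.Split(string(buf[:n]), "\n")[:3] {
		fmt.Println(line)
	}
}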

View File

@@ -11,6 +11,7 @@ import (
"fmt"
"testing"
ftp "github.com/goftp/server"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
@@ -18,7 +19,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
ftp "goftp.io/server"
)
const (

View File

@@ -68,7 +68,7 @@ func (d *Directory) AddEntry(remote string, isDir bool) {
// Error logs the error and if a ResponseWriter is given it writes a http.StatusInternalServerError
func Error(what interface{}, w http.ResponseWriter, text string, err error) {
err = fs.CountError(err)
fs.CountError(err)
fs.Errorf(what, "%s: %v", text, err)
if w != nil {
http.Error(w, text+".", http.StatusInternalServerError)

View File

@@ -208,10 +208,7 @@ func (p *Proxy) call(user, pass string, passwordBytes []byte) (value interface{}
if err != nil {
return nil, false, err
}
// The bcrypt cost is a compromise between security and speed. The password is looked up on every
// transaction for WebDAV so we store it lightly hashed. An attacker would find it easier to go after
// the unencrypted password in memory most likely.
pwHash, err := bcrypt.GenerateFromPassword(passwordBytes, bcrypt.MinCost)
pwHash, err := bcrypt.GenerateFromPassword(passwordBytes, bcrypt.DefaultCost)
if err != nil {
return nil, false, err
}
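The cost trade-off from the restored comment, as a runnable example using golang.org/x/crypto/bcrypt (which the proxy already depends on):

package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	pw := []byte("secret")
	// MinCost keeps per-transaction verification cheap, which suits
	// a hash that only ever lives in memory; DefaultCost (10) is the
	// usual choice for credentials stored at rest.
	hash, err := bcrypt.GenerateFromPassword(pw, bcrypt.MinCost)
	if err != nil {
		panic(err)
	}
	fmt.Println(bcrypt.CompareHashAndPassword(hash, pw) == nil) // true
}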

View File

@@ -271,7 +271,7 @@ func (s *server) postObject(w http.ResponseWriter, r *http.Request, remote strin
_, err := operations.RcatSize(r.Context(), s.f, remote, r.Body, r.ContentLength, time.Now())
if err != nil {
err = accounting.Stats(r.Context()).Error(err)
accounting.Stats(r.Context()).Error(err)
fs.Errorf(remote, "Post request rcat error: %v", err)
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)

View File

@@ -10,10 +10,10 @@ import (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefintion)
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "settier tier remote:path",
Short: `Changes storage class/tier of objects in remote.`,
Long: `

View File

@@ -10,10 +10,10 @@ import (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmd.Root.AddCommand(commandDefintion)
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "sha1sum remote:path",
Short: `Produces an sha1sum file for all the objects in the path.`,
Long: `

View File

@@ -8,7 +8,6 @@ import (
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -17,8 +16,7 @@ var jsonOutput bool
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &jsonOutput, "json", "", false, "format output as JSON")
commandDefinition.Flags().BoolVar(&jsonOutput, "json", false, "format output as JSON")
}
var commandDefinition = &cobra.Command{

View File

@@ -4,7 +4,6 @@ import (
"context"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/sync"
"github.com/spf13/cobra"
@@ -15,12 +14,11 @@ var (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after sync")
cmd.Root.AddCommand(commandDefintion)
commandDefintion.Flags().BoolVarP(&createEmptySrcDirs, "create-empty-src-dirs", "", createEmptySrcDirs, "Create empty source dirs on destination after sync")
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "sync source:path dest:path",
Short: `Make source and dest identical, modifying destination only.`,
Long: `

View File

@@ -8,7 +8,6 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/object"
"github.com/spf13/cobra"
)
@@ -22,13 +21,13 @@ const defaultLayout string = "060102"
const layoutDateWithTime = "2006-01-02T15:04:05"
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &notCreateNewFile, "no-create", "C", false, "Do not create the file if it does not exist.")
flags.StringVarP(cmdFlags, &timeAsArgument, "timestamp", "t", "", "Change the modification times to the specified time instead of the current time of day. The argument is of the form 'YYMMDD' (ex. 17.10.30) or 'YYYY-MM-DDTHH:MM:SS' (ex. 2006-01-02T15:04:05)")
cmd.Root.AddCommand(commandDefintion)
flags := commandDefintion.Flags()
flags.BoolVarP(&notCreateNewFile, "no-create", "C", false, "Do not create the file if it does not exist.")
flags.StringVarP(&timeAsArgument, "timestamp", "t", "", "Change the modification times to the specified time instead of the current time of day. The argument is of the form 'YYMMDD' (ex. 17.10.30) or 'YYYY-MM-DDTHH:MM:SS' (ex. 2006-01-02T15:04:05)")
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "touch remote:path",
Short: `Create new file or change file modification time.`,
Run: func(command *cobra.Command, args []string) {

View File

@@ -14,7 +14,6 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/walk"
@@ -29,43 +28,43 @@ var (
)
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
cmd.Root.AddCommand(commandDefintion)
flags := commandDefintion.Flags()
// List
flags.BoolVarP(cmdFlags, &opts.All, "all", "a", false, "All files are listed (list . files too).")
flags.BoolVarP(cmdFlags, &opts.DirsOnly, "dirs-only", "d", false, "List directories only.")
flags.BoolVarP(cmdFlags, &opts.FullPath, "full-path", "", false, "Print the full path prefix for each file.")
//flags.BoolVarP(cmdFlags, &opts.IgnoreCase, "ignore-case", "", false, "Ignore case when pattern matching.")
flags.BoolVarP(cmdFlags, &noReport, "noreport", "", false, "Turn off file/directory count at end of tree listing.")
// flags.BoolVarP(cmdFlags, &opts.FollowLink, "follow", "l", false, "Follow symbolic links like directories.")
flags.IntVarP(cmdFlags, &opts.DeepLevel, "level", "", 0, "Descend only level directories deep.")
// flags.StringVarP(cmdFlags, &opts.Pattern, "pattern", "P", "", "List only those files that match the pattern given.")
// flags.StringVarP(cmdFlags, &opts.IPattern, "exclude", "", "", "Do not list files that match the given pattern.")
flags.StringVarP(cmdFlags, &outFileName, "output", "o", "", "Output to file instead of stdout.")
flags.BoolVarP(&opts.All, "all", "a", false, "All files are listed (list . files too).")
flags.BoolVarP(&opts.DirsOnly, "dirs-only", "d", false, "List directories only.")
flags.BoolVarP(&opts.FullPath, "full-path", "", false, "Print the full path prefix for each file.")
//flags.BoolVarP(&opts.IgnoreCase, "ignore-case", "", false, "Ignore case when pattern matching.")
flags.BoolVarP(&noReport, "noreport", "", false, "Turn off file/directory count at end of tree listing.")
// flags.BoolVarP(&opts.FollowLink, "follow", "l", false, "Follow symbolic links like directories.")
flags.IntVarP(&opts.DeepLevel, "level", "", 0, "Descend only level directories deep.")
// flags.StringVarP(&opts.Pattern, "pattern", "P", "", "List only those files that match the pattern given.")
// flags.StringVarP(&opts.IPattern, "exclude", "", "", "Do not list files that match the given pattern.")
flags.StringVarP(&outFileName, "output", "o", "", "Output to file instead of stdout.")
// Files
flags.BoolVarP(cmdFlags, &opts.ByteSize, "size", "s", false, "Print the size in bytes of each file.")
flags.BoolVarP(cmdFlags, &opts.UnitSize, "human", "", false, "Print the size in a more human readable way.")
flags.BoolVarP(cmdFlags, &opts.FileMode, "protections", "p", false, "Print the protections for each file.")
// flags.BoolVarP(cmdFlags, &opts.ShowUid, "uid", "", false, "Displays file owner or UID number.")
// flags.BoolVarP(cmdFlags, &opts.ShowGid, "gid", "", false, "Displays file group owner or GID number.")
flags.BoolVarP(cmdFlags, &opts.Quotes, "quote", "Q", false, "Quote filenames with double quotes.")
flags.BoolVarP(cmdFlags, &opts.LastMod, "modtime", "D", false, "Print the date of last modification.")
// flags.BoolVarP(cmdFlags, &opts.Inodes, "inodes", "", false, "Print inode number of each file.")
// flags.BoolVarP(cmdFlags, &opts.Device, "device", "", false, "Print device ID number to which each file belongs.")
flags.BoolVarP(&opts.ByteSize, "size", "s", false, "Print the size in bytes of each file.")
flags.BoolVarP(&opts.UnitSize, "human", "", false, "Print the size in a more human readable way.")
flags.BoolVarP(&opts.FileMode, "protections", "p", false, "Print the protections for each file.")
// flags.BoolVarP(&opts.ShowUid, "uid", "", false, "Displays file owner or UID number.")
// flags.BoolVarP(&opts.ShowGid, "gid", "", false, "Displays file group owner or GID number.")
flags.BoolVarP(&opts.Quotes, "quote", "Q", false, "Quote filenames with double quotes.")
flags.BoolVarP(&opts.LastMod, "modtime", "D", false, "Print the date of last modification.")
// flags.BoolVarP(&opts.Inodes, "inodes", "", false, "Print inode number of each file.")
// flags.BoolVarP(&opts.Device, "device", "", false, "Print device ID number to which each file belongs.")
// Sort
flags.BoolVarP(cmdFlags, &opts.NoSort, "unsorted", "U", false, "Leave files unsorted.")
flags.BoolVarP(cmdFlags, &opts.VerSort, "version", "", false, "Sort files alphanumerically by version.")
flags.BoolVarP(cmdFlags, &opts.ModSort, "sort-modtime", "t", false, "Sort files by last modification time.")
flags.BoolVarP(cmdFlags, &opts.CTimeSort, "sort-ctime", "", false, "Sort files by last status change time.")
flags.BoolVarP(cmdFlags, &opts.ReverSort, "sort-reverse", "r", false, "Reverse the order of the sort.")
flags.BoolVarP(cmdFlags, &opts.DirSort, "dirsfirst", "", false, "List directories before files (-U disables).")
flags.StringVarP(cmdFlags, &sort, "sort", "", "", "Select sort: name,version,size,mtime,ctime.")
flags.BoolVarP(&opts.NoSort, "unsorted", "U", false, "Leave files unsorted.")
flags.BoolVarP(&opts.VerSort, "version", "", false, "Sort files alphanumerically by version.")
flags.BoolVarP(&opts.ModSort, "sort-modtime", "t", false, "Sort files by last modification time.")
flags.BoolVarP(&opts.CTimeSort, "sort-ctime", "", false, "Sort files by last status change time.")
flags.BoolVarP(&opts.ReverSort, "sort-reverse", "r", false, "Reverse the order of the sort.")
flags.BoolVarP(&opts.DirSort, "dirsfirst", "", false, "List directories before files (-U disables).")
flags.StringVarP(&sort, "sort", "", "", "Select sort: name,version,size,mtime,ctime.")
// Graphics
flags.BoolVarP(cmdFlags, &opts.NoIndent, "noindent", "i", false, "Don't print indentation lines.")
flags.BoolVarP(cmdFlags, &opts.Colorize, "color", "C", false, "Turn colorization on always.")
flags.BoolVarP(&opts.NoIndent, "noindent", "i", false, "Don't print indentation lines.")
flags.BoolVarP(&opts.Colorize, "color", "C", false, "Turn colorization on always.")
}
var commandDefinition = &cobra.Command{
var commandDefintion = &cobra.Command{
Use: "tree remote:path",
Short: `List the contents of the remote in a tree like fashion.`,
Long: `

View File

@@ -10,7 +10,6 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/version"
"github.com/spf13/cobra"
)
@@ -21,8 +20,8 @@ var (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &check, "check", "", false, "Check for new version.")
flags := commandDefinition.Flags()
flags.BoolVarP(&check, "check", "", false, "Check for new version.")
}
var commandDefinition = &cobra.Command{

View File

@@ -192,7 +192,7 @@ Contributors
* Sheldon Rupp <me@shel.io>
* albertony <12441419+albertony@users.noreply.github.com>
* cron410 <cron410@gmail.com>
* Anagh Kumar Baranwal <anaghk.dos@gmail.com> <6824881+darthShadow@users.noreply.github.com>
* Anagh Kumar Baranwal <anaghk.dos@gmail.com>
* Felix Brucker <felix@felixbrucker.com>
* Santiago Rodríguez <scollazo@users.noreply.github.com>
* Craig Miskell <craig.miskell@fluxfederation.com>
@@ -263,7 +263,7 @@ Contributors
* garry415 <garry.415@gmail.com>
* forgems <forgems@gmail.com>
* Florian Apolloner <florian@apolloner.eu>
* Aleksandar Janković <office@ajankovic.com> <ajankovic@users.noreply.github.com>
* Aleksandar Jankovic <office@ajankovic.com>
* Maran <maran@protonmail.com>
* nguyenhuuluan434 <nguyenhuuluan434@gmail.com>
* Laura Hausmann <zotan@zotan.pw> <laura@hausmann.dev>
@@ -301,18 +301,3 @@ Contributors
* Vighnesh SK <booterror99@gmail.com>
* Arijit Biswas <dibbyo456@gmail.com>
* Michele Caci <michele.caci@gmail.com>
* AlexandrBoltris <ua2fgb@gmail.com>
* Bryce Larson <blarson@saltstack.com>
* Carlos Ferreyra <crypticmind@gmail.com>
* Saksham Khanna <sakshamkhanna@outlook.com>
* dausruddin <5763466+dausruddin@users.noreply.github.com>
* zero-24 <zero-24@users.noreply.github.com>
* Xiaoxing Ye <ye@xiaoxing.us>
* Barry Muldrey <barry@muldrey.net>
* Sebastian Brandt <sebastian.brandt@friday.de>
* Marco Molteni <marco.molteni@mailbox.org>
* Ankur Gupta <ankur0493@gmail.com>
* Maciej Zimnoch <maciej@scylladb.com>
* anuar45 <serdaliyev.anuar@gmail.com>
* Fernando <ferferga@users.noreply.github.com>
* David Cole <david.cole@sohonet.com>

View File

@@ -252,30 +252,6 @@ Leave blank normally.
- Type: string
- Default: ""
#### --box-box-config-file
Box App config.json location
Leave blank normally.
- Config: box_config_file
- Env Var: RCLONE_BOX_BOX_CONFIG_FILE
- Type: string
- Default: ""
#### --box-box-sub-type
- Config: box_sub_type
- Env Var: RCLONE_BOX_BOX_SUB_TYPE
- Type: string
- Default: "user"
- Examples:
- "user"
- Rclone should act on behalf of a user
- "enterprise"
- Rclone should act on behalf of a service account
### Advanced Options
Here are the advanced options specific to box (Box).
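For orientation, the JWT-related options listed in this hunk (`box_config_file`, `box_sub_type`) sit alongside the usual client credentials in a remote definition. A hypothetical rclone.conf sketch, with the remote name and file path invented for illustration:

```
[mybox]
type = box
box_config_file = /path/to/box-app-config.json
box_sub_type = enterprise
```

Per the option descriptions above, `box_sub_type = enterprise` makes rclone act on behalf of a service account rather than a user.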

View File

@@ -1,137 +1,11 @@
---
title: "Documentation"
description: "Rclone Changelog"
date: "2019-11-19"
date: "2019-10-05"
---
# Changelog
## v1.50.2 - 2019-11-19
* Bug Fixes
* accounting: Fix memory leak on retries operations (Nick Craig-Wood)
* Drive
* Fix listing of the root directory with drive.files scope (Nick Craig-Wood)
* Fix --drive-root-folder-id with team/shared drives (Nick Craig-Wood)
## v1.50.1 - 2019-11-02
* Bug Fixes
* hash: Fix accidentally changed hash names for `DropboxHash` and `CRC-32` (Nick Craig-Wood)
* fshttp: Fix error reporting on tpslimit token bucket errors (Nick Craig-Wood)
* fshttp: Don't print token bucket errors on context cancelled (Nick Craig-Wood)
* Local
* Fix listings of . on Windows (Nick Craig-Wood)
* Onedrive
* Fix DirMove/Move after Onedrive change (Xiaoxing Ye)
## v1.50.0 - 2019-10-26
* New backends
* [Citrix Sharefile](/sharefile) (Nick Craig-Wood)
* [Chunker](/chunker) - an overlay backend to split files into smaller parts (Ivan Andreev)
* [Mail.ru Cloud](/mailru) (Ivan Andreev)
* New Features
* encodings (Fabian Möller & Nick Craig-Wood)
* All backends now use file name encoding to ensure any file name can be written to any backend.
* See the [restricted file name docs](/overview/#restricted-filenames) for more info and the [local backend docs](/local/#filenames).
* Some file names may look different in rclone if you are using any control characters in names or [unicode FULLWIDTH symbols](https://en.wikipedia.org/wiki/Halfwidth_and_Fullwidth_Forms_(Unicode_block)).
* build
* Update to use go1.13 for the build (Nick Craig-Wood)
* Drop support for go1.9 (Nick Craig-Wood)
* Build rclone with GitHub actions (Nick Craig-Wood)
* Convert python scripts to python3 (Nick Craig-Wood)
* Swap Azure/go-ansiterm for mattn/go-colorable (Nick Craig-Wood)
* Dockerfile fixes (Matei David)
* Add [plugin support](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#writing-a-plugin) for backends and commands (Richard Patel)
* config
* Use alternating Red/Green in config to make more obvious (Nick Craig-Wood)
* contrib
* Add sample DLNA server Docker Compose manifest. (pataquets)
* Add sample WebDAV server Docker Compose manifest. (pataquets)
* copyurl
* Add `--auto-filename` flag for using file name from URL in destination path (Denis)
* serve dlna:
* Many compatibility improvements (Dan Walters)
* Support for external srt subtitles (Dan Walters)
* rc
* Added command core/quit (Saksham Khanna)
* Bug Fixes
* sync
* Make `--update`/`-u` not transfer files that haven't changed (Nick Craig-Wood)
* Free objects after they come out of the transfer pipe to save memory (Nick Craig-Wood)
* Fix `--files-from` without `--no-traverse` doing a recursive scan (Nick Craig-Wood)
* operations
* Fix accounting for server side copies (Nick Craig-Wood)
* Display 'All duplicates removed' only if dedupe successful (Sezal Agrawal)
* Display 'Deleted X extra copies' only if dedupe successful (Sezal Agrawal)
* accounting
* Only allow up to 100 completed transfers in the accounting list to save memory (Nick Craig-Wood)
* Cull the old time ranges when possible to save memory (Nick Craig-Wood)
* Fix panic due to server-side copy fallback (Ivan Andreev)
* Fix memory leak noticeable for transfers of large numbers of objects (Nick Craig-Wood)
* Fix total duration calculation (Nick Craig-Wood)
* cmd
* Fix environment variables not setting command line flags (Nick Craig-Wood)
* Make autocomplete compatible with bash's posix mode for macOS (Danil Semelenov)
* Make `--progress` work in git bash on Windows (Nick Craig-Wood)
* Fix 'compopt: command not found' on autocomplete on macOS (Danil Semelenov)
* config
* Fix setting of non top level flags from environment variables (Nick Craig-Wood)
* Check config names more carefully and report errors (Nick Craig-Wood)
* Remove error: can't use `--size-only` and `--ignore-size` together. (Nick Craig-Wood)
* filter: Prevent mixing options when `--files-from` is in use (Michele Caci)
* serve sftp: Fix crash on unsupported operations (eg Readlink) (Nick Craig-Wood)
* Mount
* Allow files of unknown size to be read properly (Nick Craig-Wood)
* Skip tests on <= 2 CPUs to avoid lockup (Nick Craig-Wood)
* Fix panic on File.Open (Nick Craig-Wood)
* Fix "mount_fusefs: -o timeout=: option not supported" on FreeBSD (Nick Craig-Wood)
* Don't pass huge filenames (>4k) to FUSE as it can't cope (Nick Craig-Wood)
* VFS
* Add flag `--vfs-case-insensitive` for windows/macOS mounts (Ivan Andreev)
* Make objects of unknown size readable through the VFS (Nick Craig-Wood)
* Move writeback of dirty data out of close() method into its own method (FlushWrites) and remove close() call from Flush() (Brett Dutro)
* Stop empty dirs disappearing when renamed on bucket based remotes (Nick Craig-Wood)
* Stop change notify polling clearing so much of the directory cache (Nick Craig-Wood)
* Azure Blob
* Disable logging to the Windows event log (Nick Craig-Wood)
* B2
* Remove `unverified:` prefix on sha1 to improve interop (eg with CyberDuck) (Nick Craig-Wood)
* Box
* Add options to get access token via JWT auth (David)
* Drive
* Disable HTTP/2 by default to work around INTERNAL_ERROR problems (Nick Craig-Wood)
* Make sure that drive root ID is always canonical (Nick Craig-Wood)
* Fix `--drive-shared-with-me` from the root with ls and `--fast-list` (Nick Craig-Wood)
* Fix ChangeNotify polling for shared drives (Nick Craig-Wood)
* Fix change notify polling when using appDataFolder (Nick Craig-Wood)
* Dropbox
* Make disallowed filename errors not retry (Nick Craig-Wood)
* Fix nil pointer exception on restricted files (Nick Craig-Wood)
* Fichier
* Fix accessing files > 2GB on 32 bit systems (Nick Craig-Wood)
* FTP
* Allow disabling EPSV mode (Jon Fautley)
* HTTP
* HEAD directory entries in parallel to speed up listings (Nick Craig-Wood)
* Add `--http-no-head` to stop rclone doing HEAD in listings (Nick Craig-Wood)
* Putio
* Add ability to resume uploads (Cenk Alti)
* S3
* Fix signature v2_auth headers (Anthony Rusdi)
* Fix encoding for control characters (Nick Craig-Wood)
* Only ask for URL encoded directory listings if we need them on Ceph (Nick Craig-Wood)
* Add option for multipart failure behaviour (Aleksandar Jankovic)
* Support for multipart copy (庄天翼)
* Fix nil pointer reference if no metadata returned for object (Nick Craig-Wood)
* SFTP
* Fix `--sftp-ask-password` trying to contact the ssh agent (Nick Craig-Wood)
* Fix hashes of files with backslashes (Nick Craig-Wood)
* Include more ciphers with `--sftp-use-insecure-cipher` (Carlos Ferreyra)
* WebDAV
* Parse and return Sharepoint error response (Henning Surmeier)
## v1.49.5 - 2019-10-05
* Bug Fixes
@@ -162,15 +36,17 @@ date: "2019-11-19"
* New Features
* build: Add Docker workflow support (Alfonso Montero)
* Bug Fixes
* accounting: Fix locking in Transfer to avoid deadlock with `--progress` (Nick Craig-Wood)
* accounting: Fix locking in Transfer to avoid deadlock with --progress (Nick Craig-Wood)
* docs: Fix template argument for mktemp in install.sh (Cnly)
* operations: Fix `-u`/`--update` with google photos / files of unknown size (Nick Craig-Wood)
* operations: Fix -u/--update with google photos / files of unknown size (Nick Craig-Wood)
* rc: Fix docs for config/create /update /password (Nick Craig-Wood)
* Google Cloud Storage
* Fix need for elevated permissions on SetModTime (Nick Craig-Wood)
## v1.49.1 - 2019-08-28
Point release to fix a config bug and the google photos backend.
* Bug Fixes
* config: Fix generated passwords being stored as empty password (Nick Craig-Wood)
* rcd: Added missing parameter for web-gui info logs. (Chaitanya)

View File

@@ -293,7 +293,7 @@ in the same directory).
<!--- autogenerated options start - DO NOT EDIT, instead edit fs.RegInfo in backend/chunker/chunker.go then run make backenddocs -->
### Standard Options
Here are the standard options specific to chunker (Transparently chunk/split large files).
Here are the standard options specific to chunker.
#### --chunker-remote
@@ -341,7 +341,7 @@ Choose how chunker handles hash sums. All modes but "none" require metadata.
### Advanced Options
Here are the advanced options specific to chunker (Transparently chunk/split large files).
Here are the advanced options specific to chunker.
#### --chunker-name-format
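To ground these options, a chunker remote layered over another configured remote might look like the following rclone.conf sketch (remote names and values are illustrative, not taken from the diff):

```
[overlay]
type = chunker
remote = s3remote:bucket/path
chunk_size = 100M
hash_type = md5
```

Here `remote` points at the wrapped backend, and `hash_type` selects one of the hash-handling modes described in the hunk above.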

View File

@@ -1,5 +1,5 @@
---
date: 2019-10-26T11:04:03+01:00
date: 2019-08-26T15:19:45+01:00
title: "rclone"
slug: rclone
url: /commands/rclone/

View File

@@ -1,5 +1,5 @@
---
date: 2019-10-26T11:04:03+01:00
date: 2019-08-26T15:19:45+01:00
title: "rclone about"
slug: rclone_about
url: /commands/rclone_about/

View File

@@ -1,5 +1,5 @@
---
date: 2019-10-26T11:04:03+01:00
date: 2019-08-26T15:19:45+01:00
title: "rclone authorize"
slug: rclone_authorize
url: /commands/rclone_authorize/
@@ -22,8 +22,7 @@ rclone authorize [flags]
### Options
```
--auth-no-open-browser Do not automatically open auth link in default browser
-h, --help help for authorize
-h, --help help for authorize
```
See the [global flags page](/flags/) for global options not listed here.

View File

@@ -1,5 +1,5 @@
---
date: 2019-10-26T11:04:03+01:00
date: 2019-08-26T15:19:45+01:00
title: "rclone cachestats"
slug: rclone_cachestats
url: /commands/rclone_cachestats/

View File

@@ -1,5 +1,5 @@
---
date: 2019-10-26T11:04:03+01:00
date: 2019-08-26T15:19:45+01:00
title: "rclone cat"
slug: rclone_cat
url: /commands/rclone_cat/

View File

@@ -1,5 +1,5 @@
---
date: 2019-10-26T11:04:03+01:00
date: 2019-08-26T15:19:45+01:00
title: "rclone check"
slug: rclone_check
url: /commands/rclone_check/

View File

@@ -1,5 +1,5 @@
---
date: 2019-10-26T11:04:03+01:00
date: 2019-08-26T15:19:45+01:00
title: "rclone cleanup"
slug: rclone_cleanup
url: /commands/rclone_cleanup/

View File

@@ -1,5 +1,5 @@
---
date: 2019-10-26T11:04:03+01:00
date: 2019-08-26T15:19:45+01:00
title: "rclone config"
slug: rclone_config
url: /commands/rclone_config/

Some files were not shown because too many files have changed in this diff.