mirror of https://github.com/rclone/rclone.git synced 2025-12-24 12:13:19 +00:00

Compare commits


1 Commit

Author SHA1 Message Date
Stephen Harris
f11255a801 sftp: allow cert based auth via optional pubkey 2020-09-24 14:51:35 -04:00
132 changed files with 484 additions and 1159 deletions

View File

@@ -124,8 +124,6 @@ jobs:
- name: Install Libraries on macOS
shell: bash
run: |
-brew untap local/homebrew-openssl # workaround for https://github.com/actions/virtual-environments/issues/1811
-brew untap local/homebrew-python2 # workaround for https://github.com/actions/virtual-environments/issues/1811
brew update
brew cask install osxfuse
if: matrix.os == 'macOS-latest'

View File

@@ -86,7 +86,7 @@ git reset --soft HEAD~2 # This squashes the 2 latest commits together.
git status # Check what will happen, if you made a mistake resetting, you can run git reset 'HEAD@{1}' to undo.
git commit # Add a new commit message.
git push --force # Push the squashed commit to your GitHub repo.
-# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also recommends wizardzines.com
+# For more, see Stack Overflow, Git docs, or generally Duck around the web. jtagcat also reccommends wizardzines.com
```
## CI for your fork ##

View File

@@ -1245,15 +1245,15 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
blob := o.getBlobReference()
ac := azblob.BlobAccessConditions{}
-var downloadResponse *azblob.DownloadResponse
+var dowloadResponse *azblob.DownloadResponse
err = o.fs.pacer.Call(func() (bool, error) {
-downloadResponse, err = blob.Download(ctx, offset, count, ac, false)
+dowloadResponse, err = blob.Download(ctx, offset, count, ac, false)
return o.fs.shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to open for download")
}
-in = downloadResponse.Body(azblob.RetryReaderOptions{})
+in = dowloadResponse.Body(azblob.RetryReaderOptions{})
return in, nil
}
@@ -1475,7 +1475,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// FIXME Until https://github.com/Azure/azure-storage-blob-go/pull/75
// is merged the SDK can't upload a single blob of exactly the chunk
-// size, so upload with a multipart upload to work around.
+// size, so upload with a multpart upload to work around.
// See: https://github.com/rclone/rclone/issues/2653
multipartUpload := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
if size == int64(o.fs.opt.ChunkSize) {

View File

@@ -1013,7 +1013,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return info.SharedLink.URL, err
}
-// deletePermanently permanently deletes a trashed file
+// deletePermanently permenently deletes a trashed file
func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
opts := rest.Opts{
Method: "DELETE",

View File

@@ -1,4 +1,4 @@
-// multipart upload for box
+// multpart upload for box
package box

View File

@@ -296,8 +296,6 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
ServerSideAcrossConfigs: true,
}).Fill(f).Mask(baseFs).WrapsFs(f, baseFs)
-f.features.Disable("ListR") // Recursive listing may cause chunker skip files
return f, err
}

View File

@@ -147,7 +147,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
// If salt is "" we use a fixed salt just to make attackers lives
// slighty harder than using no salt.
//
-// Note that empty password makes all 0x00 keys which is used in the
+// Note that empty passsword makes all 0x00 keys which is used in the
// tests.
func (c *Cipher) Key(password, salt string) (err error) {
const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak)
@@ -633,8 +633,11 @@ func (fh *encrypter) Read(p []byte) (n int, err error) {
}
// possibly err != nil here, but we will process the
// data and the next call to ReadFull will return 0, err
// Write nonce to start of block
copy(fh.buf, fh.nonce[:])
// Encrypt the block using the nonce
-secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
+block := fh.buf
+secretbox.Seal(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
fh.bufIndex = 0
fh.bufSize = blockHeaderSize + n
fh.nonce.increment()
@@ -779,7 +782,8 @@ func (fh *decrypter) fillBuffer() (err error) {
return ErrorEncryptedFileBadHeader
}
// Decrypt the block using the nonce
-_, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
+block := fh.buf
+_, ok := secretbox.Open(block[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey)
if !ok {
if err != nil {
return err // return pending error as it is likely more accurate

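Both sides of the hunks above seal and open each block with NaCl secretbox using a per-block nonce that is incremented after every block; the diff only changes whether the buffer is aliased through a `block` variable first. A minimal, self-contained sketch of that pattern (the key, nonce handling and block contents here are illustrative, not rclone's actual cipher code):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/nacl/secretbox"
)

// increment treats the 24-byte nonce as a little-endian counter,
// mirroring the fh.nonce.increment() calls in the hunk above.
func increment(n *[24]byte) {
	for i := range n {
		n[i]++
		if n[i] != 0 {
			break
		}
	}
}

func main() {
	var key [32]byte
	var nonce [24]byte
	copy(key[:], "an illustrative 32-byte demo key")

	for _, block := range [][]byte{[]byte("block one"), []byte("block two")} {
		// Seal appends the data plus a 16-byte MAC; the real code seals
		// into fh.buf[:0] to reuse its buffer, nil lets secretbox allocate.
		sealed := secretbox.Seal(nil, block, &nonce, &key)

		// Open authenticates and decrypts with the same per-block nonce.
		plain, ok := secretbox.Open(nil, sealed, &nonce, &key)
		if !ok {
			panic("block failed to authenticate")
		}
		fmt.Printf("%s -> %d sealed bytes\n", plain, len(sealed))
		increment(&nonce) // a nonce must never be reused with the same key
	}
}
```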
View File

@@ -159,7 +159,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
if strings.HasPrefix(remote, name+":") {
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
}
-// Make sure to remove trailing . referring to the current dir
+// Make sure to remove trailing . reffering to the current dir
if path.Base(rpath) == "." {
rpath = strings.TrimSuffix(rpath, ".")
}

View File

@@ -87,7 +87,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
}
// wrap the object in a crypt for upload using the nonce we
-// saved from the encrypter
+// saved from the encryptor
src := f.newObjectInfo(oi, nonce)
// Test ObjectInfo methods

View File

@@ -35,7 +35,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
-"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
@@ -471,21 +470,6 @@ Note that this detection is relying on error message strings which
Google don't document so it may break in the future.
See: https://github.com/rclone/rclone/issues/3857
`,
Advanced: true,
-}, {
-Name: "stop_on_download_limit",
-Default: false,
-Help: `Make download limit errors be fatal
-At the time of writing it is only possible to download 10TB of data from
-Google Drive a day (this is an undocumented limit). When this limit is
-reached Google Drive produces a slightly different error message. When
-this flag is set it causes these errors to be fatal. These will stop
-the in-progress sync.
-Note that this detection is relying on error message strings which
-Google don't document so it may break in the future.
-`,
-Advanced: true,
}, {
@@ -555,7 +539,6 @@ type Options struct {
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
DisableHTTP2 bool `config:"disable_http2"`
StopOnUploadLimit bool `config:"stop_on_upload_limit"`
-StopOnDownloadLimit bool `config:"stop_on_download_limit"`
SkipShortcuts bool `config:"skip_shortcuts"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -655,9 +638,6 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
return false, fserrors.FatalError(err)
}
return true, err
-} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
-fs.Errorf(f, "Received download limit error: %v", err)
-return false, fserrors.FatalError(err)
} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
fs.Errorf(f, "Received team drive file limit error: %v", err)
return false, fserrors.FatalError(err)
@@ -2045,10 +2025,10 @@ func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Tim
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-existingObj, err := f.NewObject(ctx, src.Remote())
+exisitingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
-return existingObj, existingObj.Update(ctx, in, src, options...)
+return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
@@ -2979,38 +2959,6 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
return f.unTrash(ctx, dir, directoryID, true)
}
-// copy file with id to dest
-func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
-info, err := f.getFile(id, f.fileFields)
-if err != nil {
-return errors.Wrap(err, "couldn't find id")
-}
-if info.MimeType == driveFolderType {
-return errors.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
-}
-info.Name = f.opt.Enc.ToStandardName(info.Name)
-o, err := f.newObjectWithInfo(info.Name, info)
-if err != nil {
-return err
-}
-destDir, destLeaf, err := fspath.Split(dest)
-if err != nil {
-return err
-}
-if destLeaf == "" {
-destLeaf = info.Name
-}
-dstFs, err := cache.Get(destDir)
-if err != nil {
-return err
-}
-_, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
-if err != nil {
-return errors.Wrap(err, "copy failed")
-}
-return nil
-}
var commandHelp = []fs.CommandHelp{{
Name: "get",
Short: "Get command for fetching the drive config parameters",
@@ -3111,29 +3059,6 @@ Result:
"Errors": 0
}
`,
-}, {
-Name: "copyid",
-Short: "Copy files by ID",
-Long: `This command copies files by ID
-Usage:
-rclone backend copyid drive: ID path
-rclone backend copyid drive: ID1 path1 ID2 path2
-It copies the drive file with ID given to the path (an rclone path which
-will be passed internally to rclone copyto). The ID and path pairs can be
-repeated.
-The path should end with a / to indicate copy the file as named to
-this directory. If it doesn't end with a / then the last path
-component will be used as the file name.
-If the destination is a drive backend then server side copying will be
-attempted if possible.
-Use the -i flag to see what would be copied before copying.
-`,
}}
// Command the backend to run a named command
@@ -3205,19 +3130,6 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
dir = arg[0]
}
return f.unTrashDir(ctx, dir, true)
-case "copyid":
-if len(arg)%2 != 0 {
-return nil, errors.New("need an even number of arguments")
-}
-for len(arg) > 0 {
-id, dest := arg[0], arg[1]
-arg = arg[2:]
-err = f.copyID(ctx, id, dest)
-if err != nil {
-return nil, errors.Wrapf(err, "failed copying %q to %q", id, dest)
-}
-}
-return nil, nil
default:
return nil, fs.ErrorCommandNotFound
}

View File

@@ -7,8 +7,6 @@ import (
"io"
"io/ioutil"
"mime"
-"os"
-"path"
"path/filepath"
"strings"
"testing"
@@ -274,15 +272,14 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
}
}
-const (
-// from fstest/fstests/fstests.go
-existingDir = "hello? sausage"
-existingFile = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
-existingSubDir = "êé"
-)
// TestIntegration/FsMkdir/FsPutFiles/Internal/Shortcuts
func (f *Fs) InternalTestShortcuts(t *testing.T) {
+const (
+// from fstest/fstests/fstests.go
+existingDir = "hello? sausage"
+existingFile = `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`
+existingSubDir = "êé"
+)
ctx := context.Background()
srcObj, err := f.NewObject(ctx, existingFile)
require.NoError(t, err)
@@ -411,55 +408,6 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
require.NoError(t, f.Purge(ctx, "trashDir"))
}
-// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
-func (f *Fs) InternalTestCopyID(t *testing.T) {
-ctx := context.Background()
-obj, err := f.NewObject(ctx, existingFile)
-require.NoError(t, err)
-o := obj.(*Object)
-dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
-require.NoError(t, err)
-defer func() {
-_ = os.RemoveAll(dir)
-}()
-checkFile := func(name string) {
-filePath := filepath.Join(dir, name)
-fi, err := os.Stat(filePath)
-require.NoError(t, err)
-assert.Equal(t, int64(100), fi.Size())
-err = os.Remove(filePath)
-require.NoError(t, err)
-}
-t.Run("BadID", func(t *testing.T) {
-err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
-require.Error(t, err)
-assert.Contains(t, err.Error(), "couldn't find id")
-})
-t.Run("Directory", func(t *testing.T) {
-rootID, err := f.dirCache.RootID(ctx, false)
-require.NoError(t, err)
-err = f.copyID(ctx, rootID, dir+"/")
-require.Error(t, err)
-assert.Contains(t, err.Error(), "can't copy directory")
-})
-t.Run("WithoutDestName", func(t *testing.T) {
-err = f.copyID(ctx, o.id, dir+"/")
-require.NoError(t, err)
-checkFile(path.Base(existingFile))
-})
-t.Run("WithDestName", func(t *testing.T) {
-err = f.copyID(ctx, o.id, dir+"/potato.txt")
-require.NoError(t, err)
-checkFile("potato.txt")
-})
-}
func (f *Fs) InternalTest(t *testing.T) {
// These tests all depend on each other so run them as nested tests
t.Run("DocumentImport", func(t *testing.T) {
@@ -476,7 +424,6 @@ func (f *Fs) InternalTest(t *testing.T) {
})
t.Run("Shortcuts", f.InternalTestShortcuts)
t.Run("UnTrash", f.InternalTestUnTrash)
-t.Run("CopyID", f.InternalTestCopyID)
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -1193,7 +1193,7 @@ func (o *Object) Size() int64 {
// setMetadataFromEntry sets the fs data from a files.FileMetadata
//
-// This isn't a complete set of metadata and has an inaccurate date
+// This isn't a complete set of metadata and has an inacurate date
func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
o.id = info.Id
o.bytes = int64(info.Size)

View File

@@ -306,10 +306,10 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-existingObj, err := f.NewObject(ctx, src.Remote())
+exisitingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
-return existingObj, existingObj.Update(ctx, in, src, options...)
+return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)
@@ -323,7 +323,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
-if size > int64(300e9) {
+if size > int64(100e9) {
return nil, errors.New("File too big, cant upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles

View File

@@ -6,6 +6,7 @@ import (
"crypto/tls"
"io"
"net/textproto"
+"os"
"path"
"runtime"
"strings"
@@ -21,15 +22,10 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
-"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
)
-var (
-currentUser = env.CurrentUser()
-)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -46,7 +42,7 @@ func init() {
}},
}, {
Name: "user",
-Help: "FTP username, leave blank for current username, " + currentUser,
+Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
}, {
Name: "port",
Help: "FTP port, leave blank to use default (21)",
@@ -315,7 +311,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
}
user := opt.User
if user == "" {
-user = currentUser
+user = os.Getenv("USER")
}
port := opt.Port
if port == "" {

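The hunk above swaps a cached `env.CurrentUser()` default for a bare `os.Getenv("USER")`. A sketch of the fallback chain such a helper needs, mirroring the sftp backend's readCurrentUser shown later in this comparison; note that `$USER` is typically unset on Windows, which is why `os/user` is tried first:

```go
package main

import (
	"fmt"
	"os"
	"os/user"
)

// currentUser resolves the local user name: os/user first, then the
// $USER and $LOGNAME environment variables as progressively weaker
// fallbacks. A bare os.Getenv("USER") returns "" on Windows.
func currentUser() string {
	if u, err := user.Current(); err == nil {
		return u.Username
	}
	if name := os.Getenv("USER"); name != "" {
		return name
	}
	return os.Getenv("LOGNAME")
}

func main() {
	fmt.Println("default FTP username:", currentUser())
}
```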
View File

@@ -115,7 +115,7 @@ func TestIntegration(t *testing.T) {
assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
})
-// Check it is there in the date/month/year hierarchy
+// Check it is there in the date/month/year heirachy
// 2013-07-13 is the creation date of the folder
checkPresent := func(t *testing.T, objPath string) {
entries, err := f.List(ctx, objPath)

View File

@@ -4,7 +4,7 @@ package hubic
// This uses the normal swift mechanism to update the credentials and
// ignores the expires field returned by the Hubic API. This may need
-// to be revisited after some actual experience.
+// to be revisted after some actual experience.
import (
"context"

View File

@@ -153,9 +153,9 @@ type CustomerInfo struct {
AccountType string `json:"account_type"`
SubscriptionType string `json:"subscription_type"`
Usage int64 `json:"usage"`
-Quota int64 `json:"quota"`
+Qouta int64 `json:"quota"`
BusinessUsage int64 `json:"business_usage"`
-BusinessQuota int64 `json:"business_quota"`
+BusinessQouta int64 `json:"business_quota"`
WriteLocked bool `json:"write_locked"`
ReadLocked bool `json:"read_locked"`
LockedCause interface{} `json:"locked_cause"`
@@ -386,7 +386,7 @@ type Error struct {
Cause string `xml:"cause"`
}
-// Error returns a string for the error and satisfies the error interface
+// Error returns a string for the error and statistifes the error interface
func (e *Error) Error() string {
out := fmt.Sprintf("error %d", e.StatusCode)
if e.Message != "" {

View File

@@ -107,7 +107,7 @@ func init() {
}
}
-fmt.Printf("Use legacy authentication?.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n")
+fmt.Printf("Use legacy authentification?.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.\n")
if config.Confirm(false) {
v1config(ctx, name, m)
} else {
@@ -230,7 +230,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
-// v1config configure a jottacloud backend using legacy authentication
+// v1config configure a jottacloud backend using legacy authentification
func v1config(ctx context.Context, name string, m configmap.Mapper) {
srv := rest.NewClient(fshttp.NewClient(fs.Config))
@@ -323,7 +323,7 @@ func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegis
return deviceRegistration, err
}
-// doAuthV1 runs the actual token request for V1 authentication
+// doAuthV1 runs the actual token request for V1 authentification
func doAuthV1(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
// prepare out token request with username and password
values := url.Values{}
@@ -365,7 +365,7 @@ func doAuthV1(ctx context.Context, srv *rest.Client, username, password string)
return token, err
}
-// v2config configure a jottacloud backend using the modern JottaCli token based authentication
+// v2config configure a jottacloud backend using the modern JottaCli token based authentification
func v2config(ctx context.Context, name string, m configmap.Mapper) {
srv := rest.NewClient(fshttp.NewClient(fs.Config))
@@ -373,9 +373,6 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
fmt.Printf("Login Token> ")
loginToken := config.ReadLine()
-m.Set(configClientID, "jottacli")
-m.Set(configClientSecret, "")
token, err := doAuthV2(ctx, srv, loginToken, m)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
@@ -387,6 +384,7 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
fmt.Printf("\nDo you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?\n\n")
if config.Confirm(false) {
+oauthConfig.ClientID = "jottacli"
oAuthClient, _, err := oauthutil.NewClient(name, m, oauthConfig)
if err != nil {
log.Fatalf("Failed to load oAuthClient: %s", err)
@@ -405,7 +403,7 @@ func v2config(ctx context.Context, name string, m configmap.Mapper) {
m.Set("configVersion", strconv.Itoa(configVersion))
}
-// doAuthV2 runs the actual token request for V2 authentication
+// doAuthV2 runs the actual token request for V2 authentification
func doAuthV2(ctx context.Context, srv *rest.Client, loginTokenBase64 string, m configmap.Mapper) (token oauth2.Token, err error) {
loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
if err != nil {
@@ -553,7 +551,7 @@ func (f *Fs) setEndpointURL() {
if f.opt.Mountpoint == "" {
f.opt.Mountpoint = defaultMountpoint
}
-f.endpointURL = path.Join(f.user, f.opt.Device, f.opt.Mountpoint)
+f.endpointURL = urlPathEscape(path.Join(f.user, f.opt.Device, f.opt.Mountpoint))
}
// readMetaDataForPath reads the metadata from the path
@@ -1089,7 +1087,8 @@ func (f *Fs) copyOrMove(ctx context.Context, method, src, dest string) (info *ap
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallXML(ctx, &opts, nil, &info)
-return shouldRetry(resp, err)
+retry, _ := shouldRetry(resp, err)
+return (retry && resp.StatusCode != 500), err
})
if err != nil {
return nil, err
@@ -1193,6 +1192,18 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
_, err = f.copyOrMove(ctx, "mvDir", path.Join(f.endpointURL, f.opt.Enc.FromStandardPath(srcPath))+"/", dstRemote)
// surprise! jottacloud fucked up dirmove - the api spits out an error but
// dir gets moved regardless
+if apiErr, ok := err.(*api.Error); ok {
+if apiErr.StatusCode == 500 {
+_, err := f.NewObject(ctx, dstRemote)
+if err == fs.ErrorNotAFile {
+log.Printf("FIXME: ignoring DirMove error - move succeeded anyway\n")
+return nil
+}
+return err
+}
+}
if err != nil {
return errors.Wrap(err, "couldn't move directory")
}
@@ -1512,7 +1523,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
-// If the file state is INCOMPLETE and CORRUPT, try to upload a then
+// If the file state is INCOMPLETE and CORRPUT, try to upload a then
if response.State != "COMPLETED" {
// how much do we still have to upload?
remainingBytes := size - response.ResumePos

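The lines added on the right-hand side of the DirMove hunk above implement a probe-after-error workaround: the API returns a 500 for mvDir even when the move succeeded, so the code checks whether the destination now exists as a directory. A condensed sketch of that pattern, with illustrative names rather than the real backend types:

```go
package jottaclouddemo

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// moveDirWithProbe sketches the workaround pattern from the hunk above:
// run the move, and if the server reports an error, probe the destination.
// fs.ErrorNotAFile from NewObject means the destination exists and is a
// directory, i.e. the move actually succeeded despite the error.
func moveDirWithProbe(ctx context.Context, f fs.Fs, dstRemote string, doMove func() error) error {
	err := doMove()
	if err == nil {
		return nil
	}
	if _, probeErr := f.NewObject(ctx, dstRemote); probeErr == fs.ErrorNotAFile {
		return nil // the directory moved even though the API said 500
	}
	return err
}
```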
View File

@@ -1231,15 +1231,6 @@ func (o *Object) setMetadata(info os.FileInfo) {
o.modTime = info.ModTime()
o.mode = info.Mode()
o.fs.objectMetaMu.Unlock()
-// On Windows links read as 0 size so set the correct size here
-if runtime.GOOS == "windows" && o.translatedLink {
-linkdst, err := os.Readlink(o.path)
-if err != nil {
-fs.Errorf(o, "Failed to read link size: %v", err)
-} else {
-o.size = int64(len(linkdst))
-}
-}
}
// Stat an Object into info

View File

@@ -6,6 +6,7 @@ import (
"os"
"path"
"path/filepath"
+"runtime"
"testing"
"time"
@@ -88,6 +89,9 @@ func TestSymlink(t *testing.T) {
// Object viewed as symlink
file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
+if runtime.GOOS == "windows" {
+file2.Size = 0 // symlinks are 0 length under Windows
+}
// Object viewed as destination
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
@@ -117,6 +121,9 @@ func TestSymlink(t *testing.T) {
// Create a symlink
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
+if runtime.GOOS == "windows" {
+file3.Size = 0 // symlinks are 0 length under Windows
+}
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
@@ -135,7 +142,9 @@ func TestSymlink(t *testing.T) {
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
require.NoError(t, err)
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
-assert.Equal(t, int64(8), o.Size())
+if runtime.GOOS != "windows" {
+assert.Equal(t, int64(8), o.Size())
+}
// Check that NewObject doesn't see the non suffixed version
_, err = r.Flocal.NewObject(ctx, "symlink2.txt")

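The test changes above special-case Windows, where Lstat reports a symlink as 0 bytes; the backend change earlier in this comparison instead derives the size from the link target. A small sketch of that derivation (creating symlinks on Windows may require elevated privileges; the file names are illustrative):

```go
package main

import (
	"fmt"
	"os"
)

// linkSize returns the length of a symlink's target string — the value
// the removed setMetadata code substitutes on Windows, where Lstat
// reports symlinks as 0 bytes.
func linkSize(path string) (int64, error) {
	linkdst, err := os.Readlink(path)
	if err != nil {
		return 0, err
	}
	return int64(len(linkdst)), nil
}

func main() {
	if err := os.Symlink("file.txt", "symlink.txt"); err != nil {
		panic(err)
	}
	defer os.Remove("symlink.txt")
	n, _ := linkSize("symlink.txt")
	fmt.Println("symlink size:", n) // 8, matching the test's int64(8)
}
```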
View File

@@ -117,7 +117,7 @@ type ListItem struct {
Name string `json:"name"`
Home string `json:"home"`
Size int64 `json:"size"`
-Mtime uint64 `json:"mtime,omitempty"`
+Mtime int64 `json:"mtime,omitempty"`
Hash string `json:"hash,omitempty"`
VirusScan string `json:"virus_scan,omitempty"`
Tree string `json:"tree,omitempty"`

View File

@@ -192,7 +192,7 @@ This option must not be used by an ordinary user. It is intended only to
facilitate remote troubleshooting of backend issues. Strict meaning of
flags is not documented and not guaranteed to persist between releases.
Quirks will be removed when the backend grows stable.
-Supported quirks: atomicmkdir binlist`,
+Supported quirks: atomicmkdir binlist gzip insecure retry400`,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -238,6 +238,9 @@ func shouldRetry(res *http.Response, err error, f *Fs, opts *rest.Opts) (bool, e
reAuthErr := f.reAuthorize(opts, err)
return reAuthErr == nil, err // return an original error
}
+if res != nil && res.StatusCode == 400 && f.quirks.retry400 {
+return true, err
+}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(res, retryErrorCodes), err
}
@@ -273,7 +276,7 @@ type Fs struct {
root string // root path
opt Options // parsed options
speedupGlobs []string // list of file name patterns eligible for speedup
-speedupAny bool // true if all file names are eligible for speedup
+speedupAny bool // true if all file names are aligible for speedup
features *fs.Features // optional features
srv *rest.Client // REST API client
cli *http.Client // underlying HTTP client (for authorize)
@@ -339,7 +342,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if opt.UserAgent != "" {
clientConfig.UserAgent = opt.UserAgent
}
-clientConfig.NoGzip = true // Mimic official client, skip sending "Accept-Encoding: gzip"
+clientConfig.NoGzip = !f.quirks.gzip // Send not "Accept-Encoding: gzip" like official client
f.cli = fshttp.NewClient(&clientConfig)
f.srv = rest.NewClient(f.cli)
@@ -347,6 +350,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.srv.SetHeader("Accept", "*/*") // Send "Accept: */*" with every request like official client
f.srv.SetErrorHandler(errorHandler)
+if f.quirks.insecure {
+transport := f.cli.Transport.(*fshttp.Transport).Transport
+transport.TLSClientConfig.InsecureSkipVerify = true
+transport.ProxyConnectHeader = http.Header{"User-Agent": {clientConfig.UserAgent}}
+}
if err = f.authorize(ctx, false); err != nil {
return nil, err
}
@@ -379,13 +388,30 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Internal maintenance flags (to be removed when the backend matures).
// Primarily intended to facilitate remote support and troubleshooting.
type quirks struct {
+gzip bool
+insecure bool
binlist bool
atomicmkdir bool
+retry400 bool
}
func (q *quirks) parseQuirks(option string) {
for _, flag := range strings.Split(option, ",") {
switch strings.ToLower(strings.TrimSpace(flag)) {
+case "gzip":
+// This backend mimics the official client which never sends the
+// "Accept-Encoding: gzip" header. However, enabling compression
+// might be good for performance.
+// Use this quirk to investigate the performance impact.
+// Remove this quirk if performance does not improve.
+q.gzip = true
+case "insecure":
+// The mailru disk-o protocol is not documented. To compare HTTP
+// stream against the official client one can use Telerik Fiddler,
+// which introduces a self-signed certificate. This quirk forces
+// the Go http layer to accept it.
+// Remove this quirk when the backend reaches maturity.
+q.insecure = true
case "binlist":
// The official client sometimes uses a so called "bin" protocol,
// implemented in the listBin file system method below. This method
@@ -398,11 +424,18 @@ func (q *quirks) parseQuirks(option string) {
case "atomicmkdir":
// At the moment rclone requires Mkdir to return success if the
// directory already exists. However, such programs as borgbackup
-// use mkdir as a locking primitive and depend on its atomicity.
-// Remove this quirk when the above issue is investigated.
+// or restic use mkdir as a locking primitive and depend on its
+// atomicity. This quirk is a workaround. It can be removed
+// when the above issue is investigated.
q.atomicmkdir = true
+case "retry400":
+// This quirk will help in troubleshooting a very rare "Error 400"
+// issue. It can be removed if the problem does not show up
+// for a year or so. See the below issue:
+// https://github.com/ivandeex/rclone/issues/14
+q.retry400 = true
default:
-// Ignore unknown flags
+// Just ignore all unknown flags
}
}
}
@@ -623,14 +656,9 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
if err != nil {
return nil, -1, err
}
-mTime := int64(item.Mtime)
-if mTime < 0 {
-fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
-mTime = 0
-}
switch item.Kind {
case "folder":
-dir := fs.NewDir(remote, time.Unix(mTime, 0)).SetSize(item.Size)
+dir := fs.NewDir(remote, time.Unix(item.Mtime, 0)).SetSize(item.Size)
dirSize := item.Count.Files + item.Count.Folders
return dir, dirSize, nil
case "file":
@@ -644,7 +672,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
hasMetaData: true,
size: item.Size,
mrHash: binHash,
-modTime: time.Unix(mTime, 0),
+modTime: time.Unix(item.Mtime, 0),
}
return file, -1, nil
default:
@@ -2214,7 +2242,7 @@ func (e *endHandler) handle(err error) error {
return io.EOF
}
// serverPool backs server dispatcher
// serverPool backs server dispacher
type serverPool struct {
pool pendingServerMap
mu sync.Mutex

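parseQuirks in the hunk above turns one comma-separated option string into independent boolean switches and silently ignores unknown flags, so stale config values stay harmless. A condensed, runnable sketch of the same structure:

```go
package main

import (
	"fmt"
	"strings"
)

// quirks mirrors the maintenance-flag struct in the hunk above.
type quirks struct {
	gzip, insecure, binlist, atomicmkdir, retry400 bool
}

// parse splits the comma-separated option, normalises each flag and
// toggles the matching boolean; unknown flags are deliberately ignored.
func (q *quirks) parse(option string) {
	for _, flag := range strings.Split(option, ",") {
		switch strings.ToLower(strings.TrimSpace(flag)) {
		case "gzip":
			q.gzip = true
		case "insecure":
			q.insecure = true
		case "binlist":
			q.binlist = true
		case "atomicmkdir":
			q.atomicmkdir = true
		case "retry400":
			q.retry400 = true
		default:
			// ignore unknown flags so old configs keep working
		}
	}
}

func main() {
	var q quirks
	q.parse("atomicmkdir, Retry400, bogus")
	fmt.Printf("%+v\n", q) // atomicmkdir and retry400 set, bogus ignored
}
```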
View File

@@ -221,7 +221,7 @@ func (f *Fs) setRoot(root string) {
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}
-// NewFs constructs an Fs from the path, bucket:path
+// NewFs contstructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)

View File

@@ -254,7 +254,7 @@ type MoveItemRequest struct {
//Always Type:view and Scope:anonymous for public sharing
type CreateShareLinkRequest struct {
Type string `json:"type"` //Link type in View, Edit or Embed
-Scope string `json:"scope,omitempty"` //Optional. Scope in anonymous, organization
+Scope string `json:"scope,omitempty"` //Optional. Scope in anonymousi, organization
}
//CreateShareLinkResponse is the response from CreateShareLinkRequest

View File

@@ -1247,10 +1247,6 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return nil, errors.Wrap(err, "about failed")
}
q := drive.Quota
-// On (some?) Onedrive sharepoints these are all 0 so return unknown in that case
-if q.Total == 0 && q.Used == 0 && q.Deleted == 0 && q.Remaining == 0 {
-return &fs.Usage{}, nil
-}
usage = &fs.Usage{
Total: fs.NewUsageValue(q.Total), // quota of bytes that can be used
Used: fs.NewUsageValue(q.Used), // bytes in use

View File

@@ -1125,7 +1125,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Special treatment for a 0 length upload. This doesn't work
// with PUT even with Content-Length set (by setting
-// opts.Body=0), so upload it as a multipart form POST with
+// opts.Body=0), so upload it as a multpart form POST with
// Content-Length set.
if size == 0 {
formReader, contentType, overhead, err := rest.MultipartUpload(in, opts.Parameters, "content", leaf)

View File

@@ -236,10 +236,10 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) {
// defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err)
-existingObj, err := f.NewObject(ctx, src.Remote())
+exisitingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
-return existingObj, existingObj.Update(ctx, in, src, options...)
+return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
return f.PutUnchecked(ctx, in, src, options...)

View File

@@ -115,7 +115,7 @@ func (o *Object) MimeType(ctx context.Context) string {
// setMetadataFromEntry sets the fs data from a putio.File
//
-// This isn't a complete set of metadata and has an inaccurate date
+// This isn't a complete set of metadata and has an inacurate date
func (o *Object) setMetadataFromEntry(info putio.File) error {
o.file = &info
o.modtime = info.UpdatedAt.Time

View File

@@ -104,7 +104,7 @@ enough memory, then increasing this will speed up the transfers.`,
This is the number of chunks of the same file that are uploaded
concurrently.
-NB if you set this to > 1 then the checksums of multipart uploads
+NB if you set this to > 1 then the checksums of multpart uploads
become corrupted (the uploads themselves are not corrupted though).
If you are uploading small numbers of large file over high speed link

View File

@@ -5,7 +5,6 @@ import (
"bytes"
"context"
"crypto/md5"
-"crypto/tls"
"encoding/base64"
"encoding/hex"
"encoding/xml"
@@ -123,9 +122,6 @@ func init() {
Name: "secret_access_key",
Help: "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
}, {
-// References:
-// 1. https://docs.aws.amazon.com/general/latest/gr/rande.html
-// 2. https://docs.aws.amazon.com/general/latest/gr/s3.html
Name: "region",
Help: "Region to connect to.",
Provider: "AWS",
@@ -135,12 +131,12 @@ func init() {
}, {
Value: "us-east-2",
Help: "US East (Ohio) Region\nNeeds location constraint us-east-2.",
-}, {
-Value: "us-west-1",
-Help: "US West (Northern California) Region\nNeeds location constraint us-west-1.",
}, {
Value: "us-west-2",
Help: "US West (Oregon) Region\nNeeds location constraint us-west-2.",
+}, {
+Value: "us-west-1",
+Help: "US West (Northern California) Region\nNeeds location constraint us-west-1.",
}, {
Value: "ca-central-1",
Help: "Canada (Central) Region\nNeeds location constraint ca-central-1.",
@@ -150,15 +146,9 @@ func init() {
}, {
Value: "eu-west-2",
Help: "EU (London) Region\nNeeds location constraint eu-west-2.",
-}, {
-Value: "eu-west-3",
-Help: "EU (Paris) Region\nNeeds location constraint eu-west-3.",
}, {
Value: "eu-north-1",
Help: "EU (Stockholm) Region\nNeeds location constraint eu-north-1.",
-}, {
-Value: "eu-south-1",
-Help: "EU (Milan) Region\nNeeds location constraint eu-south-1.",
}, {
Value: "eu-central-1",
Help: "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
@@ -174,9 +164,6 @@ func init() {
}, {
Value: "ap-northeast-2",
Help: "Asia Pacific (Seoul)\nNeeds location constraint ap-northeast-2.",
-}, {
-Value: "ap-northeast-3",
-Help: "Asia Pacific (Osaka-Local)\nNeeds location constraint ap-northeast-3.",
}, {
Value: "ap-south-1",
Help: "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1.",
@@ -186,24 +173,6 @@ func init() {
}, {
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
-}, {
-Value: "me-south-1",
-Help: "Middle East (Bahrain) Region\nNeeds location constraint me-south-1.",
-}, {
-Value: "af-south-1",
-Help: "Africa (Cape Town) Region\nNeeds location constraint af-south-1.",
-}, {
-Value: "cn-north-1",
-Help: "China (Beijing) Region\nNeeds location constraint cn-north-1.",
-}, {
-Value: "cn-northwest-1",
-Help: "China (Ningxia) Region\nNeeds location constraint cn-northwest-1.",
-}, {
-Value: "us-gov-east-1",
-Help: "AWS GovCloud (US-East) Region\nNeeds location constraint us-gov-east-1.",
-}, {
-Value: "us-gov-west-1",
-Help: "AWS GovCloud (US) Region\nNeeds location constraint us-gov-west-1.",
}},
}, {
Name: "region",
@@ -616,12 +585,12 @@ func init() {
}, {
Value: "us-east-2",
Help: "US East (Ohio) Region.",
-}, {
-Value: "us-west-1",
-Help: "US West (Northern California) Region.",
}, {
Value: "us-west-2",
Help: "US West (Oregon) Region.",
+}, {
+Value: "us-west-1",
+Help: "US West (Northern California) Region.",
}, {
Value: "ca-central-1",
Help: "Canada (Central) Region.",
@@ -631,15 +600,9 @@ func init() {
}, {
Value: "eu-west-2",
Help: "EU (London) Region.",
-}, {
-Value: "eu-west-3",
-Help: "EU (Paris) Region.",
}, {
Value: "eu-north-1",
Help: "EU (Stockholm) Region.",
-}, {
-Value: "eu-south-1",
-Help: "EU (Milan) Region.",
}, {
Value: "EU",
Help: "EU Region.",
@@ -654,37 +617,16 @@ func init() {
Help: "Asia Pacific (Tokyo) Region.",
}, {
Value: "ap-northeast-2",
-Help: "Asia Pacific (Seoul) Region.",
-}, {
-Value: "ap-northeast-3",
-Help: "Asia Pacific (Osaka-Local) Region.",
+Help: "Asia Pacific (Seoul)",
}, {
Value: "ap-south-1",
-Help: "Asia Pacific (Mumbai) Region.",
+Help: "Asia Pacific (Mumbai)",
}, {
Value: "ap-east-1",
-Help: "Asia Pacific (Hong Kong) Region.",
+Help: "Asia Pacific (Hong Kong)",
}, {
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region.",
-}, {
-Value: "me-south-1",
-Help: "Middle East (Bahrain) Region.",
-}, {
-Value: "af-south-1",
-Help: "Africa (Cape Town) Region.",
-}, {
-Value: "cn-north-1",
-Help: "China (Beijing) Region",
-}, {
-Value: "cn-northwest-1",
-Help: "China (Ningxia) Region.",
-}, {
-Value: "us-gov-east-1",
-Help: "AWS GovCloud (US-East) Region.",
-}, {
-Value: "us-gov-west-1",
-Help: "AWS GovCloud (US) Region.",
}},
}, {
Name: "location_constraint",
@@ -1204,19 +1146,6 @@ This option controls how often unused buffers will be removed from the pool.`,
Default: memoryPoolUseMmap,
Advanced: true,
Help: `Whether to use mmap buffers in internal memory pool.`,
-}, {
-Name: "disable_http2",
-Default: false,
-Advanced: true,
-Help: `Disable usage of http2 for S3 backends
-There is currently an unsolved issue with the s3 (specifically minio) backend
-and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be
-disabled here. When the issue is solved this flag will be removed.
-See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631
-`,
-},
}})
}
@@ -1274,7 +1203,6 @@ type Options struct {
Enc encoder.MultiEncoder `config:"encoding"`
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
-DisableHTTP2 bool `config:"disable_http2"`
}
// Fs represents a remote s3 server
@@ -1396,19 +1324,6 @@ func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}
-// getClient makes an http client according to the options
-func getClient(opt *Options) *http.Client {
-// TODO: Do we need cookies too?
-t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) {
-if opt.DisableHTTP2 {
-t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
-}
-})
-return &http.Client{
-Transport: t,
-}
-}
// s3Connection makes a connection to s3
func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
// Make the auth
@@ -1419,7 +1334,6 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
}
lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service
def := defaults.Get()
def.Config.HTTPClient = lowTimeoutClient
@@ -1488,7 +1402,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
awsConfig := aws.NewConfig().
WithMaxRetries(0). // Rely on rclone's retry logic
WithCredentials(cred).
-WithHTTPClient(getClient(opt)).
+WithHTTPClient(fshttp.NewClient(fs.Config)).
WithS3ForcePathStyle(opt.ForcePathStyle).
WithS3UseAccelerate(opt.UseAccelerateEndpoint).
WithS3UsEast1RegionalEndpoint(endpoints.RegionalS3UsEast1Endpoint)
@@ -1523,7 +1437,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
if req.Config.Credentials == credentials.AnonymousCredentials {
return
}
-v2sign(opt, req.HTTPRequest)
+sign(v.AccessKeyID, v.SecretAccessKey, req.HTTPRequest)
}
c.Handlers.Sign.Clear()
c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
@@ -1602,7 +1516,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ses: ses,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
-srv: getClient(opt),
+srv: fshttp.NewClient(fs.Config),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
@@ -2364,7 +2278,7 @@ All the objects shown will be marked for restore, then
rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard
It returns a list of status dictionaries with Remote and Status
-keys. The Status will be OK if it was successful or an error message
+keys. The Status will be OK if it was successfull or an error message
if not.
[
@@ -2529,7 +2443,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
// listMultipartUploads lists all outstanding multipart uploads for (bucket, key)
//
// Note that rather lazily we treat key as a prefix so it matches
-// directories and objects. This could surprise the user if they ask
+// directories and objects. This could suprise the user if they ask
// for "dir" and it returns "dirKey"
func (f *Fs) listMultipartUploads(ctx context.Context, bucket, key string) (uploads []*s3.MultipartUpload, err error) {
var (
@@ -3062,7 +2976,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// read the md5sum if available
-// - for non multipart
+// - for non multpart
// - so we can add a ContentMD5
// - for multipart provided checksums aren't disabled
// - so we can add the md5sum in the metadata as metaMD5Hash
@@ -3202,12 +3116,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Read the metadata from the newly created object
o.meta = nil // wipe old metadata
err = o.readMetaData(ctx)
-// Empty an Etag which is a valid md5sum for multipart
-// uploads. This works around a bug in KS3 where the ETag is a
-// correctly formed md5sum for multpart uploads
-if multipart && matchMd5.MatchString(strings.Trim(strings.ToLower(o.etag), `"`)) {
-o.etag = ""
-}
return err
}

View File

@@ -9,10 +9,7 @@ import (
"net/http"
"sort"
"strings"
-"sync"
"time"
-"github.com/rclone/rclone/fs"
)
// URL parameters that need to be added to the signature
@@ -36,20 +33,12 @@ var s3ParamsToSign = map[string]struct{}{
"response-cache-control": {},
"response-content-disposition": {},
"response-content-encoding": {},
-"lifecycle": {},
-"website": {},
-"delete": {},
-"cors": {},
-"restore": {},
}
-// Warn once about empty endpoint
-var warnEmptyEndpointOnce sync.Once
// sign signs requests using v2 auth
//
// Cobbled together from goamz and aws-sdk-go
-func v2sign(opt *Options, req *http.Request) {
+func sign(AccessKey, SecretKey string, req *http.Request) {
// Set date
date := time.Now().UTC().Format(time.RFC1123)
req.Header.Set("Date", date)
@@ -59,26 +48,11 @@ func v2sign(opt *Options, req *http.Request) {
if uri == "" {
uri = "/"
}
-// If not using path style then need to stick the bucket on
-// the start of the requests if doing a bucket based query
-if !opt.ForcePathStyle {
-if opt.Endpoint == "" {
-warnEmptyEndpointOnce.Do(func() {
-fs.Logf(nil, "If using v2 auth with AWS and force_path_style=false, endpoint must be set in the config")
-})
-} else if req.URL.Host != opt.Endpoint {
-// read the bucket off the start of the hostname
-i := strings.IndexRune(req.URL.Host, '.')
-if i >= 0 {
-uri = "/" + req.URL.Host[:i] + uri
-}
-}
-}
// Look through headers of interest
var md5 string
var contentType string
-var headersToSign [][2]string // slice of key, value pairs
+var headersToSign []string
for k, v := range req.Header {
k = strings.ToLower(k)
switch k {
@@ -89,26 +63,15 @@ func v2sign(opt *Options, req *http.Request) {
default:
if strings.HasPrefix(k, "x-amz-") {
vall := strings.Join(v, ",")
-headersToSign = append(headersToSign, [2]string{k, vall})
+headersToSign = append(headersToSign, k+":"+vall)
}
}
}
// Make headers of interest into canonical string
var joinedHeadersToSign string
if len(headersToSign) > 0 {
-// sort by keys
-sort.Slice(headersToSign, func(i, j int) bool {
-return headersToSign[i][0] < headersToSign[j][0]
-})
-// join into key:value\n
-var out strings.Builder
-for _, kv := range headersToSign {
-out.WriteString(kv[0])
-out.WriteRune(':')
-out.WriteString(kv[1])
-out.WriteRune('\n')
-}
-joinedHeadersToSign = out.String()
+sort.StringSlice(headersToSign).Sort()
+joinedHeadersToSign = strings.Join(headersToSign, "\n") + "\n"
}
// Look for query parameters which need to be added to the signature
@@ -133,11 +96,11 @@ func v2sign(opt *Options, req *http.Request) {
// Make signature
payload := req.Method + "\n" + md5 + "\n" + contentType + "\n" + date + "\n" + joinedHeadersToSign + uri
-hash := hmac.New(sha1.New, []byte(opt.SecretAccessKey))
+hash := hmac.New(sha1.New, []byte(SecretKey))
_, _ = hash.Write([]byte(payload))
signature := make([]byte, base64.StdEncoding.EncodedLen(hash.Size()))
base64.StdEncoding.Encode(signature, hash.Sum(nil))
// Set signature in request
-req.Header.Set("Authorization", "AWS "+opt.AccessKeyID+":"+string(signature))
+req.Header.Set("Authorization", "AWS "+AccessKey+":"+string(signature))
}

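The signing code above builds a v2 string-to-sign (method, MD5, content type, date, canonical x-amz- headers, then the URI), HMAC-SHA1s it with the secret key and base64-encodes the result into the Authorization header. A condensed sketch with illustrative parameters (the canonical header and query-parameter handling is elided for brevity):

```go
package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"fmt"
	"time"
)

// signV2 condenses the scheme from the hunk above: concatenate the
// request fields into the string-to-sign, HMAC-SHA1 it with the secret
// key and base64 the digest. canonicalHeaders, when non-empty, is the
// sorted "k:v\n" run the real code builds from x-amz- headers.
func signV2(method, md5, contentType, date, canonicalHeaders, uri, accessKey, secretKey string) string {
	payload := method + "\n" + md5 + "\n" + contentType + "\n" + date + "\n" + canonicalHeaders + uri
	mac := hmac.New(sha1.New, []byte(secretKey))
	_, _ = mac.Write([]byte(payload))
	signature := base64.StdEncoding.EncodeToString(mac.Sum(nil))
	return "AWS " + accessKey + ":" + signature
}

func main() {
	date := time.Now().UTC().Format(time.RFC1123)
	auth := signV2("GET", "", "", date, "", "/bucket/object", "AKIDEXAMPLE", "secret")
	fmt.Println("Authorization:", auth)
}
```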
View File

@@ -11,6 +11,7 @@ import (
"io"
"io/ioutil"
"os"
+"os/user"
"path"
"regexp"
"strconv"
@@ -32,7 +33,6 @@ import (
"github.com/rclone/rclone/lib/readers"
sshagent "github.com/xanzy/ssh-agent"
"golang.org/x/crypto/ssh"
-"golang.org/x/crypto/ssh/knownhosts"
)
const (
@@ -43,7 +43,7 @@ const (
)
var (
-currentUser = env.CurrentUser()
+currentUser = readCurrentUser()
)
func init() {
@@ -84,19 +84,7 @@ in the new OpenSSH format can't be used.`,
IsPassword: true,
}, {
Name: "pubkey_file",
-Help: `Optional path to public key file.
-Set this if you have a signed certificate you want to use for authentication.` + env.ShellExpandHelp,
-}, {
-Name: "known_hosts_file",
-Help: `Optional path to known_hosts file.
-Set this value to enable server host key validation.` + env.ShellExpandHelp,
-Advanced: true,
-Examples: []fs.OptionExample{{
-Value: "~/.ssh/known_hosts",
-Help: "Use OpenSSH's known_hosts file",
-}},
+Help: "Optional path to public key file; set this if you have a signed certificate you want to use for authentication." + env.ShellExpandHelp,
}, {
Name: "key_use_agent",
Help: `When set forces the usage of the ssh-agent.
@@ -206,7 +194,6 @@ type Options struct {
KeyFile string `config:"key_file"`
KeyFilePass string `config:"key_file_pass"`
PubKeyFile string `config:"pubkey_file"`
-KnownHostsFile string `config:"known_hosts_file"`
KeyUseAgent bool `config:"key_use_agent"`
UseInsecureCipher bool `config:"use_insecure_cipher"`
DisableHashCheck bool `config:"disable_hashcheck"`
@@ -235,7 +222,6 @@ type Fs struct {
poolMu sync.Mutex
pool []*conn
pacer *fs.Pacer // pacer for operations
-savedpswd string
}
// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -249,6 +235,20 @@ type Object struct {
sha1sum *string // Cached SHA1 checksum
}
+// readCurrentUser finds the current user name or "" if not found
+func readCurrentUser() (userName string) {
+usr, err := user.Current()
+if err == nil {
+return usr.Username
+}
+// Fall back to reading $USER then $LOGNAME
+userName = os.Getenv("USER")
+if userName != "" {
+return userName
+}
+return os.Getenv("LOGNAME")
+}
// dial starts a client connection to the given SSH server. It is a
// convenience function that connects to the given network address,
// initiates the SSH handshake, and then sets up a Client.
@@ -414,10 +414,6 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
-// This will hold the Fs object. We need to create it here
-// so we can refer to it in the SSH callback, but it's populated
-// in NewFsWithConnection
-f := &Fs{}
ctx := context.Background()
// Parse config into Options struct
opt := new(Options)
@@ -431,7 +427,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if opt.Port == "" {
opt.Port = "22"
}
sshConfig := &ssh.ClientConfig{
User: opt.User,
Auth: []ssh.AuthMethod{},
@@ -440,14 +435,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ClientVersion: "SSH-2.0-" + fs.Config.UserAgent,
}
-if opt.KnownHostsFile != "" {
-hostcallback, err := knownhosts.New(opt.KnownHostsFile)
-if err != nil {
-return nil, errors.Wrap(err, "couldn't parse known_hosts_file")
-}
-sshConfig.HostKeyCallback = hostcallback
-}
if opt.UseInsecureCipher {
sshConfig.Config.SetDefaults()
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc", "aes192-cbc", "aes256-cbc", "3des-cbc")
@@ -540,16 +527,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// And the signer for this, which includes the private key signer
// This is what we'll pass to the ssh client.
// Normally the ssh client will use the public key built
-// into the private key, but we need to tell it to use the user
-// specified public key cert. This signer is specific to the
-// cert and will include the private key signer. Now ssh
-// knows everything it needs.
-cert, ok := pk.(*ssh.Certificate)
-if !ok {
-return nil, errors.New("public key file is not a certificate file: " + pubkeyFile)
-}
-pubsigner, err := ssh.NewCertSigner(cert, signer)
+pubsigner, err := ssh.NewCertSigner(pk.(*ssh.Certificate), signer)
if err != nil {
return nil, errors.Wrap(err, "error generating cert signer")
}
@@ -568,45 +546,30 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
}
-// Config for password if none was defined and we're allowed to
-// We don't ask now; we ask if the ssh connection succeeds
+// Ask for password if none was defined and we're allowed to
if opt.Pass == "" && opt.AskPassword {
-sshConfig.Auth = append(sshConfig.Auth, ssh.PasswordCallback(f.getPass))
-}
-return NewFsWithConnection(ctx, f, name, root, m, opt, sshConfig)
-}
-// If we're in password mode and ssh connection succeeds then this
-// callback is called. First time around we ask the user, and then
-// save it so on reconnection we give back the previous string.
-// This removes the ability to let the user correct a mistaken entry,
-// but means that reconnects are transparent.
-// We'll re-use config.Pass for this, 'cos we know it's not been
-// specified.
-func (f *Fs) getPass() (string, error) {
-for f.savedpswd == "" {
_, _ = fmt.Fprint(os.Stderr, "Enter SFTP password: ")
-f.savedpswd = config.ReadPassword()
+clearpass := config.ReadPassword()
+sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
}
-return f.savedpswd, nil
+return NewFsWithConnection(ctx, name, root, m, opt, sshConfig)
}
// NewFsWithConnection creates a new Fs object from the name and root and an ssh.ClientConfig. It connects to
// the host specified in the ssh.ClientConfig
-func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m configmap.Mapper, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
-// Populate the Filesystem Object
-f.name = name
-f.root = root
-f.absRoot = root
-f.opt = *opt
-f.m = m
-f.config = sshConfig
-f.url = "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root
-f.mkdirLock = newStringLock()
-f.pacer = fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
-f.savedpswd = ""
+func NewFsWithConnection(ctx context.Context, name string, root string, m configmap.Mapper, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
+f := &Fs{
+name: name,
+root: root,
+absRoot: root,
+opt: *opt,
+m: m,
+config: sshConfig,
+url: "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root,
+mkdirLock: newStringLock(),
+pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
SlowHash: true,
@@ -952,7 +915,7 @@ func (f *Fs) run(cmd string) ([]byte, error) {
session, err := c.sshClient.NewSession()
if err != nil {
-return nil, errors.Wrap(err, "run: get SFTP session")
+return nil, errors.Wrap(err, "run: get SFTP sessiion")
}
defer func() {
_ = session.Close()

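The certificate-auth path shown above parses the user-supplied public key file, asserts it really is an ssh.Certificate (the check the left-hand side of the diff adds), and wraps the private-key signer with ssh.NewCertSigner so the certificate is presented instead of the bare public key. A self-contained sketch under assumed file names (id_rsa, id_rsa-cert.pub are illustrative):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ssh"
)

// loadCertSigner parses a signed certificate file, checks it is really a
// certificate, and combines it with the private-key signer so the server
// is offered the certificate rather than the bare public key.
func loadCertSigner(certFile string, signer ssh.Signer) (ssh.Signer, error) {
	certBytes, err := ioutil.ReadFile(certFile)
	if err != nil {
		return nil, err
	}
	pk, _, _, _, err := ssh.ParseAuthorizedKey(certBytes)
	if err != nil {
		return nil, err
	}
	cert, ok := pk.(*ssh.Certificate)
	if !ok {
		return nil, fmt.Errorf("public key file is not a certificate file: %s", certFile)
	}
	return ssh.NewCertSigner(cert, signer)
}

func main() {
	keyBytes, err := ioutil.ReadFile("id_rsa")
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.ParsePrivateKey(keyBytes)
	if err != nil {
		log.Fatal(err)
	}
	certSigner, err := loadCertSigner("id_rsa-cert.pub", signer)
	if err != nil {
		log.Fatal(err)
	}
	_ = ssh.PublicKeys(certSigner) // use as an ssh.AuthMethod
}
```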
View File

@@ -95,7 +95,7 @@ type UploadSpecification struct {
ChunkURI string `json:"ChunkUri"` // Specifies the URI the client must send the file data to
FinishURI string `json:"FinishUri"` // If provided, specifies the final call the client must perform to finish the upload process
ProgressData string `json:"ProgressData"` // Allows the client to check progress of standard uploads
-IsResume bool `json:"IsResume"` // Specifies a Resumable upload is supported.
+IsResume bool `json:"IsResume"` // Specifies a Resumable upload is supproted.
ResumeIndex int64 `json:"ResumeIndex"` // Specifies the initial index for resuming, if IsResume is true.
ResumeOffset int64 `json:"ResumeOffset"` // Specifies the initial file offset by bytes, if IsResume is true
ResumeFileHash string `json:"ResumeFileHash"` // Specifies the MD5 hash of the first ResumeOffset bytes of the partial file found at the server

View File

@@ -1090,7 +1090,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (dst fs.Obj
} else if err != nil {
return nil, errors.Wrap(err, "copy: failed to examine destination dir")
} else {
-// otherwise need to copy via a temporary directory
+// otherwise need to copy via a temporary directlry
}
}

View File

@@ -350,7 +350,7 @@ func (f *Fs) getAuth(req *http.Request) (err error) {
// if have auth, check it is in date
if f.opt.Authorization == "" || f.opt.User == "" || f.authExpiry.IsZero() || time.Until(f.authExpiry) < expiryLeeway {
// Get the auth token
-f.srv.SetSigner(nil) // temporarily remove the signer so we don't infinitely recurse
+f.srv.SetSigner(nil) // temporariliy remove the signer so we don't infinitely recurse
err = f.getAuthToken(ctx)
f.srv.SetSigner(f.getAuth) // replace signer
if err != nil {

View File

@@ -67,12 +67,12 @@ func init() {
log.Fatalf("Couldn't create access grant: %v", err)
}
-serializedAccess, err := access.Serialize()
+serialziedAccess, err := access.Serialize()
if err != nil {
log.Fatalf("Couldn't serialize access grant: %v", err)
}
configMapper.Set("satellite_address", satellite)
-configMapper.Set("access_grant", serializedAccess)
+configMapper.Set("access_grant", serialziedAccess)
} else if provider == existingProvider {
config.FileDeleteKey(name, "satellite_address")
config.FileDeleteKey(name, "api_key")

View File

@@ -61,7 +61,7 @@ func (p *EpAll) Action(ctx context.Context, upstreams []*upstream.Fs, path strin
return p.epall(ctx, upstreams, path)
}
-// ActionEntries is ACTION category policy but receiving a set of candidate entries
+// ActionEntries is ACTION category policy but receivng a set of candidate entries
func (p *EpAll) ActionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound

View File

@@ -106,7 +106,7 @@ func (p *EpMfs) Search(ctx context.Context, upstreams []*upstream.Fs, path strin
return p.mfs(upstreams)
}
-// SearchEntries is SEARCH category policy but receiving a set of candidate entries
+// SearchEntries is SEARCH category policy but receivng a set of candidate entries
func (p *EpMfs) SearchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
if len(entries) == 0 {
return nil, fs.ErrorObjectNotFound

View File

@@ -14,7 +14,7 @@ func init() {
// FF stands for first found
// Search category: same as epff.
// Action category: same as epff.
-// Create category: Given the order of the candidates, act on the first one found.
+// Create category: Given the order of the candiates, act on the first one found.
type FF struct {
EpFF
}

View File

@@ -60,7 +60,7 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
Upstreams fs.SpaceSepList `config:"upstreams"`
-Remotes fs.SpaceSepList `config:"remotes"` // Deprecated
+Remotes fs.SpaceSepList `config:"remotes"` // Depreated
ActionPolicy string `config:"action_policy"`
CreatePolicy string `config:"create_policy"`
SearchPolicy string `config:"search_policy"`
@@ -145,16 +145,11 @@ func (f *Fs) Hashes() hash.Set {
// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
upstreams, err := f.create(ctx, dir)
-if err == fs.ErrorObjectNotFound {
-if dir != parentDir(dir) {
-if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
-return err
-}
-upstreams, err = f.create(ctx, dir)
-} else if dir == "" {
-// If root dirs not created then create them
-upstreams, err = f.upstreams, nil
+if err == fs.ErrorObjectNotFound && dir != parentDir(dir) {
+if err := f.Mkdir(ctx, parentDir(dir)); err != nil {
+return err
+}
+upstreams, err = f.create(ctx, dir)
}
if err != nil {
return err
@@ -572,7 +567,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-entriesList := make([][]upstream.Entry, len(f.upstreams))
+entriess := make([][]upstream.Entry, len(f.upstreams))
errs := Errors(make([]error, len(f.upstreams)))
multithread(len(f.upstreams), func(i int) {
u := f.upstreams[i]
@@ -585,7 +580,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
for j, e := range entries {
uEntries[j], _ = u.WrapEntry(e)
}
-entriesList[i] = uEntries
+entriess[i] = uEntries
})
if len(errs) == len(errs.FilterNil()) {
errs = errs.Map(func(e error) error {
@@ -599,7 +594,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
return nil, errs.Err()
}
-return f.mergeDirEntries(entriesList)
+return f.mergeDirEntries(entriess)
}
// ListR lists the objects and directories of the Fs starting
@@ -619,7 +614,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
var entriesList [][]upstream.Entry
var entriesList [][]upstream.Entry
errs := Errors(make([]error, len(f.upstreams)))
var mutex sync.Mutex
multithread(len(f.upstreams), func(i int) {
@@ -631,7 +626,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
uEntries[j], _ = u.WrapEntry(e)
}
mutex.Lock()
entriesList = append(entriesList, uEntries)
entriesList = append(entriesList, uEntries)
mutex.Unlock()
return nil
}
@@ -658,7 +653,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
}
return errs.Err()
}
entries, err := f.mergeDirEntries(entriesList)
entries, err := f.mergeDirEntries(entriesList)
if err != nil {
return err
}
@@ -729,9 +724,9 @@ func (f *Fs) searchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
return f.searchPolicy.SearchEntries(entries...)
}
func (f *Fs) mergeDirEntries(entriesList [][]upstream.Entry) (fs.DirEntries, error) {
func (f *Fs) mergeDirEntries(entriesList [][]upstream.Entry) (fs.DirEntries, error) {
entryMap := make(map[string]([]upstream.Entry))
for _, en := range entriesList {
for _, en := range entriesList {
if en == nil {
continue
}
@@ -823,7 +818,6 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, err
}
fs.Debugf(f, "actionPolicy = %T, createPolicy = %T, searchPolicy = %T", f.actionPolicy, f.createPolicy, f.searchPolicy)
var features = (&fs.Features{
CaseInsensitive: true,
DuplicateFiles: false,

View File

@@ -52,7 +52,7 @@ type Object struct {
f *Fs
}
// Entry describe a wrapped fs.DirEntry interface with the
// Entry describe a wrapped fs.DirEntry interface with the
// information of upstream Fs
type Entry interface {
fs.DirEntry

View File

@@ -12,7 +12,7 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestWebdavNextcloud:",
RemoteName: "TestWebdavNexcloud:",
NilObject: (*webdav.Object)(nil),
})
}

View File

@@ -20,7 +20,7 @@ type ResourceInfoRequestOptions struct {
Fields []string
}
//ResourceInfoResponse struct is returned by the API for metadata requests.
//ResourceInfoResponse struct is returned by the API for metadata requests.
type ResourceInfoResponse struct {
PublicKey string `json:"public_key"`
Name string `json:"name"`

View File

@@ -141,7 +141,7 @@ def main():
for name in sorted(bugfixes.keys()):
out(name)
# Read old changelog and split
# Read old changelog and split
with open("docs/content/changelog.md") as fd:
old_changelog = fd.read()
heading = "# Changelog"

View File

@@ -19,7 +19,7 @@ var (
// Flags
numberOfFiles = flag.Int("n", 1000, "Number of files to create")
averageFilesPerDirectory = flag.Int("files-per-directory", 10, "Average number of files per directory")
maxDepth = flag.Int("max-depth", 10, "Maximum depth of directory hierarchy")
maxDepth = flag.Int("max-depth", 10, "Maximum depth of directory hierarchy")
minFileSize = flag.Int64("min-size", 0, "Minimum size of file to create")
maxFileSize = flag.Int64("max-size", 100, "Maximum size of files to create")
minFileNameLength = flag.Int("min-name-length", 4, "Minimum length of file names to create")
@@ -61,7 +61,7 @@ func fileName() (name string) {
return name
}
// dir is a directory in the directory hierarchy being built up
// dir is a directory in the directory hierarchy being built up
type dir struct {
name string
depth int
@@ -69,7 +69,7 @@ type dir struct {
parent *dir
}
// Create a random directory hierarchy under d
// Create a random directory hierarchy under d
func (d *dir) createDirectories() {
for totalDirectories < directoriesToCreate {
newDir := &dir{
@@ -91,7 +91,7 @@ func (d *dir) createDirectories() {
return
}
// list the directory hierarchy
// list the directory hierarchy
func (d *dir) list(path string, output []string) []string {
dirPath := filepath.Join(path, d.name)
output = append(output, dirPath)

View File

@@ -1,4 +1,4 @@
// Package cmd implements the rclone command
// Package cmd implements the rclone command
//
// It is in a sub package so its internals can be re-used elsewhere
package cmd
@@ -21,7 +21,6 @@ import (
"sync"
"time"
systemd "github.com/iguanesolutions/go-systemd/v5"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
@@ -36,7 +35,6 @@ import (
"github.com/rclone/rclone/fs/rc/rcflags"
"github.com/rclone/rclone/fs/rc/rcserver"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/terminal"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
@@ -290,11 +288,6 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
}
fs.Debugf(nil, "%d go routines active\n", runtime.NumGoroutine())
if fs.Config.Progress && fs.Config.ProgressTerminalTitle {
// Clear terminal title
terminal.WriteTerminalTitle("")
}
// dump all running go-routines
if fs.Config.Dump&fs.DumpGoRoutines != 0 {
err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
@@ -371,12 +364,6 @@ func StartStats() func() {
// initConfig is run by cobra after initialising the flags
func initConfig() {
// Activate logger systemd support if systemd invocation ID is detected
_, sysdLaunch := systemd.GetInvocationID()
if sysdLaunch {
fs.Config.LogSystemdSupport = true // used during fslog.InitLogging()
}
// Start the logger
fslog.InitLogging()
@@ -392,13 +379,6 @@ func initConfig() {
// Write the args for debug purposes
fs.Debugf("rclone", "Version %q starting with parameters %q", fs.Version, os.Args)
// Inform user about systemd log support now that we have a logger
if sysdLaunch {
fs.Debugf("rclone", "systemd logging support automatically activated")
} else if fs.Config.LogSystemdSupport {
fs.Debugf("rclone", "systemd logging support manually activated")
}
// Start the remote control server if configured
_, err = rcserver.Start(&rcflags.Opt)
if err != nil {
@@ -513,7 +493,7 @@ func AddBackendFlags() {
if opt.IsPassword {
help += " (obscured)"
}
flag := flags.VarPF(pflag.CommandLine, opt, name, opt.ShortOpt, help)
flag := pflag.CommandLine.VarPF(opt, name, opt.ShortOpt, help)
if _, isBool := opt.Default.(bool); isBool {
flag.NoOptDefVal = "true"
}

View File

@@ -270,7 +270,7 @@ func (fsys *FS) Releasedir(path string, fh uint64) (errc int) {
return fsys.closeHandle(fh)
}
// Statfs reads overall stats on the filesystem
// Statfs reads overall stats on the filesystem
func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {
defer log.Trace(path, "")("stat=%+v, errc=%d", stat, &errc)
const blockSize = 4096

View File

@@ -111,7 +111,7 @@ whether the password is already obscured or not and put unobscured
passwords into the config file. If you want to be 100% certain that
the passwords get obscured then use the "--obscure" flag, or if you
are 100% certain you are already passing obscured passwords then use
"--no-obscure". You can also set obscured passwords using the
"--no-obscure". You can also set osbscured passwords using the
"rclone config password" command.
`

View File

@@ -43,7 +43,7 @@ This means that for most duplicated files the ` + "`dedupe`" + `
command will not be interactive.
` + "`dedupe`" + ` considers files to be identical if they have the
same file path and the same hash. If the backend does not support hashes (eg crypt wrapping
same hash. If the backend does not support hashes (eg crypt wrapping
Google Drive) then they will never be found to be identical. If you
use the ` + "`--size-only`" + ` flag then files will be considered
identical if they have the same size (any hash will be ignored). This

View File

@@ -2,7 +2,6 @@ package genautocomplete
import (
"log"
"os"
"github.com/rclone/rclone/cmd"
"github.com/spf13/cobra"
@@ -30,20 +29,11 @@ them directly
If you supply a command line argument the script will be written
there.
If output_file is "-", then the output will be written to stdout.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
out := "/etc/bash_completion.d/rclone"
if len(args) > 0 {
if args[0] == "-" {
err := cmd.Root.GenBashCompletion(os.Stdout)
if err != nil {
log.Fatal(err)
}
return
}
out = args[0]
}
err := cmd.Root.GenBashCompletionFile(out)

View File

@@ -2,7 +2,6 @@ package genautocomplete
import (
"log"
"os"
"github.com/rclone/rclone/cmd"
"github.com/spf13/cobra"
@@ -30,20 +29,11 @@ them directly
If you supply a command line argument the script will be written
there.
If output_file is "-", then the output will be written to stdout.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
out := "/etc/fish/completions/rclone.fish"
if len(args) > 0 {
if args[0] == "-" {
err := cmd.Root.GenFishCompletion(os.Stdout, true)
if err != nil {
log.Fatal(err)
}
return
}
out = args[0]
}
err := cmd.Root.GenFishCompletionFile(out, true)

View File

@@ -11,10 +11,8 @@ import (
func TestCompletionBash(t *testing.T) {
tempFile, err := ioutil.TempFile("", "completion_bash")
assert.NoError(t, err)
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
defer func() { _ = tempFile.Close() }()
defer func() { _ = os.Remove(tempFile.Name()) }()
bashCommandDefinition.Run(bashCommandDefinition, []string{tempFile.Name()})
@@ -23,32 +21,11 @@ func TestCompletionBash(t *testing.T) {
assert.NotEmpty(t, string(bs))
}
func TestCompletionBashStdout(t *testing.T) {
originalStdout := os.Stdout
tempFile, err := ioutil.TempFile("", "completion_zsh")
assert.NoError(t, err)
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
os.Stdout = tempFile
defer func() { os.Stdout = originalStdout }()
bashCommandDefinition.Run(bashCommandDefinition, []string{"-"})
output, err := ioutil.ReadFile(tempFile.Name())
assert.NoError(t, err)
assert.NotEmpty(t, string(output))
}
func TestCompletionZsh(t *testing.T) {
tempFile, err := ioutil.TempFile("", "completion_zsh")
assert.NoError(t, err)
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
defer func() { _ = tempFile.Close() }()
defer func() { _ = os.Remove(tempFile.Name()) }()
zshCommandDefinition.Run(zshCommandDefinition, []string{tempFile.Name()})
@@ -57,31 +34,11 @@ func TestCompletionZsh(t *testing.T) {
assert.NotEmpty(t, string(bs))
}
func TestCompletionZshStdout(t *testing.T) {
originalStdout := os.Stdout
tempFile, err := ioutil.TempFile("", "completion_zsh")
assert.NoError(t, err)
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
os.Stdout = tempFile
defer func() { os.Stdout = originalStdout }()
zshCommandDefinition.Run(zshCommandDefinition, []string{"-"})
output, err := ioutil.ReadFile(tempFile.Name())
assert.NoError(t, err)
assert.NotEmpty(t, string(output))
}
func TestCompletionFish(t *testing.T) {
tempFile, err := ioutil.TempFile("", "completion_fish")
assert.NoError(t, err)
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
defer func() { _ = tempFile.Close() }()
defer func() { _ = os.Remove(tempFile.Name()) }()
fishCommandDefinition.Run(fishCommandDefinition, []string{tempFile.Name()})
@@ -89,22 +46,3 @@ func TestCompletionFish(t *testing.T) {
assert.NoError(t, err)
assert.NotEmpty(t, string(bs))
}
func TestCompletionFishStdout(t *testing.T) {
originalStdout := os.Stdout
tempFile, err := ioutil.TempFile("", "completion_zsh")
assert.NoError(t, err)
defer func() {
_ = tempFile.Close()
_ = os.Remove(tempFile.Name())
}()
os.Stdout = tempFile
defer func() { os.Stdout = originalStdout }()
fishCommandDefinition.Run(fishCommandDefinition, []string{"-"})
output, err := ioutil.ReadFile(tempFile.Name())
assert.NoError(t, err)
assert.NotEmpty(t, string(output))
}

View File

@@ -30,20 +30,11 @@ them directly
If you supply a command line argument the script will be written
there.
If output_file is "-", then the output will be written to stdout.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
out := "/usr/share/zsh/vendor-completions/_rclone"
if len(args) > 0 {
if args[0] == "-" {
err := cmd.Root.GenZshCompletion(os.Stdout)
if err != nil {
log.Fatal(err)
}
return
}
out = args[0]
}
outFile, err := os.Create(out)

View File

@@ -49,7 +49,7 @@ func init() {
flags.BoolVarP(cmdFlags, &checkControl, "check-control", "", true, "Check control characters.")
flags.DurationVarP(cmdFlags, &uploadWait, "upload-wait", "", 0, "Wait after writing a file.")
flags.BoolVarP(cmdFlags, &checkLength, "check-length", "", true, "Check max filename length.")
flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", true, "Check uploads with indeterminate file size.")
flags.BoolVarP(cmdFlags, &checkStreaming, "check-streaming", "", true, "Check uploads with indeterminate file size.")
}
var commandDefinition = &cobra.Command{

View File

@@ -45,7 +45,7 @@ func newFileHandle(h vfs.Handle, fsys *FS) *FileHandle {
}
}
// Check interface satisfied
// Check interface satisfied
var _ fusefs.FileHandle = (*FileHandle)(nil)
// The String method is for debug printing.

View File

@@ -258,7 +258,7 @@ var _ fusefs.DirStream = (*dirStream)(nil)
// Readdir opens a stream of directory entries.
//
// Readdir essentially returns a list of strings, and it is allowed
// Readdir essentially returns a list of strings, and it is allowed
// for Readdir to return different results from Lookup. For example,
// you can return nothing for Readdir ("ls my-fuse-mount" is empty),
// while still implementing Lookup ("ls my-fuse-mount/a-specific-file"

View File

@@ -11,7 +11,7 @@ import (
"syscall"
"time"
sysdnotify "github.com/iguanesolutions/go-systemd/v5/notify"
"github.com/okzk/sdnotify"
"github.com/pkg/errors"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
@@ -162,7 +162,7 @@ FUSE.
First set up your remote using ` + "`rclone config`" + `. Check it works with ` + "`rclone ls`" + ` etc.
You can either run mount in foreground mode or background (daemon) mode. Mount runs in
foreground mode by default, use the --daemon flag to specify background mode.
foreground mode by default, use the --daemon flag to specify background mode.
Background mode is only supported on Linux and OSX, you can only run mount in
foreground mode on Windows.
@@ -336,6 +336,9 @@ With --vfs-read-chunk-size 100M and --vfs-read-chunk-size-limit 0 the following
parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
When --vfs-read-chunk-size-limit 500M is specified, the result would be
0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
Chunked reading will only work with --vfs-cache-mode < full, as the file will always
be copied to the vfs cache before opening with --vfs-cache-mode full.
` + vfs.Help,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)
@@ -448,13 +451,13 @@ func Mount(VFS *vfs.VFS, mountpoint string, mount MountFn, opt *Options) error {
// Unmount on exit
fnHandle := atexit.Register(func() {
_ = sysdnotify.Stopping()
_ = unmount()
_ = sdnotify.Stopping()
})
defer atexit.Unregister(fnHandle)
// Notify systemd
if err := sysdnotify.Ready(); err != nil {
if err := sdnotify.Ready(); err != nil && err != sdnotify.ErrSdNotifyNoSocket {
return errors.Wrap(err, "failed to notify systemd")
}
@@ -479,8 +482,8 @@ waitloop:
}
}
_ = sysdnotify.Stopping()
_ = unmount()
_ = sdnotify.Stopping()
if err != nil {
return errors.Wrap(err, "failed to umount FUSE fs")

View File

@@ -75,7 +75,7 @@ func helpText() (tr []string) {
" d delete file/directory",
}
if !clipboard.Unsupported {
tr = append(tr, " y copy current path to clipboard")
tr = append(tr, " y copy current path to clipbard")
}
tr = append(tr, []string{
" Y display current path",

View File

@@ -208,7 +208,7 @@ func TestGET(t *testing.T) {
body, err := ioutil.ReadAll(resp.Body)
require.NoError(t, err)
// Check we got a Last-Modified header and that it is a valid date
// Check we got a Last-Modified header and that it is a valid date
if test.Status == http.StatusOK || test.Status == http.StatusPartialContent {
lastModified := resp.Header.Get("Last-Modified")
assert.NotEqual(t, "", lastModified, test.Golden)

View File

@@ -61,7 +61,7 @@ to be used within the template to serve pages:
| .Name | The full path of a file/directory. |
| .Title | Directory listing of .Name |
| .Sort | The current sort used. This is changeable via ?sort= parameter |
| | Sort Options: namedirfirst,name,size,time (default namedirfirst) |
| | Sort Options: namedirfirst,name,size,time (default namedirfirst) |
| .Order | The current ordering used. This is changeable via ?order= parameter |
| | Order Options: asc,desc (default asc) |
| .Query | Currently unused. |

View File

@@ -132,7 +132,7 @@ func Error(what interface{}, w http.ResponseWriter, text string, err error) {
}
}
// ProcessQueryParams takes and sorts/orders based on the request sort/order parameters and default is namedirfirst/asc
// ProcessQueryParams takes and sorts/orders based on the request sort/order parameters and default is namedirfirst/asc
func (d *Directory) ProcessQueryParams(sortParm string, orderParm string) *Directory {
d.Sort = sortParm
d.Order = orderParm

View File

@@ -27,7 +27,7 @@ var Help = strings.Replace(`
If you supply the parameter |--auth-proxy /path/to/program| then
rclone will use that program to generate backends on the fly which
then are used to authenticate incoming requests. This uses a simple
JSON based protocol with input on STDIN and output on STDOUT.
JSON based protocol with input on STDIN and output on STDOUT.
**PLEASE NOTE:** |--auth-proxy| and |--authorized-keys| cannot be used
together, if |--auth-proxy| is set the authorized keys option will be

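A minimal sketch of an auth proxy program under that protocol; the field names used here ("user", "pass", "type", "_root") are illustrative assumptions, not the documented schema:

```go
package main

import (
	"encoding/json"
	"log"
	"os"
)

func main() {
	// Read one JSON object describing the connecting user from STDIN.
	var in map[string]string
	if err := json.NewDecoder(os.Stdin).Decode(&in); err != nil {
		log.Fatal(err)
	}
	// Write a backend config for that user as JSON on STDOUT.
	out := map[string]string{
		"type":  "sftp",
		"user":  in["user"],
		"pass":  in["pass"],
		"_root": "/home/" + in["user"], // hypothetical per-user root
	}
	if err := json.NewEncoder(os.Stdout).Encode(out); err != nil {
		log.Fatal(err)
	}
}
```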
View File

@@ -64,7 +64,7 @@ func TestTouchWithTimestamp(t *testing.T) {
checkFile(t, r.Fremote, srcFileName, "")
}
func TestTouchWithLongerTimestamp(t *testing.T) {
func TestTouchWithLongerTimestamp(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()

View File

@@ -413,12 +413,3 @@ put them back in again.` >}}
* Muffin King <jinxz_k@live.com>
* Christopher Stewart <6573710+1f47a@users.noreply.github.com>
* Russell Cattelan <cattelan@digitalelves.com>
* gyutw <30371241+gyutw@users.noreply.github.com>
* Hekmon <edouardhur@gmail.com>
* LaSombra <lasombra@users.noreply.github.com>
* Dov Murik <dov.murik@gmail.com>
* Ameer Dawood <ameer1234567890@gmail.com>
* Dan Hipschman <dan.hipschman@opendoor.com>
* Josh Soref <jsoref@users.noreply.github.com>
* David <david@staron.nl>
* Ingo <ingo@hoffmann.cx>

View File

@@ -404,7 +404,6 @@ Note that Box is case insensitive so you can't have a file called
"Hello.doc" and one called "hello.doc".
Box file names can't have the `\` character in. rclone maps this to
and from an identical looking unicode equivalent `＼` (U+FF3C Fullwidth
Reverse Solidus).
and from an identical looking unicode equivalent `＼`.
Box only supports filenames up to 255 characters in length.

View File

@@ -63,7 +63,7 @@ description: "Rclone Changelog"
* Add reverse proxy pluginsHandler for serving plugins (Chaitanya Bankanhal)
* Add `mount/listmounts` option for listing current mounts (Chaitanya Bankanhal)
* Add `operations/uploadfile` to upload a file through rc using encoding multipart/form-data (Chaitanya Bankanhal)
* Add `core/command` to execute rclone terminal commands. (Chaitanya Bankanhal)
* Add `core/command` to execute rclone terminal commands. (Chaitanya Bankanhal)
* `rclone check`
* Add reporting of filenames for same/missing/changed (Nick Craig-Wood)
* Make check command obey `--dry-run`/`-i`/`--interactive` (Nick Craig-Wood)
@@ -172,7 +172,7 @@ description: "Rclone Changelog"
* Google Cloud Storage
* Add support for anonymous access (Kai Lüke)
* Jottacloud
* Bring back legacy authentication for use with whitelabel versions (buengese)
* Bring back legacy authentication for use with whitelabel versions (buengese)
* Switch to new api root - also implement a very ugly workaround for the DirMove failures (buengese)
* Onedrive
* Rework cancel of multipart uploads on rclone exit (Nick Craig-Wood)
@@ -322,7 +322,7 @@ all the docs and Edward Barker for helping re-write the front page.
* Add `--header` flag to add HTTP headers to every HTTP transaction (Nick Craig-Wood)
* Add `--check-first` to do all checking before starting transfers (Nick Craig-Wood)
* Add `--track-renames-strategy` for configurable matching criteria for `--track-renames` (Bernd Schoolmann)
* Add `--cutoff-mode` hard,soft,cautious (Shing Kit Chan & Franklyn Tackitt)
* Add `--cutoff-mode` hard,soft,cautious (Shing Kit Chan & Franklyn Tackitt)
* Filter flags (eg `--files-from -`) can read from stdin (fishbullet)
* Add `--error-on-no-transfer` option (Jon Fautley)
* Implement `--order-by xxx,mixed` for copying some small and some big files (Nick Craig-Wood)
@@ -605,7 +605,7 @@ all the docs and Edward Barker for helping re-write the front page.
* dbhashsum: Stop it returning UNSUPPORTED on dropbox (Nick Craig-Wood)
* dedupe: Add missing modes to help string (Nick Craig-Wood)
* operations
* Fix dedupe continuing on errors like insufficientFilePersimmon (SezalAgrawal)
* Fix dedupe continuing on errors like insufficientFilePersimmon (SezalAgrawal)
* Clear accounting before low level retry (Maciej Zimnoch)
* Write debug message when hashes could not be checked (Ole Schütt)
* Move interface assertion to tests to remove pflag dependency (Nick Craig-Wood)
@@ -669,7 +669,7 @@ all the docs and Edward Barker for helping re-write the front page.
* S3
* Re-implement multipart upload to fix memory issues (Nick Craig-Wood)
* Add `--s3-copy-cutoff` for size to switch to multipart copy (Nick Craig-Wood)
* Add new region Asia Pacific (Hong Kong) (Outvi V)
* Add new region Asia Pacific (Hong Kong) (Outvi V)
* Reduce memory usage streaming files by reducing max stream upload size (Nick Craig-Wood)
* Add `--s3-list-chunk` option for bucket listing (Thomas Kriechbaumer)
* Force path style bucket access to off for AWS deprecation (Nick Craig-Wood)
@@ -930,7 +930,7 @@ all the docs and Edward Barker for helping re-write the front page.
* rcat: Fix slowdown on systems with multiple hashes (Nick Craig-Wood)
* rcd: Fix permissions problems on cache directory with web gui download (Nick Craig-Wood)
* Mount
* Default `--daemon-timout` to 15 minutes on macOS and FreeBSD (Nick Craig-Wood)
* Default `--daemon-timout` to 15 minutes on macOS and FreeBSD (Nick Craig-Wood)
* Update docs to show mounting from root OK for bucket based (Nick Craig-Wood)
* Remove nonseekable flag from write files (Nick Craig-Wood)
* VFS
@@ -1093,7 +1093,7 @@ all the docs and Edward Barker for helping re-write the front page.
* Add client side TLS/SSL flags `--ca-cert`/`--client-cert`/`--client-key` (Nick Craig-Wood)
* Implement `--suffix-keep-extension` for use with `--suffix` (Nick Craig-Wood)
* build:
* Switch to semver compliant version tags to be go modules compliant (Nick Craig-Wood)
* Switch to semver compliant version tags to be go modules compliant (Nick Craig-Wood)
* Update to use go1.12.x for the build (Nick Craig-Wood)
* serve dlna: Add connection manager service description to improve compatibility (Dan Walters)
* lsf: Add 'e' format to show encrypted names and 'o' for original IDs (Nick Craig-Wood)

View File

@@ -27,7 +27,6 @@ them directly
If you supply a command line argument the script will be written
there.
If output_file is `-`, then the output will be written to stdout.
```
rclone genautocomplete bash [output_file] [flags]

View File

@@ -27,7 +27,6 @@ them directly
If you supply a command line argument the script will be written
there.
If output_file is `-`, then the output will be written to stdout.
```
rclone genautocomplete fish [output_file] [flags]

View File

@@ -27,7 +27,6 @@ them directly
If you supply a command line argument the script will be written
there.
If output_file is `-`, then the output will be written to stdout.
```
rclone genautocomplete zsh [output_file] [flags]

View File

@@ -6,26 +6,23 @@ description: "Encryption overlay remote"
{{< icon "fa fa-lock" >}}Crypt
----------------------------------------
Rclone `crypt` remotes encrypt and decrypt other remotes.
The `crypt` remote encrypts and decrypts another remote.
To use `crypt`, first set up the underlying remote. Follow the `rclone
config` instructions for that remote.
To use it first set up the underlying remote following the config
instructions for that remote. You can also use a local pathname
instead of a remote which will encrypt and decrypt from that directory
which might be useful for encrypting onto a USB stick for example.
`crypt` applied to a local pathname instead of a remote will
encrypt and decrypt that directory, and can be used to encrypt USB
removable drives.
First check your chosen remote is working - we'll call it
`remote:path` in these docs. Note that anything inside `remote:path`
will be encrypted and anything outside won't. This means that if you
are using a bucket based remote (eg S3, B2, swift) then you should
probably put the bucket in the remote `s3:bucket`. If you just use
`s3:` then rclone will make encrypted bucket names too (if using file
name encryption) which may or may not be what you want.
Before configuring the crypt remote, check the underlying remote is
working. In this example the underlying remote is called `remote:path`.
Anything inside `remote:path` will be encrypted and anything outside
will not. In the case of an S3 based underlying remote (eg Amazon S3,
B2, Swift) it is generally advisable to define a crypt remote in the
underlying remote `s3:bucket`. If `s3:` alone is specified alongside
file name encryption, rclone will encrypt the bucket name.
Configure `crypt` using `rclone config`. In this example the `crypt`
remote is called `secret`, to differentiate it from the underlying
`remote`.
Now configure `crypt` using `rclone config`. We will call this one
`secret` to differentiate it from the `remote`.
```
No remotes found - make a new one
@@ -99,42 +96,49 @@ d) Delete this remote
y/e/d> y
```
**Important** The crypt password stored in `rclone.conf` is lightly
obscured. That only protects it from cursory inspection. It is not
secure unless encryption of `rclone.conf` is specified.
**Important** The password stored in the config file is lightly
obscured so it isn't immediately obvious what it is. It is in no way
secure unless you use config file encryption.
A long passphrase is recommended, or `rclone config` can generate a
random one.
A long passphrase is recommended, or you can use a random one.
The obscured password is created using AES-CTR with a static key. The
salt is stored verbatim at the beginning of the obscured password. This
static key is shared between all versions of rclone.
The obscured password is created by using AES-CTR with a static key, with
the salt stored verbatim at the beginning of the obscured password. This
static key is shared between all versions of rclone.
If you reconfigure rclone with the same passwords/passphrases
elsewhere it will be compatible, but the obscured version will be different
due to the different salt.
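For illustration, a minimal sketch of that scheme with a placeholder static key (not rclone's real one): encrypt with AES-CTR, prepend the random IV ("salt"), and base64 encode the result.

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"io"
)

// Placeholder 32-byte key for the sketch - not rclone's actual static key.
var staticKey = []byte("0123456789abcdef0123456789abcdef")

// obscure encrypts password with AES-CTR and prepends the random IV,
// so the same password obscures differently each time.
func obscure(password string) (string, error) {
	block, err := aes.NewCipher(staticKey)
	if err != nil {
		return "", err
	}
	buf := make([]byte, aes.BlockSize+len(password))
	iv := buf[:aes.BlockSize] // the "salt", stored verbatim at the start
	if _, err := io.ReadFull(rand.Reader, iv); err != nil {
		return "", err
	}
	cipher.NewCTR(block, iv).XORKeyStream(buf[aes.BlockSize:], []byte(password))
	return base64.RawURLEncoding.EncodeToString(buf), nil
}

func main() {
	out, err := obscure("hunter2")
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}
```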
Rclone does not encrypt
Note that rclone does not encrypt
* file length - this can be calculated within 16 bytes
* modification time - used for syncing
## Specifying the remote ##
In normal use, ensure the remote has a `:` in. If specified without,
rclone uses a local directory of that name. For example if a remote
`/path/to/secret/files` is specified, rclone encrypts content to that
directory. If a remote `name` is specified, rclone targets a directory
`name` in the current directory.
In normal use, make sure the remote has a `:` in. If you specify the
remote without a `:` then rclone will use a local directory of that
name. So if you use a remote of `/path/to/secret/files` then rclone
will encrypt stuff to that directory. If you use a remote of `name`
then rclone will put files in a directory called `name` in the current
directory.
If remote `remote:path/to/dir` is specified, rclone stores encrypted
files in `path/to/dir` on the remote. With file name encryption, files
saved to `secret:subdir/subfile` are stored in the unencrypted path
`path/to/dir` but the `subdir/subpath` element is encrypted.
If you specify the remote as `remote:path/to/dir` then rclone will
store encrypted files in `path/to/dir` on the remote. If you are using
file name encryption, then when you save files to
`secret:subdir/subfile` this will store them in the unencrypted path
`path/to/dir` but the `subdir/subpath` bit will be encrypted.
Note that unless you want encrypted bucket names (which are difficult
to manage because you won't know what directory they represent in web
interfaces etc), you should probably specify a bucket, eg
`remote:secretbucket` when using bucket based remotes such as S3,
Swift, Hubic, B2, GCS.
## Example ##
Create the following file structure using "standard" file name
To test I made a little directory of files using "standard" file name
encryption.
```
@@ -148,7 +152,7 @@ plaintext/
└── file4.txt
```
Copy these to the remote, and list them
Copy these to the remote and list them back
```
$ rclone -q copy plaintext secret:
@@ -160,7 +164,7 @@ $ rclone -q ls secret:
9 subdir/file3.txt
```
The crypt remote looks like
Now see what that looked like when encrypted
```
$ rclone -q ls remote:path
@@ -171,7 +175,7 @@ $ rclone -q ls remote:path
56 86vhrsv86mpbtd3a0akjuqslj8/8njh1sk437gttmep3p70g81aps
```
The directory structure is preserved
Note that this retains the directory structure which means you can do this
```
$ rclone -q ls secret:subdir
@@ -180,9 +184,9 @@ $ rclone -q ls secret:subdir
10 subsubdir/file4.txt
```
Without file name encryption `.bin` extensions are added to underlying
names. This prevents the cloud provider attempting to interpret file
content.
If you don't use file name encryption then the remote will look like this
- note the `.bin` extensions added to prevent the cloud provider
attempting to interpret the data.
```
$ rclone -q ls remote:path
@@ -195,6 +199,8 @@ $ rclone -q ls remote:path
### File name encryption modes ###
Here are some of the features of the file name encryption modes
Off
* doesn't hide file names or directory structure
@@ -213,19 +219,17 @@ Standard
Obfuscation
This is a simple "rotate" of the filename, with each file having a rot
distance based on the filename. Rclone stores the distance at the
beginning of the filename. A file called "hello" may become "53.jgnnq".
distance based on the filename. We store the distance at the beginning
of the filename. So a file called "hello" may become "53.jgnnq".
Obfuscation is not a strong encryption of filenames, but hinders
automated scanning tools picking up on filename patterns. It is an
intermediate between "off" and "standard" which allows for longer path
segment names.
This is not a strong encryption of filenames, but it may stop automated
scanning tools from picking up on filename patterns. As such it's an
intermediate between "off" and "standard". The advantage is that it
allows for longer path segment names.
There is a possibility with some unicode based filenames that the
obfuscation is weak and may map lower case characters to upper case
equivalents.
Obfuscation cannot be relied upon for strong protection.
equivalents. You can not rely on this for strong protection.
* file names very lightly obfuscated
* file names can be longer than standard encryption
@@ -233,14 +237,13 @@ Obfuscation cannot be relied upon for strong protection.
* directory structure visible
* identical files names will have identical uploaded names
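A toy sketch of the rotate idea in Go, with an assumed distance function (the sum of the rune values); rclone's real obfuscation differs in detail:

```go
package main

import "fmt"

// obfuscate rotates letters by a distance derived from the name and
// stores the distance as a numeric prefix. Toy version for illustration.
func obfuscate(name string) string {
	dist := 0
	for _, r := range name {
		dist += int(r) // assumed distance function, not rclone's
	}
	dist %= 26
	out := make([]rune, 0, len(name))
	for _, r := range name {
		switch {
		case r >= 'a' && r <= 'z':
			out = append(out, 'a'+(r-'a'+rune(dist))%26)
		case r >= 'A' && r <= 'Z':
			out = append(out, 'A'+(r-'A'+rune(dist))%26)
		default:
			out = append(out, r) // non-letters pass through unchanged
		}
	}
	return fmt.Sprintf("%d.%s", dist, string(out))
}

func main() {
	fmt.Println(obfuscate("hello")) // "12.tqxxa" with this toy distance
}
```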
Cloud storage systems have limits on file name length and
total path length which rclone is more likely to breach using
"Standard" file name encryption. Where file names are less thn 156
characters in length issues should not be encountered, irrespective of
cloud storage provider.
Cloud storage systems have various limits on file name length and
total path length which you are more likely to hit using "Standard"
file name encryption. If you keep your file names to below 156
characters in length then you should be OK on all providers.
An alternative, future rclone file name encryption mode may tolerate
backend provider path length limits.
There may be an even more secure file name encryption mode in the
future which will address the long file name problem.
### Directory name encryption ###
Crypt offers the option of encrypting dir names or leaving them intact.
@@ -266,10 +269,10 @@ Example:
Crypt stores modification times using the underlying remote so support
depends on that.
Hashes are not stored for crypt. However the data integrity is
Hashes are not stored for crypt. However the data integrity is
protected by an extremely strong crypto authenticator.
Use the `rclone cryptcheck` command to check the
Note that you should use the `rclone cryptcheck` command to check the
integrity of a crypted remote instead of `rclone check` which can't
check the checksums properly.

View File

@@ -1108,11 +1108,6 @@ Note: On Windows until [this bug](https://github.com/Azure/go-ansiterm/issues/26
is fixed all non-ASCII characters will be replaced with `.` when
`--progress` is in use.
### --progress-terminal-title ###
This flag, when used with `-P/--progress`, will print the string `ETA: %s`
to the terminal title.
### -q, --quiet ###
This flag will limit rclone's output to error messages only.
@@ -1258,17 +1253,11 @@ or with `--backup-dir`. See `--backup-dir` for more info.
For example
rclone copy -i /path/to/local/file remote:current --suffix .bak
rclone sync -i /path/to/local/file remote:current --suffix .bak
will copy `/path/to/local` to `remote:current`, but for any files
will sync `/path/to/local` to `remote:current`, but for any files
which would have been updated or deleted will have .bak added.
If using `rclone sync` with `--suffix` and without `--backup-dir` then
it is recommended to put a filter rule in excluding the suffix
otherwise the `sync` will delete the backup files.
rclone sync -i /path/to/local/file remote:current --suffix .bak --exclude "*.bak"
### --suffix-keep-extension ###
When using `--suffix`, setting this causes rclone put the SUFFIX

View File

@@ -48,7 +48,7 @@ Choose a number from below, or type in your own value
1 / Connect to ftp.example.com
\ "ftp.example.com"
host> ftp.example.com
FTP username, leave blank for current username, $USER
FTP username, leave blank for current username, ncw
Enter a string value. Press Enter for the default ("").
user>
FTP port, leave blank to use default (21)

View File

@@ -205,7 +205,7 @@ or the latest version (equivalent to the beta) with
These will build the binary in `$(go env GOPATH)/bin`
(`~/go/bin/rclone` by default) after downloading the source to the go
module cache. Note - do **not** use the `-u` flag here. This causes go
to try to update the dependencies that rclone uses and sometimes these
to try to update the dependencies that rclone uses and sometimes these
don't work with the current version of rclone.
## Installation with Ansible ##

View File

@@ -27,8 +27,8 @@ Note that the web interface may refer to this token as a JottaCli token.
### Legacy Setup
If you are using one of the whitelabel versions (Elgiganten, Com Hem Cloud) you may not have the option
to generate a CLI token. In this case you'll have to use the legacy authentication. To do this select
yes when the setup asks for legacy authentication and enter your username and password.
to generate a CLI token. In this case you'll have to use the legacy authentication. To do this select
yes when the setup asks for legacy authentication and enter your username and password.
The rest of the setup is identical to the default setup.
Here is an example of how to make a remote called `remote` with the default setup. First run:
@@ -59,7 +59,7 @@ y) Yes
n) No
y/n> n
Remote config
Use legacy authentication?.
Use legacy authentication?.
This is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.
y) Yes
n) No (default)

View File

@@ -402,7 +402,7 @@ If the server can't do `CleanUp` then `rclone cleanup` will return an
error.
‡‡ Note that while Box implements this it has to delete every file
individually so it will be slower than emptying the trash via the WebUI
individually so it will be slower than emptying the trash via the WebUI
### ListR ###

View File

@@ -259,7 +259,7 @@ Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
NB if you set this to > 1 then the checksums of multipart uploads
NB if you set this to > 1 then the checksums of multipart uploads
become corrupted (the uploads themselves are not corrupted though).
If you are uploading small numbers of large file over high speed link

View File

@@ -582,7 +582,7 @@ Region to connect to.
- Asia Pacific (Mumbai)
- Needs location constraint ap-south-1.
- "ap-east-1"
- Asia Pacific (Hong Kong) Region
- Asia Pacific (Hong Kong) Region
- Needs location constraint ap-east-1.
- "sa-east-1"
- South America (Sao Paulo) Region
@@ -1493,7 +1493,7 @@ All the objects shown will be marked for restore, then
rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard
It returns a list of status dictionaries with Remote and Status
keys. The Status will be OK if it was successful or an error message
keys. The Status will be OK if it was successful or an error message
if not.
[
@@ -1794,7 +1794,7 @@ Choose a number from below, or type in your own value
secret_access_key> <>
```
6. Specify the endpoint for IBM COS. For Public IBM COS, choose from the option below. For On Premise IBM COS, enter an endpoint address.
6. Specify the endpoint for IBM COS. For Public IBM COS, choose from the option below. For On Premise IBM COS, enter an endpoint address.
```
Endpoint for IBM COS S3 API.
Specify if using an IBM COS On Premise.
@@ -1855,7 +1855,7 @@ Choose a number from below, or type in your own value
location_constraint>1
```
9. Specify a canned ACL. IBM Cloud (Storage) supports "public-read" and "private". IBM Cloud(Infra) supports all the canned ACLs. On-Premise COS supports all the canned ACLs.
9. Specify a canned ACL. IBM Cloud (Storage) supports "public-read" and "private". IBM Cloud(Infra) supports all the canned ACLs. On-Premise COS supports all the canned ACLs.
```
Canned ACL used when creating buckets and/or storing objects in S3.
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl

View File

@@ -52,7 +52,7 @@ Choose a number from below, or type in your own value
1 / Connect to example.com
\ "example.com"
host> example.com
SSH username, leave blank for current username, $USER
SSH username, leave blank for current username, ncw
user> sftpuser
SSH port, leave blank to use default (22)
port>
@@ -139,66 +139,6 @@ key_file = ~/id_rsa
pubkey_file = ~/id_rsa-cert.pub
```
If you concatenate a cert with a private key then you can specify the
merged file in both places.
Note: the cert must come first in the file. e.g.
```
cat id_rsa-cert.pub id_rsa > merged_key
```
### Host key validation ###
By default rclone will not check the server's host key for validation. This
can allow an attacker to replace a server with their own and if you use
password authentication then this can lead to that password being exposed.
Host key matching, using standard `known_hosts` files can be turned on by
enabling the `known_hosts_file` option. This can point to the file maintained
by `OpenSSH` or can point to a unique file.
e.g.
```
[remote]
type = sftp
host = example.com
user = sftpuser
pass =
known_hosts_file = ~/.ssh/known_hosts
```
There are some limitations:
* `rclone` will not _manage_ this file for you. If the key is missing or
wrong then the connection will be refused.
* If the server is set up for a certificate host key then the entry in
the `known_hosts` file _must_ be the `@cert-authority` entry for the CA
* Unlike `OpenSSH`, the libraries used by `rclone` do not permit (at time
of writing) multiple host keys to be listed for a server. Only the first
entry is used.
If the host key provided by the server does not match the one in the
file (or is missing) then the connection will be aborted and an error
returned such as
NewFs: couldn't connect SSH: ssh: handshake failed: knownhosts: key mismatch
or
NewFs: couldn't connect SSH: ssh: handshake failed: knownhosts: key is unknown
If you see an error such as
NewFs: couldn't connect SSH: ssh: handshake failed: ssh: no authorities for hostname: example.com:22
then it is likely the server has presented a CA signed host certificate
and you will need to add the appropriate `@cert-authority` entry.
The `known_hosts_file` setting can be set during `rclone config` as an
advanced option.
### ssh-agent on macOS ###
Note that there seem to be various problems with using an ssh-agent on
@@ -243,7 +183,7 @@ SSH host to connect to
#### --sftp-user
SSH username, leave blank for current username, $USER
SSH username, leave blank for current username, ncw
- Config: user
- Env Var: RCLONE_SFTP_USER
@@ -305,20 +245,6 @@ in the new OpenSSH format can't be used.
- Type: string
- Default: ""
#### --sftp-pubkey-file
Optional path to public key file.
Set this if you have a signed certificate you want to use for authentication.
Leading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`.
- Config: pubkey_file
- Env Var: RCLONE_SFTP_PUBKEY_FILE
- Type: string
- Default: ""
#### --sftp-key-use-agent
When set forces the usage of the ssh-agent.
@@ -332,6 +258,18 @@ when the ssh-agent contains many keys.
- Type: bool
- Default: false
#### --sftp-pubkey-file
Path to public key file; set this if you want to use certificate based authentication.
Leading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`.
- Config: pubkey_file
- Env Var: RCLONE_SFTP_PUBKEY_FILE
- Type: string
- Default: ""
#### --sftp-use-insecure-cipher
Enable the use of insecure ciphers and key exchange methods.
@@ -371,23 +309,6 @@ Leave blank or set to false to enable hashing (recommended), set to true to disa
Here are the advanced options specific to sftp (SSH/SFTP Connection).
#### --sftp-known-hosts-file
Optional path to known_hosts file.
Set this value to enable server host key validation.
Leading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`.
- Config: known_hosts_file
- Env Var: RCLONE_SFTP_KNOWN_HOSTS_FILE
- Type: string
- Default: ""
- Examples:
- "~/.ssh/known_hosts"
- Use OpenSSH's known_hosts file
#### --sftp-ask-password
Allow asking for SFTP password when needed.

View File

@@ -65,7 +65,7 @@ d) Delete this remote
y/e/d> y
```
### Setup with API key and passphrase
### Setup with API key and passphrase
```
No remotes found - make a new one

View File

@@ -450,7 +450,7 @@ func shortenName(in string, size int) string {
return in
}
name := []rune(in)
size-- // don't count ellipsis rune
size-- // don't count ellipsis rune
suffixLength := size / 2
prefixLength := size - suffixLength
suffixStart := len(name) - suffixLength

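A worked example of the split arithmetic above, assuming the function joins prefix + "…" + suffix: with size 10, one rune is reserved for the ellipsis, leaving suffixLength 4 and prefixLength 5.

```go
package main

import "fmt"

func main() {
	// size = 10: one rune for the ellipsis, 9 remain,
	// suffixLength = 9/2 = 4, prefixLength = 9-4 = 5.
	name := []rune("verylongfilename") // 16 runes
	fmt.Println(string(name[:5]) + "…" + string(name[len(name)-4:])) // veryl…name
}
```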
View File

@@ -11,7 +11,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/lib/terminal"
)
// MaxCompletedTransfers specifies maximum number of completed transfers in startedTransfers list
@@ -283,11 +282,6 @@ func (s *StatsInfo) String() string {
xfrchkString,
)
if fs.Config.ProgressTerminalTitle {
// Writes ETA to the terminal title
terminal.WriteTerminalTitle("ETA: " + etaString(currentSize, totalSize, speed))
}
if !fs.Config.StatsOneLine {
_, _ = buf.WriteRune('\n')
errorDetails := ""

View File

@@ -99,7 +99,7 @@ func StartTokenTicker() {
}()
}
// limitBandwidth sleeps for the correct amount of time for the passage
// limitBandwidth sleeps for the correct amount of time for the passage
// of n bytes according to the current bandwidth limit
func limitBandwidth(n int) {
tokenBucketMu.Lock()

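The same idea sketched with golang.org/x/time/rate instead of rclone's internal token bucket (an assumption for illustration, not the code above): WaitN blocks until n bytes' worth of tokens are available.

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/time/rate"
)

// 1 MiB/s with a 1 MiB burst; note WaitN errors if n exceeds the burst.
var limiter = rate.NewLimiter(rate.Limit(1<<20), 1<<20)

// limitBandwidth sleeps until n bytes may pass at the configured limit.
func limitBandwidth(n int) {
	_ = limiter.WaitN(context.Background(), n)
}

func main() {
	limitBandwidth(64 * 1024) // account for a 64 KiB read
	fmt.Println("64 KiB allowed through")
}
```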
View File

@@ -91,7 +91,7 @@ func (tm *transferMap) _sortedSlice() []*Transfer {
func (tm *transferMap) String(progress *inProgress, exclude *transferMap) string {
tm.mu.RLock()
defer tm.mu.RUnlock()
stringList := make([]string, 0, len(tm.items))
stringList := make([]string, 0, len(tm.items))
for _, tr := range tm._sortedSlice() {
if exclude != nil {
exclude.mu.RLock()
@@ -111,9 +111,9 @@ func (tm *transferMap) String(progress *inProgress, exclude *transferMap) string
tm.name,
)
}
stringList = append(stringList, " * "+out)
stringList = append(stringList, " * "+out)
}
return strings.Join(stringList, "\n")
return strings.Join(stringList, "\n")
}
// progress returns total bytes read as well as the size.

View File

@@ -80,7 +80,7 @@ func parseWeekday(dayOfWeek string) (int, error) {
// Set the bandwidth timetable.
func (x *BwTimetable) Set(s string) error {
// The timetable is formatted as:
// "dayOfWeek-hh:mm,bandwidth dayOfWeek-hh:mm,bandwidth..." ex: "Mon-10:00,10G Mon-11:30,1G Tue-18:00,off"
// "dayOfWeek-hh:mm,bandwidth dayOfWeek-hh:mm,banwidth..." ex: "Mon-10:00,10G Mon-11:30,1G Tue-18:00,off"
// If only a single bandwidth identifier is provided, we assume constant bandwidth.
if len(s) == 0 {

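A usage sketch of the parser, assuming BwTimetable is the exported type in rclone's fs package (as this file's package suggests):

```go
package main

import (
	"fmt"
	"log"

	"github.com/rclone/rclone/fs"
)

func main() {
	// Parse the documented format: "dayOfWeek-hh:mm,bandwidth ..."
	var tt fs.BwTimetable
	if err := tt.Set("Mon-10:00,10G Mon-11:30,1G Tue-18:00,off"); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("timetable: %v\n", tt)
}
```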
View File

@@ -38,7 +38,7 @@ type ChunkedReader struct {
//
// An initialChunkSize of <= 0 will disable chunked reading.
// If maxChunkSize is greater than initialChunkSize, the chunk size will be
// doubled after each chunk read with a maximum of maxChunkSize.
// doubled after each chunk read with a maximum of maxChunkSize.
// A Seek or RangeSeek will reset the chunk size to its initial value
func New(ctx context.Context, o fs.Object, initialChunkSize int64, maxChunkSize int64) *ChunkedReader {
if initialChunkSize <= 0 {

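A sketch of the doubling rule the comment describes, assuming maxChunkSize is at least the initial size; with an initial 100M chunk and a 500M limit it reproduces the 100M, 200M, 400M, 500M, 500M progression from the mount docs.

```go
package main

import "fmt"

// nextChunkSize doubles the chunk size after each read, capped at max.
func nextChunkSize(current, max int64) int64 {
	if next := current * 2; next <= max {
		return next
	}
	return max
}

func main() {
	const M = int64(1 << 20)
	size, limit := 100*M, 500*M
	for i := 0; i < 5; i++ {
		fmt.Printf("chunk %d: %dM\n", i+1, size/M)
		size = nextChunkSize(size, limit)
	}
}
```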
View File

@@ -42,7 +42,6 @@ var (
type ConfigInfo struct {
LogLevel LogLevel
StatsLogLevel LogLevel
LogSystemdSupport bool
UseJSONLog bool
DryRun bool
Interactive bool
@@ -62,7 +61,7 @@ type ConfigInfo struct {
DeleteMode DeleteMode
MaxDelete int64
TrackRenames bool // Track file renames.
TrackRenamesStrategy string // Comma separated list of strategies used to track renames
TrackRenamesStrategy string // Comma separated list of strategies used to track renames
LowLevelRetries int
UpdateOlder bool // Skip files that are newer on the destination
NoGzip bool // Disable compression
@@ -107,7 +106,6 @@ type ConfigInfo struct {
StatsOneLineDateFormat string // If we want to customize the prefix
ErrorOnNoTransfer bool // Set appropriate exit code if no files transferred
Progress bool
ProgressTerminalTitle bool
Cookie bool
UseMmap bool
CaCert string // Client Side CA

View File

@@ -177,7 +177,7 @@ func TestNewRemoteName(t *testing.T) {
assert.Equal(t, "newname", NewRemoteName())
}
func TestCreateUpdatePasswordRemote(t *testing.T) {
func TestCreateUpdatePasswordRemote(t *testing.T) {
defer testConfigFile(t, "update.conf")()
for _, doObscure := range []bool{false, true} {

View File

@@ -110,7 +110,6 @@ func AddFlags(flagSet *pflag.FlagSet) {
flags.StringVarP(flagSet, &fs.Config.StatsOneLineDateFormat, "stats-one-line-date-format", "", fs.Config.StatsOneLineDateFormat, "Enables --stats-one-line-date and uses custom formatted date. Enclose date string in double quotes (\"). See https://golang.org/pkg/time/#Time.Format")
flags.BoolVarP(flagSet, &fs.Config.ErrorOnNoTransfer, "error-on-no-transfer", "", fs.Config.ErrorOnNoTransfer, "Sets exit code 9 if no files are transferred, useful in scripts")
flags.BoolVarP(flagSet, &fs.Config.Progress, "progress", "P", fs.Config.Progress, "Show progress during transfer.")
flags.BoolVarP(flagSet, &fs.Config.ProgressTerminalTitle, "progress-terminal-title", "", fs.Config.ProgressTerminalTitle, "Show progress on the terminal title. Requires -P/--progress.")
flags.BoolVarP(flagSet, &fs.Config.Cookie, "use-cookies", "", fs.Config.Cookie, "Enable session cookiejar.")
flags.BoolVarP(flagSet, &fs.Config.UseMmap, "use-mmap", "", fs.Config.UseMmap, "Use mmap allocator (see docs).")
flags.StringVarP(flagSet, &fs.Config.CaCert, "ca-cert", "", fs.Config.CaCert, "CA certificate used to verify servers")
@@ -124,7 +123,6 @@ func AddFlags(flagSet *pflag.FlagSet) {
flags.StringArrayVarP(flagSet, &downloadHeaders, "header-download", "", nil, "Set HTTP header for download transactions")
flags.StringArrayVarP(flagSet, &headers, "header", "", nil, "Set HTTP header for all transactions")
flags.BoolVarP(flagSet, &fs.Config.RefreshTimes, "refresh-times", "", fs.Config.RefreshTimes, "Refresh the modtime of remote files.")
flags.BoolVarP(flagSet, &fs.Config.LogSystemdSupport, "log-systemd", "", fs.Config.LogSystemdSupport, "Activate systemd integration for the logger.")
}
// ParseHeaders converts the strings passed in via the header flags into HTTPOptions

View File

@@ -156,15 +156,6 @@ func FVarP(flags *pflag.FlagSet, value pflag.Value, name, shorthand, usage strin
setDefaultFromEnv(flags, name)
}
// VarPF defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.VarPF
func VarPF(flags *pflag.FlagSet, value pflag.Value, name, shorthand, usage string) *pflag.Flag {
flag := flags.VarPF(value, name, shorthand, usage)
setDefaultFromEnv(flags, name)
return flag
}
// StringArrayP defines a flag which can be overridden by an environment variable
//
// It sets one value only - command line flags can be used to set more.

View File

@@ -1143,16 +1143,6 @@ func UnWrapObject(o Object) Object {
return o
}
// UnWrapObjectInfo returns the underlying Object unwrapped as much as
// possible or nil.
func UnWrapObjectInfo(oi ObjectInfo) Object {
o, ok := oi.(Object)
if !ok {
return nil
}
return UnWrapObject(o)
}
// Find looks for a RegInfo object for the name passed in. The name
// can be either the Name or the Prefix.
//

View File

@@ -4,7 +4,6 @@ import (
"fmt"
"log"
sysdjournald "github.com/iguanesolutions/go-systemd/v5/journald"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -71,28 +70,7 @@ func (l *LogLevel) Type() string {
// LogPrint sends the text to the logger of level
var LogPrint = func(level LogLevel, text string) {
var prefix string
if Config.LogSystemdSupport {
switch level {
case LogLevelDebug:
prefix = sysdjournald.DebugPrefix
case LogLevelInfo:
prefix = sysdjournald.InfoPrefix
case LogLevelNotice:
prefix = sysdjournald.NoticePrefix
case LogLevelWarning:
prefix = sysdjournald.WarningPrefix
case LogLevelError:
prefix = sysdjournald.ErrPrefix
case LogLevelCritical:
prefix = sysdjournald.CritPrefix
case LogLevelAlert:
prefix = sysdjournald.AlertPrefix
case LogLevelEmergency:
prefix = sysdjournald.EmergPrefix
}
}
text = fmt.Sprintf("%s%-6s: %s", prefix, level, text)
text = fmt.Sprintf("%-6s: %s", level, text)
_ = log.Output(4, text)
}

View File

@@ -90,19 +90,14 @@ func Stack(o interface{}, info string) {
func InitLogging() {
flagsStr := "," + Opt.Format + ","
var flags int
if !fs.Config.LogSystemdSupport {
if strings.Contains(flagsStr, ",date,") {
flags |= log.Ldate
}
if strings.Contains(flagsStr, ",time,") {
flags |= log.Ltime
}
if strings.Contains(flagsStr, ",microseconds,") {
flags |= log.Lmicroseconds
}
if strings.Contains(flagsStr, ",UTC,") {
flags |= log.LUTC
}
if strings.Contains(flagsStr, ",date,") {
flags |= log.Ldate
}
if strings.Contains(flagsStr, ",time,") {
flags |= log.Ltime
}
if strings.Contains(flagsStr, ",microseconds,") {
flags |= log.Lmicroseconds
}
if strings.Contains(flagsStr, ",longfile,") {
flags |= log.Llongfile
@@ -110,6 +105,9 @@ func InitLogging() {
if strings.Contains(flagsStr, ",shortfile,") {
flags |= log.Lshortfile
}
if strings.Contains(flagsStr, ",UTC,") {
flags |= log.LUTC
}
log.SetFlags(flags)
// Log file output

View File

@@ -11,7 +11,6 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/march"
"github.com/rclone/rclone/lib/readers"
@@ -239,16 +238,10 @@ func CheckFn(ctx context.Context, opt *CheckOpt) error {
if c.matches > 0 {
fs.Logf(c.opt.Fdst, "%d matching files", c.matches)
}
if err != nil {
return err
}
if c.differences > 0 {
// Return an already counted error so we don't double count this error too
err = fserrors.FsError(errors.Errorf("%d differences found", c.differences))
fserrors.Count(err)
return err
return errors.Errorf("%d differences found", c.differences)
}
return nil
return err
}
// Check the files in fsrc and fdst according to Size and hash

View File

@@ -73,7 +73,7 @@ func (mc *multiThreadCopyState) copyStream(ctx context.Context, stream int) (err
rc, err := NewReOpen(ctx, mc.src, fs.Config.LowLevelRetries, &fs.RangeOption{Start: start, End: end - 1})
if err != nil {
return errors.Wrap(err, "multipart copy: failed to open source")
return errors.Wrap(err, "multpart copy: failed to open source")
}
defer fs.CheckClose(rc, &err)
@@ -89,29 +89,29 @@ func (mc *multiThreadCopyState) copyStream(ctx context.Context, stream int) (err
if nr > 0 {
err = mc.acc.AccountRead(nr)
if err != nil {
return errors.Wrap(err, "multipart copy: accounting failed")
return errors.Wrap(err, "multpart copy: accounting failed")
}
nw, ew := mc.wc.WriteAt(buf[0:nr], offset)
if nw > 0 {
offset += int64(nw)
}
if ew != nil {
return errors.Wrap(ew, "multipart copy: write failed")
return errors.Wrap(ew, "multpart copy: write failed")
}
if nr != nw {
return errors.Wrap(io.ErrShortWrite, "multipart copy")
return errors.Wrap(io.ErrShortWrite, "multpart copy")
}
}
if er != nil {
if er != io.EOF {
return errors.Wrap(er, "multipart copy: read failed")
return errors.Wrap(er, "multpart copy: read failed")
}
break
}
}
if offset != end {
return errors.Errorf("multipart copy: wrote %d bytes but expected to write %d", offset-start, end-start)
return errors.Errorf("multpart copy: wrote %d bytes but expected to write %d", offset-start, end-start)
}
fs.Debugf(mc.src, "multi-thread copy: stream %d/%d (%d-%d) size %v finished", stream+1, mc.streams, start, end, fs.SizeSuffix(end-start))
@@ -163,7 +163,7 @@ func multiThreadCopy(ctx context.Context, f fs.Fs, remote string, src fs.Object,
// create write file handle
mc.wc, err = openWriterAt(gCtx, remote, mc.size)
if err != nil {
return nil, errors.Wrap(err, "multipart copy: failed to open destination")
return nil, errors.Wrap(err, "multpart copy: failed to open destination")
}
fs.Debugf(src, "Starting multi-thread copy with %d parts of size %v", mc.streams, fs.SizeSuffix(mc.partSize))

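A sketch of how the per-stream byte ranges might be computed (the real partitioning in rclone may differ); each stream then opens the source with a RangeOption covering [start, end) as the hunk shows.

```go
package main

import "fmt"

// streamRange returns the half-open byte range [start, end) for one stream.
func streamRange(stream, streams int, size, partSize int64) (start, end int64) {
	start = int64(stream) * partSize
	end = start + partSize
	if stream == streams-1 || end > size {
		end = size // last stream takes whatever is left
	}
	return start, end
}

func main() {
	const size, streams = int64(1000), 4
	partSize := (size + int64(streams) - 1) / int64(streams)
	for i := 0; i < streams; i++ {
		start, end := streamRange(i, streams, size, partSize)
		fmt.Printf("stream %d/%d: %d-%d\n", i+1, streams, start, end)
	}
}
```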
View File

@@ -151,15 +151,6 @@ func defaultEqualOpt() equalOpt {
}
}
var modTimeUploadOnce sync.Once
// emit a log if we are about to upload a file to set its modification time
func logModTimeUpload(dst fs.Object) {
modTimeUploadOnce.Do(func() {
fs.Logf(dst.Fs(), "Forced to upload files to set modification times on this backend.")
})
}
func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt) bool {
if sizeDiffers(src, dst) {
fs.Debugf(src, "Sizes differ (src %d vs dst %d)", src.Size(), dst.Size())
@@ -181,12 +172,9 @@ func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt)
return false
}
if ht == hash.None {
common := src.Fs().Hashes().Overlap(dst.Fs().Hashes())
if common.Count() == 0 {
checksumWarning.Do(func() {
fs.Logf(dst.Fs(), "--checksum is in use but the source and destination have no hashes in common; falling back to --size-only")
})
}
checksumWarning.Do(func() {
fs.Logf(dst.Fs(), "--checksum is in use but the source and destination have no hashes in common; falling back to --size-only")
})
fs.Debugf(src, "Size of src and dst objects identical")
} else {
fs.Debugf(src, "Size and %v of src and dst objects identical", ht)
@@ -235,12 +223,10 @@ func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt)
// Update the mtime of the dst object here
err := dst.SetModTime(ctx, srcModTime)
if err == fs.ErrorCantSetModTime {
logModTimeUpload(dst)
fs.Infof(dst, "src and dst identical but can't set mod time without re-uploading")
fs.Debugf(dst, "src and dst identical but can't set mod time without re-uploading")
return false
} else if err == fs.ErrorCantSetModTimeWithoutDelete {
logModTimeUpload(dst)
fs.Infof(dst, "src and dst identical but can't set mod time without deleting and re-uploading")
fs.Debugf(dst, "src and dst identical but can't set mod time without deleting and re-uploading")
// Remove the file if BackupDir isn't set. If BackupDir is set we would rather have the old file
// put in the BackupDir than deleted which is what will happen if we don't delete it.
if fs.Config.BackupDir == "" {
@@ -1536,11 +1522,12 @@ func BackupDir(fdst fs.Fs, fsrc fs.Fs, srcFileName string) (backupDir fs.Fs, err
}
}
}
} else if fs.Config.Suffix != "" {
} else {
if srcFileName == "" {
return nil, fserrors.FatalError(errors.New("--suffix must be used with a file or with --backup-dir"))
}
// --backup-dir is not set but --suffix is - use the destination as the backupDir
backupDir = fdst
} else {
return nil, fserrors.FatalError(errors.New("internal error: BackupDir called when --backup-dir and --suffix both empty"))
}
if !CanServerSideMove(backupDir) {
return nil, fserrors.FatalError(errors.New("can't use --backup-dir on a remote which doesn't support server side move or copy"))


@@ -32,7 +32,7 @@ type pipe struct {
func newPipe(orderBy string, stats func(items int, totalSize int64), maxBacklog int) (*pipe, error) {
if maxBacklog < 0 {
maxBacklog = (1 << (bits.UintSize - 1)) - 1 // largest positive int
maxBacklog = (1 << (bits.UintSize - 1)) - 1 // largest posititive int
}
less, fraction, err := newLess(orderBy)
if err != nil {
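
The `maxBacklog` default uses `(1 << (bits.UintSize - 1)) - 1` to get the largest positive int portably. Because `bits.UintSize` is an untyped constant (32 or 64), the whole expression is evaluated with arbitrary precision at compile time and never overflows; `math.MaxInt` did not exist until Go 1.17, after this code was written. For example:

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Shift 1 into the would-be sign-bit position, then subtract 1
	// to get the largest positive int for this platform's word size.
	const maxInt = (1 << (bits.UintSize - 1)) - 1
	fmt.Println(maxInt) // 9223372036854775807 on 64-bit builds
}
```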


@@ -40,7 +40,7 @@ type syncCopyMove struct {
deletersWg sync.WaitGroup // for delete before go routine
deleteFilesCh chan fs.Object // channel to receive deletes if delete before
trackRenames bool // set if we should do server side renames
trackRenamesStrategy trackRenamesStrategy // strategies used for tracking renames
trackRenamesStrategy trackRenamesStrategy // stratgies used for tracking renames
dstFilesMu sync.Mutex // protect dstFiles
dstFiles map[string]fs.Object // dst files, always filled
srcFiles map[string]fs.Object // src files, only used if deleteBefore


@@ -1590,7 +1590,7 @@ func TestSyncCopyDest(t *testing.T) {
}
// Test with BackupDir set
func testSyncBackupDir(t *testing.T, backupDir string, suffix string, suffixKeepExtension bool) {
func testSyncBackupDir(t *testing.T, suffix string, suffixKeepExtension bool) {
r := fstest.NewRun(t)
defer r.Finalise()
@@ -1599,23 +1599,7 @@ func testSyncBackupDir(t *testing.T, backupDir string, suffix string, suffixKeep
}
r.Mkdir(context.Background(), r.Fremote)
if backupDir != "" {
fs.Config.BackupDir = r.FremoteName + "/" + backupDir
backupDir += "/"
} else {
fs.Config.BackupDir = ""
backupDir = "dst/"
// Exclude the suffix from the sync otherwise the sync
// deletes the old backup files
flt, err := filter.NewFilter(nil)
require.NoError(t, err)
require.NoError(t, flt.AddRule("- *"+suffix))
oldFlt := filter.Active
filter.Active = flt
defer func() {
filter.Active = oldFlt
}()
}
fs.Config.BackupDir = r.FremoteName + "/backup"
fs.Config.Suffix = suffix
fs.Config.SuffixKeepExtension = suffixKeepExtension
defer func() {
@@ -1643,14 +1627,14 @@ func testSyncBackupDir(t *testing.T, backupDir string, suffix string, suffixKeep
require.NoError(t, err)
// one should be moved to the backup dir and the new one installed
file1.Path = backupDir + "one" + suffix
file1.Path = "backup/one" + suffix
file1a.Path = "dst/one"
// two should be unchanged
// three should be moved to the backup dir
if suffixKeepExtension {
file3.Path = backupDir + "three" + suffix + ".txt"
file3.Path = "backup/three" + suffix + ".txt"
} else {
file3.Path = backupDir + "three.txt" + suffix
file3.Path = "backup/three.txt" + suffix
}
fstest.CheckItems(t, r.Fremote, file1, file2, file3, file1a)
@@ -1668,29 +1652,22 @@ func testSyncBackupDir(t *testing.T, backupDir string, suffix string, suffixKeep
require.NoError(t, err)
// one should be moved to the backup dir and the new one installed
file1a.Path = backupDir + "one" + suffix
file1a.Path = "backup/one" + suffix
file1b.Path = "dst/one"
// two should be unchanged
// three should be moved to the backup dir
if suffixKeepExtension {
file3a.Path = backupDir + "three" + suffix + ".txt"
file3a.Path = "backup/three" + suffix + ".txt"
} else {
file3a.Path = backupDir + "three.txt" + suffix
file3a.Path = "backup/three.txt" + suffix
}
fstest.CheckItems(t, r.Fremote, file1b, file2, file3a, file1a)
}
func TestSyncBackupDir(t *testing.T) {
testSyncBackupDir(t, "backup", "", false)
}
func TestSyncBackupDirWithSuffix(t *testing.T) {
testSyncBackupDir(t, "backup", ".bak", false)
}
func TestSyncBackupDir(t *testing.T) { testSyncBackupDir(t, "", false) }
func TestSyncBackupDirWithSuffix(t *testing.T) { testSyncBackupDir(t, ".bak", false) }
func TestSyncBackupDirWithSuffixKeepExtension(t *testing.T) {
testSyncBackupDir(t, "backup", "-2019-01-01", true)
}
func TestSyncBackupDirSuffixOnly(t *testing.T) {
testSyncBackupDir(t, "", ".bak", false)
testSyncBackupDir(t, "-2019-01-01", true)
}
// Test with Suffix set


@@ -205,7 +205,7 @@ func (dm *dirMap) add(dir string, sent bool) {
if !sent {
return
}
// currentSent == false && sent == true so needs overriding
// currenSent == false && sent == true so needs overriding
}
dm.m[dir] = sent
// Add parents in as unsent
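
The hunk above maintains a map from directory to a sent flag with asymmetric updates: `sent == true` may override a stored `false`, but never the reverse, and every parent directory is recorded as unsent. A runnable sketch of those semantics (type and field names are illustrative):

```go
package main

import (
	"fmt"
	"path"
)

// dirMap sketches the structure above: it maps a directory to whether
// it has been sent, letting true override false but never the reverse.
type dirMap struct{ m map[string]bool }

func (dm *dirMap) add(dir string, sent bool) {
	if old, ok := dm.m[dir]; ok && (old || !sent) {
		return // already sent, or nothing new to record
	}
	// either unseen, or stored false with sent == true: needs overriding
	dm.m[dir] = sent
	// add parents in as unsent
	if parent := path.Dir(dir); parent != "." && parent != "/" && parent != dir {
		dm.add(parent, false)
	}
}

func main() {
	dm := &dirMap{m: map[string]bool{}}
	dm.add("a/b/c", true)
	fmt.Println(dm.m) // map[a:false a/b:false a/b/c:true]
}
```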


@@ -254,7 +254,7 @@ func (r *Run) WriteObjectTo(ctx context.Context, f fs.Fs, remote, content string
}
r.Mkdir(ctx, f)
// calculate all hashes f supports for content
// caclulate all hashes f supports for content
hash, err := hash.NewMultiHasherTypes(f.Hashes())
if err != nil {
r.Fatalf("Failed to make new multi hasher: %v", err)

Some files were not shown because too many files have changed in this diff.