mirror of https://github.com/rclone/rclone.git synced 2026-01-01 08:03:26 +00:00

Compare commits


1 Commit

237 changed files with 410 additions and 846 deletions

View File

@@ -77,7 +77,7 @@ Make sure you
* Add [documentation](#writing-documentation) for a new feature.
* [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).
When you are done with that push your changes to GitHub:
When you are done with that push your changes to Github:
git push -u origin my-new-feature
@@ -88,7 +88,7 @@ Your changes will then get reviewed and you might get asked to fix some stuff. I
You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
## Using Git and GitHub ##
## Using Git and Github ##
### Committing your changes ###

View File

@@ -53,14 +53,6 @@ doing that so it may be necessary to roll back dependencies to the
version specified by `make updatedirect` in order to get rclone to
build.
## Tidy beta
At some point after the release run
bin/tidy-beta v1.55
where the version number is that of a couple ago to remove old beta binaries.
## Making a point release
If rclone needs a point release due to some horrendous bug:

View File

@@ -1,4 +1,3 @@
// Package alias implements a virtual provider to rename existing remotes.
package alias
import (

View File

@@ -1,4 +1,3 @@
// Package all imports all the backends
package all
import (

View File

@@ -1,7 +1,8 @@
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
// Package azureblob provides an interface to the Microsoft Azure blob object storage system
package azureblob
import (
@@ -1677,14 +1678,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
}
uploadParts := maxUploadParts
uploadParts := int64(maxUploadParts)
if uploadParts < 1 {
uploadParts = 1
} else if uploadParts > maxUploadParts {
uploadParts = maxUploadParts
}
// calculate size of parts/blocks
partSize := chunksize.Calculator(o, src.Size(), uploadParts, o.fs.opt.ChunkSize)
partSize := chunksize.Calculator(o, int(uploadParts), o.fs.opt.ChunkSize)
putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
BufferSize: int(partSize),
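The clamping and `chunksize.Calculator` call above pick a part size so the whole object fits within the backend's maximum part count. A minimal, purely illustrative sketch of that idea (not rclone's actual chunksize package; the names and the doubling strategy are assumptions):

```go
package main

import "fmt"

// calculator returns a chunk size large enough that an object of the
// given size fits in at most maxParts parts, starting from a default
// and doubling until it does. Unknown sizes keep the default.
func calculator(size int64, maxParts int, defaultChunkSize int64) int64 {
	chunkSize := defaultChunkSize
	if size < 0 {
		// unknown size (streaming upload): keep the default
		return chunkSize
	}
	for chunkSize*int64(maxParts) < size {
		chunkSize *= 2
	}
	return chunkSize
}

func main() {
	// e.g. a 100 GiB object, 10,000 parts maximum, 5 MiB default chunk
	fmt.Println(calculator(100<<30, 10000, 5<<20)) // 20971520 (20 MiB)
}
```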

View File

@@ -1,4 +1,3 @@
// Package api provides types used by the Backblaze B2 API.
package api
import (
@@ -239,7 +238,7 @@ type GetFileInfoRequest struct {
// If the original source of the file being uploaded has a last
// modified time concept, Backblaze recommends using
// src_last_modified_millis as the name, and a string holding the base
// 10 number of milliseconds since midnight, January 1, 1970
// 10 number number of milliseconds since midnight, January 1, 1970
// UTC. This fits in a 64 bit integer such as the type "long" in the
// programming language Java. It is intended to be compatible with
// Java's time long. For example, it can be passed directly into the
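To make the convention above concrete, here is a small self-contained sketch (not rclone code) of writing and reading a modification time as the base-10 millisecond string B2 describes:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	modTime := time.Date(2022, 6, 1, 12, 0, 0, 0, time.UTC)

	// encode: milliseconds since 1970-01-01 UTC as a decimal string
	millis := strconv.FormatInt(modTime.UnixMilli(), 10)
	fmt.Println(millis) // 1654084800000

	// decode back into a time.Time
	n, _ := strconv.ParseInt(millis, 10, 64)
	fmt.Println(time.UnixMilli(n).UTC().Equal(modTime)) // true
}
```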

View File

@@ -1,4 +1,4 @@
// Package b2 provides an interface to the Backblaze B2 object storage system.
// Package b2 provides an interface to the Backblaze B2 object storage system
package b2
// FIXME should we remove sha1 checks from here as rclone now supports

View File

@@ -97,7 +97,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else {
chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
chunkSize = chunksize.Calculator(src, maxParts, defaultChunkSize)
parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 {
parts++

View File

@@ -14,7 +14,7 @@ const (
timeFormat = `"` + time.RFC3339 + `"`
)
// Time represents date and time information for the
// Time represents represents date and time information for the
// box API, by using RFC3339
type Time time.Time

View File

@@ -266,7 +266,7 @@ type Fs struct {
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
srv *rest.Client // the connection to the one drive server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry

View File

@@ -1,7 +1,6 @@
//go:build !plan9 && !js
// +build !plan9,!js
// Package cache implements a virtual provider to cache existing remotes.
package cache
import (

View File

@@ -64,7 +64,7 @@ import (
// length of 13 decimals it makes a 7-digit base-36 number.
//
// When transactions is set to the norename style, data chunks will
// keep their temporary chunk names (with the transaction identifier
// keep their temporary chunk names (with the transacion identifier
// suffix). To distinguish them from temporary chunks, the txn field
// of the metadata file is set to match the transaction identifier of
// the data chunks.
@@ -1079,7 +1079,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
// readXactID returns the transaction ID stored in the passed metadata object
func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
// if xactID has already been read and cached return it now
// if xactID has already been read and cahced return it now
if o.xIDCached {
return o.xactID, nil
}

View File

@@ -1,4 +1,4 @@
// Package combine implents a backend to combine multiple remotes in a directory tree
// Package combine implents a backend to combine multipe remotes in a directory tree
package combine
/*

View File

@@ -90,7 +90,7 @@ Generally -1 (default, equivalent to 5) is recommended.
Levels 1 to 9 increase compression at the cost of speed. Going past 6
generally offers very little return.
Level -2 uses Huffman encoding only. Only use if you know what you
Level -2 uses Huffmann encoding only. Only use if you know what you
are doing.
Level 0 turns off compression.`,
Default: sgzip.DefaultCompression,
@@ -130,7 +130,7 @@ type Fs struct {
features *fs.Features // optional features
}
// NewFs constructs an Fs from the path, container:path
// NewFs contstructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
@@ -451,7 +451,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
return f.Fs.Put(ctx, bytes.NewBuffer(buf[:n]), src, options...)
}
// Need to include what we already read
// Need to include what we allready read
in = &ReadCloserWrapper{
Reader: io.MultiReader(bytes.NewReader(buf), in),
Closer: in,
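The ReadCloserWrapper/MultiReader lines above use a common Go pattern: peek at the head of a stream to decide how to handle it, then stitch the consumed bytes back in front of the remaining reader. A minimal standalone sketch of that pattern (illustrative only):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func main() {
	in := io.NopCloser(strings.NewReader("hello, world"))

	// read the first few bytes to inspect them
	buf := make([]byte, 5)
	n, _ := io.ReadFull(in, buf)
	fmt.Printf("peeked %q\n", buf[:n])

	// reconstruct a reader that still yields the full content
	full := io.MultiReader(bytes.NewReader(buf[:n]), in)
	all, _ := io.ReadAll(full)
	fmt.Println(string(all)) // hello, world
}
```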
@@ -731,7 +731,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
}
// If our new object is compressed we have to rename it with the correct size.
// Uncompressed objects don't store the size in the name so we they'll already have the correct name.
// Uncompressed objects don't store the size in the name so we they'll allready have the correct name.
if compressible {
wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
if err != nil {
@@ -742,7 +742,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
return newObj, nil
}
// Temporarily disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
// Temporarely disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
// will break stuff. Right no I can't think of a way to make this work.
// PutUnchecked uploads the object

View File

@@ -131,7 +131,7 @@ type fileNameEncoding interface {
// - we strip the padding character `=`
type caseInsensitiveBase32Encoding struct{}
// EncodeToString encodes a string using the modified version of
// EncodeToString encodes a strign using the modified version of
// base32 encoding.
func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
encoded := base32.HexEncoding.EncodeToString(src)
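A hedged sketch of the encoding the comment above describes — base32hex with the `=` padding stripped and the result lower-cased for case insensitivity; it may differ in detail from the crypt backend's real implementation:

```go
package main

import (
	"encoding/base32"
	"fmt"
	"strings"
)

// encodeToString base32hex-encodes src, drops the "=" padding and
// lower-cases the result so it survives case-insensitive filesystems.
func encodeToString(src []byte) string {
	encoded := base32.HexEncoding.EncodeToString(src)
	encoded = strings.TrimRight(encoded, "=")
	return strings.ToLower(encoded)
}

func main() {
	fmt.Println(encodeToString([]byte("hello world")))
}
```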

View File

@@ -125,7 +125,7 @@ names, or for debugging purposes.`,
This option could help with shortening the encrypted filename. The
suitable option would depend on the way your remote count the filename
length and if it's case sensitive.`,
length and if it's case sensitve.`,
Default: "base32",
Examples: []fs.OptionExample{
{

View File

@@ -1210,7 +1210,6 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
WriteMimeType: true,
CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
FilterAware: true,
}).Fill(ctx, f)
// Create a new authorized Drive client.
@@ -3306,7 +3305,7 @@ drives found and a combined drive.
upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
Adding this to the rclone config file will cause those team drives to
be accessible with the aliases shown. Any illegal characters will be
be accessible with the aliases shown. Any illegal charactes will be
substituted with "_" and duplicate names will have numbers suffixed.
It will also add a remote called AllDrives which shows all the shared
drives combined into one directory tree.

View File

@@ -518,9 +518,6 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
func (f *Fs) InternalTestAgeQuery(t *testing.T) {
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
opt := &filter.Opt{}
err := opt.MaxAge.Set("1h")
assert.NoError(t, err)

View File

@@ -309,7 +309,7 @@ func (b *batcher) Shutdown() {
}
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Committing uploads - please wait...")
fs.Infof(b.f, "Commiting uploads - please wait...")
// show that batcher is shutting down
close(b.closed)
// quit the commitLoop by sending a quitRequest message

View File

@@ -268,7 +268,7 @@ default based on the batch_mode in use.
Advanced: true,
}, {
Name: "batch_commit_timeout",
Help: `Max time to wait for a batch to finish committing`,
Help: `Max time to wait for a batch to finish comitting`,
Default: fs.Duration(10 * time.Minute),
Advanced: true,
}, {
@@ -1669,7 +1669,7 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset
delta := int64(correctOffset) - int64(cursor.Offset)
skip += delta
what := fmt.Sprintf("incorrect offset error received: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
what := fmt.Sprintf("incorrect offset error receved: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
if skip < 0 {
return false, fmt.Errorf("can't seek backwards to correct offset: %s", what)
} else if skip == chunkSize {

View File

@@ -1,4 +1,3 @@
// Package fichier provides an interface to the 1Fichier storage system.
package fichier
import (

View File

@@ -84,7 +84,7 @@ type CopyFileResponse struct {
URLs []FileCopy `json:"urls"`
}
// FileCopy is used in the CopyFileResponse
// FileCopy is used in the the CopyFileResponse
type FileCopy struct {
FromURL string `json:"from_url"`
ToURL string `json:"to_url"`

View File

@@ -19,7 +19,7 @@ const (
timeFormatJSON = `"` + timeFormatParameters + `"`
)
// Time represents date and time information for the
// Time represents represents date and time information for the
// filefabric API
type Time time.Time
@@ -95,7 +95,7 @@ type Status struct {
// Warning string `json:"warning"` // obsolete
}
// Status satisfies the error interface
// Status statisfies the error interface
func (e *Status) Error() string {
return fmt.Sprintf("%s (%s)", e.Message, e.Code)
}

View File

@@ -150,7 +150,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
m configmap.Mapper // to save config
srv *rest.Client // the connection to the server
srv *rest.Client // the connection to the one drive server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenMu sync.Mutex // hold when reading the token
@@ -843,7 +843,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, false)
}
// Wait for the background task to complete if necessary
// Wait for the the background task to complete if necessary
func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) {
if taskID == "" || taskID == "0" {
// No task to wait for

View File

@@ -81,22 +81,8 @@ security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTP.`,
Default: false,
}, {
Name: "concurrency",
Help: strings.Replace(`Maximum number of FTP simultaneous connections, 0 for unlimited.
Note that setting this is very likely to cause deadlocks so it should
be used with care.
If you are doing a sync or copy then make sure concurrency is one more
than the sum of |--transfers| and |--checkers|.
If you use |--check-first| then it just needs to be one more than the
maximum of |--checkers| and |--transfers|.
So for |concurrency 3| you'd use |--checkers 2 --transfers 2
--check-first| or |--checkers 1 --transfers 1|.
`, "|", "`", -1),
Name: "concurrency",
Help: "Maximum number of FTP simultaneous connections, 0 for unlimited.",
Default: 0,
Advanced: true,
}, {

View File

@@ -311,7 +311,7 @@ rclone does if you know the bucket exists already.
Help: `If set this will decompress gzip encoded objects.
It is possible to upload objects to GCS with "Content-Encoding: gzip"
set. Normally rclone will download these files as compressed objects.
set. Normally rclone will download these files files as compressed objects.
If this flag is set then rclone will decompress these files with
"Content-Encoding: gzip" as they are received. This means that rclone
@@ -319,10 +319,6 @@ can't check the size and hash but the file contents will be decompressed.
`,
Advanced: true,
Default: false,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -347,7 +343,6 @@ type Options struct {
StorageClass string `config:"storage_class"`
NoCheckBucket bool `config:"no_check_bucket"`
Decompress bool `config:"decompress"`
Endpoint string `config:"endpoint"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -528,11 +523,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Create a new authorized Drive client.
f.client = oAuthClient
gcsOpts := []option.ClientOption{option.WithHTTPClient(f.client)}
if opt.Endpoint != "" {
gcsOpts = append(gcsOpts, option.WithEndpoint(opt.Endpoint))
}
f.svc, err = storage.NewService(context.Background(), gcsOpts...)
f.svc, err = storage.NewService(context.Background(), option.WithHTTPClient(f.client))
if err != nil {
return nil, fmt.Errorf("couldn't create Google Cloud Storage client: %w", err)
}

View File

@@ -1,4 +1,3 @@
// Package api provides types used by the Google Photos API.
package api
import (

View File

@@ -178,7 +178,7 @@ type Fs struct {
opt Options // parsed options
features *fs.Features // optional features
unAuth *rest.Client // unauthenticated http client
srv *rest.Client // the connection to the server
srv *rest.Client // the connection to the one drive server
ts *oauthutil.TokenSource // token source for oauth2
pacer *fs.Pacer // To pace the API calls
startTime time.Time // time Fs was started - used for datestamps

View File

@@ -1,7 +1,6 @@
//go:build !plan9
// +build !plan9
// Package hdfs provides an interface to the HDFS storage system.
package hdfs
import (

View File

@@ -330,7 +330,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, transaction)
}
// Do not allow the root-prefix to be nonexistent nor a directory,
// Do not allow the root-prefix to be non-existent nor a directory,
// but it can be empty.
if f.opt.RootPrefix != "" {
item, err := f.fetchMetadataForPath(ctx, f.opt.RootPrefix, api.HiDriveObjectNoMetadataFields)
@@ -623,7 +623,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// should be retried after the parent-directories of the destination have been created.
// If so, it will create the parent-directories.
//
// If any errors arise while finding the source or
// If any errors arrise while finding the source or
// creating the parent-directory those will be returned.
// Otherwise returns the originalError.
func (f *Fs) shouldRetryAndCreateParents(ctx context.Context, destinationPath string, sourcePath string, originalError error) (bool, error) {
@@ -961,7 +961,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} else {
_, _, err = o.fs.uploadFileChunked(ctx, resolvedPath, in, modTime, int(o.fs.opt.UploadChunkSize), o.fs.opt.UploadConcurrency)
}
// Try to check if object was updated, either way.
// Try to check if object was updated, eitherway.
// Metadata should be updated even if the upload fails.
info, metaErr = o.fs.fetchMetadataForPath(ctx, resolvedPath, api.HiDriveObjectWithMetadataFields)
} else {

View File

@@ -138,7 +138,7 @@ var testTable = []struct {
// pattern describes how to use data to construct the hash-input.
// For every entry n at even indices this repeats the data n times.
// For every entry m at odd indices this repeats a null-byte m times.
// The input-data is constructed by concatenating the results in order.
// The input-data is constructed by concatinating the results in order.
pattern []int64
out []byte
name string
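The pattern field described above is effectively a tiny construction algorithm. A self-contained sketch of how such a pattern could be expanded into hash input (function name and types are assumptions, not the test's actual helper):

```go
package main

import (
	"bytes"
	"fmt"
)

// buildInput expands a pattern: entries at even indices repeat the data
// that many times, entries at odd indices insert that many null bytes,
// and the results are concatenated in order.
func buildInput(data []byte, pattern []int64) []byte {
	var buf bytes.Buffer
	for i, n := range pattern {
		if i%2 == 0 {
			buf.Write(bytes.Repeat(data, int(n)))
		} else {
			buf.Write(bytes.Repeat([]byte{0}, int(n)))
		}
	}
	return buf.Bytes()
}

func main() {
	fmt.Printf("%q\n", buildInput([]byte("ab"), []int64{2, 3, 1}))
	// "abab\x00\x00\x00ab"
}
```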

View File

@@ -1,4 +1,3 @@
// Package internal provides utilities for HiDrive.
package internal
import (

View File

@@ -227,7 +227,7 @@ type Object struct {
rawData json.RawMessage
}
// IAFile represents a subset of object in MetadataResponse.Files
// IAFile reprensents a subset of object in MetadataResponse.Files
type IAFile struct {
Name string `json:"name"`
// Source string `json:"source"`
@@ -243,7 +243,7 @@ type IAFile struct {
rawData json.RawMessage
}
// MetadataResponse represents subset of the JSON object returned by (frontend)/metadata/
// MetadataResponse reprensents subset of the JSON object returned by (frontend)/metadata/
type MetadataResponse struct {
Files []IAFile `json:"files"`
ItemSize int64 `json:"item_size"`
@@ -1273,7 +1273,7 @@ func trimPathPrefix(s, prefix string, enc encoder.MultiEncoder) string {
return enc.ToStandardPath(strings.TrimPrefix(s, prefix+"/"))
}
// mimics urllib.parse.quote() on Python; exclude / from url.PathEscape
// mimicks urllib.parse.quote() on Python; exclude / from url.PathEscape
func quotePath(s string) string {
seg := strings.Split(s, "/")
newValues := []string{}
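A minimal sketch of the idea behind quotePath — escape each path segment with url.PathEscape while keeping the `/` separators, similar to Python's urllib.parse.quote(s, safe="/"); illustrative only, not the backend's exact code:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// quotePath escapes each segment of a slash-separated path but leaves
// the "/" separators intact.
func quotePath(s string) string {
	segs := strings.Split(s, "/")
	for i, seg := range segs {
		segs[i] = url.PathEscape(seg)
	}
	return strings.Join(segs, "/")
}

func main() {
	fmt.Println(quotePath("dir with spaces/file#1.txt"))
	// dir%20with%20spaces/file%231.txt
}
```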

View File

@@ -1,4 +1,3 @@
// Package api provides types used by the Jottacloud API.
package api
import (

View File

@@ -1,4 +1,3 @@
// Package jottacloud provides an interface to the Jottacloud storage system.
package jottacloud
import (
@@ -1418,7 +1417,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)
// if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?)
// if destination was a trashed file then after a successfull copy the copied file is still in trash (bug in api?)
if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
fs.Debugf(src, "Server-side copied to trashed destination, restoring")
info, err = f.createOrUpdate(ctx, remote, srcObj.modTime, srcObj.size, srcObj.md5)

View File

@@ -1,4 +1,3 @@
// Package koofr provides an interface to the Koofr storage system.
package koofr
import (
@@ -668,7 +667,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
//
// https://app.koofr.net/content/links/39a6cc01-3b23-477a-8059-c0fb3b0f15de/files/get?path=%2F
//
// I am not sure about meaning of "path" parameter; in my experiments
// I am not sure about meaning of "path" parameter; in my expriments
// it is always "%2F", and omitting it or putting any other value
// results in 404.
//

View File

@@ -22,7 +22,6 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
@@ -235,16 +234,15 @@ type Options struct {
// Fs represents a local filesystem rooted at root
type Fs struct {
name string // the name of the remote
root string // The root directory (OS path)
opt Options // parsed config options
features *fs.Features // optional features
dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
xattrSupported int32 // whether xattrs are supported (atomic access)
name string // the name of the remote
root string // The root directory (OS path)
opt Options // parsed config options
features *fs.Features // optional features
dev uint64 // device number of root node
precisionOk sync.Once // Whether we need to read the precision
precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
@@ -288,9 +286,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
dev: devUnset,
lstat: os.Lstat,
}
if xattrSupported {
f.xattrSupported = 1
}
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
f.features = (&fs.Features{
CaseInsensitive: f.caseInsensitive(),
@@ -300,7 +295,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
FilterAware: true,
}).Fill(ctx, f)
if opt.FollowSymlinks {
f.lstat = os.Stat
@@ -445,8 +439,6 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
filter, useFilter := filter.GetConfig(ctx), filter.GetUseFilter(ctx)
fsDirPath := f.localPath(dir)
_, err = os.Stat(fsDirPath)
if err != nil {
@@ -497,13 +489,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue
}
if fierr != nil {
// Don't report errors on any file names that are excluded
if useFilter {
newRemote := f.cleanRemote(dir, name)
if !filter.IncludeRemote(newRemote) {
continue
}
}
err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
@@ -521,11 +506,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
name := fi.Name()
mode := fi.Mode()
newRemote := f.cleanRemote(dir, name)
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
continue
}
// Follow symlinks if required
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
localPath := filepath.Join(fsDirPath, name)

View File

@@ -9,13 +9,11 @@ import (
"path"
"path/filepath"
"runtime"
"sort"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
@@ -192,7 +190,7 @@ func TestHashOnUpdate(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
// Reupload it with different contents but same size and timestamp
// Reupload it with diferent contents but same size and timestamp
var b = bytes.NewBufferString("CONTENT")
src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
err = o.Update(ctx, b, src)
@@ -368,36 +366,3 @@ func TestMetadata(t *testing.T) {
})
}
func TestFilter(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now()
r.WriteFile("included", "included file", when)
r.WriteFile("excluded", "excluded file", when)
f := r.Flocal.(*Fs)
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
// Add a filter
ctx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ included"))
require.NoError(t, fi.AddRule("- *"))
// Check listing without use filter flag
entries, err := f.List(ctx, "")
require.NoError(t, err)
sort.Sort(entries)
require.Equal(t, "[excluded included]", fmt.Sprint(entries))
// Add user filter flag
ctx = filter.SetUseFilter(ctx, true)
// Check listing with use filter flag
entries, err = f.List(ctx, "")
require.NoError(t, err)
sort.Sort(entries)
require.Equal(t, "[included]", fmt.Sprint(entries))
}

View File

@@ -9,7 +9,7 @@ import (
const haveSetBTime = false
// setBTime changes the birth time of the file passed in
// setBTime changes the the birth time of the file passed in
func setBTime(name string, btime time.Time) error {
// Does nothing
return nil

View File

@@ -11,7 +11,7 @@ import (
const haveSetBTime = true
// setBTime sets the birth time of the file passed in
// setBTime sets the the birth time of the file passed in
func setBTime(name string, btime time.Time) (err error) {
h, err := syscall.Open(name, os.O_RDWR, 0755)
if err != nil {

View File

@@ -6,8 +6,6 @@ package local
import (
"fmt"
"strings"
"sync/atomic"
"syscall"
"github.com/pkg/xattr"
"github.com/rclone/rclone/fs"
@@ -18,30 +16,12 @@ const (
xattrSupported = xattr.XATTR_SUPPORTED
)
// Check to see if the error supplied is a not supported error, and if
// so, disable xattrs
func (f *Fs) xattrIsNotSupported(err error) bool {
xattrErr, ok := err.(*xattr.Error)
if !ok {
return false
}
// Xattrs not supported can be ENOTSUP or ENOATTR or EINVAL (on Solaris)
if xattrErr.Err == syscall.EINVAL || xattrErr.Err == syscall.ENOTSUP || xattrErr.Err == xattr.ENOATTR {
// Show xattrs not supported
if atomic.CompareAndSwapInt32(&f.xattrSupported, 1, 0) {
fs.Errorf(f, "xattrs not supported - disabling: %v", err)
}
return true
}
return false
}
// getXattr returns the extended attributes for an object
//
// It doesn't return any attributes owned by this backend in
// metadataKeys
func (o *Object) getXattr() (metadata fs.Metadata, err error) {
if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
if !xattrSupported {
return nil, nil
}
var list []string
@@ -51,9 +31,6 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
list, err = xattr.LList(o.path)
}
if err != nil {
if o.fs.xattrIsNotSupported(err) {
return nil, nil
}
return nil, fmt.Errorf("failed to read xattr: %w", err)
}
if len(list) == 0 {
@@ -68,9 +45,6 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
v, err = xattr.LGet(o.path, k)
}
if err != nil {
if o.fs.xattrIsNotSupported(err) {
return nil, nil
}
return nil, fmt.Errorf("failed to read xattr key %q: %w", k, err)
}
k = strings.ToLower(k)
@@ -90,7 +64,7 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
//
// It doesn't set any attributes owned by this backend in metadataKeys
func (o *Object) setXattr(metadata fs.Metadata) (err error) {
if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
if !xattrSupported {
return nil
}
for k, value := range metadata {
@@ -106,9 +80,6 @@ func (o *Object) setXattr(metadata fs.Metadata) (err error) {
err = xattr.LSet(o.path, k, v)
}
if err != nil {
if o.fs.xattrIsNotSupported(err) {
return nil
}
return fmt.Errorf("failed to set xattr key %q: %w", k, err)
}
}

View File

@@ -1,4 +1,3 @@
// Package api provides types used by the Mail.ru API.
package api
import (

View File

@@ -1,4 +1,3 @@
// Package mailru provides an interface to the Mail.ru Cloud storage system.
package mailru
import (

View File

@@ -347,7 +347,7 @@ func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *
}
}
if err != nil {
return nil, fmt.Errorf("internal error: mkdir called with nonexistent root node: %w", err)
return nil, fmt.Errorf("internal error: mkdir called with non-existent root node: %w", err)
}
// i is number of directories to create (may be 0)
// node is directory to create them from
@@ -387,7 +387,7 @@ func (f *Fs) findRoot(ctx context.Context, create bool) (*mega.Node, error) {
return f._rootNode, nil
}
// Check for preexisting root
// Check for pre-existing root
absRoot := f.srv.FS.GetRoot()
node, err := f.findDir(absRoot, f.root)
//log.Printf("findRoot findDir %p %v", node, err)

View File

@@ -118,7 +118,7 @@ type Fs struct {
filetype string // dir, file or symlink
dirscreated map[string]bool // if implicit dir has been created already
dirscreatedMutex sync.Mutex // mutex to protect dirscreated
statcache map[string][]File // cache successful stat requests
statcache map[string][]File // cache successfull stat requests
statcacheMutex sync.RWMutex // RWMutex to protect statcache
}
@@ -424,7 +424,7 @@ func (f *Fs) getFileName(file *File) string {
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.filetype == "" {
// This happens in two scenarios.
// 1. NewFs is done on a nonexistent object, then later rclone attempts to List/ListR this NewFs.
// 1. NewFs is done on a non-existent object, then later rclone attempts to List/ListR this NewFs.
// 2. List/ListR is called from the context of test_all and not the regular rclone binary.
err := f.initFs(ctx, dir)
if err != nil {
@@ -488,7 +488,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
if f.filetype == "" {
// This happens in two scenarios.
// 1. NewFs is done on a nonexistent object, then later rclone attempts to List/ListR this NewFs.
// 1. NewFs is done on a non-existent object, then later rclone attempts to List/ListR this NewFs.
// 2. List/ListR is called from the context of test_all and not the regular rclone binary.
err := f.initFs(ctx, dir)
if err != nil {

View File

@@ -1,4 +1,5 @@
// Package api provides types used by the OneDrive API.
// Types passed and returned to and from the API
package api
import (
@@ -13,7 +14,7 @@ const (
PackageTypeOneNote = "oneNote"
)
// Error is returned from OneDrive when things go wrong
// Error is returned from one drive when things go wrong
type Error struct {
ErrorInfo struct {
Code string `json:"code"`
@@ -70,7 +71,7 @@ type Drive struct {
Quota Quota `json:"quota"`
}
// Timestamp represents date and time information for the
// Timestamp represents represents date and time information for the
// OneDrive API, by using ISO 8601 and is always in UTC time.
type Timestamp time.Time

View File

@@ -600,14 +600,14 @@ type Options struct {
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote OneDrive
// Fs represents a remote one drive
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
ci *fs.ConfigInfo // global config
features *fs.Features // optional features
srv *rest.Client // the connection to the OneDrive server
srv *rest.Client // the connection to the one drive server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
@@ -615,7 +615,7 @@ type Fs struct {
driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
}
// Object describes a OneDrive object
// Object describes a one drive object
//
// Will definitely have info but maybe not meta
type Object struct {
@@ -645,7 +645,7 @@ func (f *Fs) Root() string {
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("OneDrive root '%s'", f.root)
return fmt.Sprintf("One drive root '%s'", f.root)
}
// Features returns the optional features of this Fs
@@ -653,7 +653,7 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// parsePath parses a OneDrive 'url'
// parsePath parses a one drive 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return

View File

@@ -1,4 +1,3 @@
// Package opendrive provides an interface to the OpenDrive storage system.
package opendrive
import (

View File

@@ -13,7 +13,7 @@ const (
timeFormat = `"` + time.RFC1123Z + `"`
)
// Time represents date and time information for the
// Time represents represents date and time information for the
// pcloud API, by using RFC1123Z
type Time time.Time

View File

@@ -1,4 +1,3 @@
// Package putio provides an interface to the put.io storage system.
package putio
import (

View File

@@ -1,8 +1,9 @@
// Package qingstor provides an interface to QingStor object storage
// Home: https://www.qingcloud.com/
//go:build !plan9 && !js
// +build !plan9,!js
// Package qingstor provides an interface to QingStor object storage
// Home: https://www.qingcloud.com/
package qingstor
import (

View File

@@ -2009,7 +2009,7 @@ See [the time option docs](/docs/#time-option) for valid formats.
Help: `If set this will decompress gzip encoded objects.
It is possible to upload objects to S3 with "Content-Encoding: gzip"
set. Normally rclone will download these files as compressed objects.
set. Normally rclone will download these files files as compressed objects.
If this flag is set then rclone will decompress these files with
"Content-Encoding: gzip" as they are received. This means that rclone
@@ -2116,7 +2116,7 @@ type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
MaxUploadParts int `config:"max_upload_parts"`
MaxUploadParts int64 `config:"max_upload_parts"`
DisableChecksum bool `config:"disable_checksum"`
SharedCredentialsFile string `config:"shared_credentials_file"`
Profile string `config:"profile"`
@@ -4718,10 +4718,10 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
if size == -1 {
warnStreamUpload.Do(func() {
fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
f.opt.ChunkSize, fs.SizeSuffix(int64(partSize)*int64(uploadParts)))
f.opt.ChunkSize, fs.SizeSuffix(int64(partSize)*uploadParts))
})
} else {
partSize = chunksize.Calculator(o, size, uploadParts, f.opt.ChunkSize)
partSize = chunksize.Calculator(o, int(uploadParts), f.opt.ChunkSize)
}
memPool := f.getMemoryPool(int64(partSize))
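The streaming-upload warning above is simple arithmetic: when the size is unknown, the maximum file size is the chunk size multiplied by the maximum number of parts. A tiny worked example, assuming a 5 MiB chunk size and a 10,000-part limit:

```go
package main

import "fmt"

func main() {
	// Assuming a 5 MiB chunk size and a 10,000 part limit (typical S3
	// multipart limits), the largest file a streaming upload can reach:
	const chunkSize = 5 << 20 // 5 MiB
	const maxParts = 10000
	maxSize := int64(chunkSize) * int64(maxParts)
	fmt.Printf("%d bytes (~%.1f GiB)\n", maxSize, float64(maxSize)/(1<<30))
	// 52428800000 bytes (~48.8 GiB)
}
```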
@@ -5199,7 +5199,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
var head s3.HeadObjectOutput
//structs.SetFrom(&head, &req)
setFrom_s3HeadObjectOutput_s3PutObjectInput(&head, &req)
head.ETag = &md5sumHex // doesn't matter quotes are missing
head.ETag = &md5sumHex // doesn't matter quotes are misssing
head.ContentLength = &size
// If we have done a single part PUT request then we can read these
if gotEtag != "" {

View File

@@ -78,7 +78,7 @@ func (f *Fs) InternalTestMetadata(t *testing.T) {
}
t.Run("GzipEncoding", func(t *testing.T) {
// Test that the gzipped file we uploaded can be
// Test that the gziped file we uploaded can be
// downloaded with and without decompression
checkDownload := func(wantContents string, wantSize int64, wantHash string) {
gotContents := fstests.ReadObject(ctx, t, o, -1)
@@ -116,7 +116,7 @@ func (f *Fs) InternalTestNoHead(t *testing.T) {
defer func() {
assert.NoError(t, obj.Remove(ctx))
}()
// PutTestcontents checks the received object
// PutTestcontests checks the received object
}

View File

@@ -1,4 +1,3 @@
// Package api provides types used by the Seafile API.
package api
// Some api objects are duplicated with only small differences,

View File

@@ -1,4 +1,3 @@
// Package seafile provides an interface to the Seafile storage system.
package seafile
import (
@@ -137,7 +136,7 @@ type Fs struct {
features *fs.Features // optional features
endpoint *url.URL // URL of the host
endpointURL string // endpoint as a string
srv *rest.Client // the connection to the server
srv *rest.Client // the connection to the one drive server
pacer *fs.Pacer // pacer for API calls
authMu sync.Mutex // Mutex to protect library decryption
createDirMutex sync.Mutex // Protect creation of directories

View File

@@ -1,7 +1,8 @@
// Package sftp provides a filesystem interface using github.com/pkg/sftp
//go:build !plan9
// +build !plan9
// Package sftp provides a filesystem interface using github.com/pkg/sftp
package sftp
import (
@@ -275,24 +276,19 @@ Set to 0 to keep connections indefinitely.
Name: "chunk_size",
Help: `Upload and download chunk size.
This controls the maximum size of payload in SFTP protocol packets.
The RFC limits this to 32768 bytes (32k), which is the default. However,
a lot of servers support larger sizes, typically limited to a maximum
total package size of 256k, and setting it larger will increase transfer
speed dramatically on high latency links. This includes OpenSSH, and,
for example, using the value of 255k works well, leaving plenty of room
for overhead while still being within a total packet size of 256k.
This controls the maximum packet size used in the SFTP protocol. The
RFC limits this to 32768 bytes (32k), however a lot of servers
support larger sizes and setting it larger will increase transfer
speed dramatically on high latency links.
Make sure to test thoroughly before using a value higher than 32k,
and only use it if you always connect to the same server or after
sufficiently broad testing. If you get errors such as
"failed to send packet payload: EOF", lots of "connection lost",
or "corrupted on transfer", when copying a larger file, try lowering
the value. The server run by [rclone serve sftp](/commands/rclone_serve_sftp)
sends packets with standard 32k maximum payload so you must not
set a different chunk_size when downloading files, but it accepts
packets up to the 256k total size, so for uploads the chunk_size
can be set as for the OpenSSH example above.
Only use a setting higher than 32k if you always connect to the same
server or after sufficiently broad testing.
For example using the value of 252k with OpenSSH works well with its
maximum packet size of 256k.
If you get the error "failed to send packet header: EOF" when copying
a large file, try lowering this number.
`,
Default: 32 * fs.Kibi,
Advanced: true,

View File

@@ -1,4 +1,3 @@
// Package api provides types used by the Sia API.
package api
import (

View File

@@ -1,4 +1,3 @@
// Package sia provides an interface to the Sia storage system.
package sia
import (

View File

@@ -200,7 +200,7 @@ type Fs struct {
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
srv *rest.Client // the connection to the one drive server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
m configmap.Mapper // config file access

View File

@@ -16,8 +16,8 @@ func TestInternalUrlEncode(t *testing.T) {
want string
}{
{"", ""},
{"abcdefghijklmnopqrstuvwxyz", "abcdefghijklmnopqrstuvwxyz"},
{"ABCDEFGHIJKLMNOPQRSTUVWXYZ", "ABCDEFGHIJKLMNOPQRSTUVWXYZ"},
{"abcdefghijklmopqrstuvwxyz", "abcdefghijklmopqrstuvwxyz"},
{"ABCDEFGHIJKLMOPQRSTUVWXYZ", "ABCDEFGHIJKLMOPQRSTUVWXYZ"},
{"0123456789", "0123456789"},
{"abc/ABC/123", "abc/ABC/123"},
{" ", "%20%20%20"},

View File

@@ -1,4 +1,3 @@
// Package policy provides utilities for the union implementation.
package policy
import (

View File

@@ -1,4 +1,3 @@
// Package union implements a virtual provider to join existing remotes.
package union
import (

View File

@@ -1,4 +1,3 @@
// Package upstream provides utility functionality to union.
package upstream
import (

View File

@@ -1,4 +1,3 @@
// Package api provides types used by the Uptobox API.
package api
import "fmt"
@@ -80,7 +79,7 @@ type UploadInfo struct {
} `json:"data"`
}
// UploadResponse is the response to a successful upload
// UploadResponse is the respnse to a successful upload
type UploadResponse struct {
Files []struct {
Name string `json:"name"`

View File

@@ -1,4 +1,3 @@
// Package uptobox provides an interface to the Uptobox storage system.
package uptobox
import (
@@ -163,7 +162,7 @@ func (f *Fs) splitPathFull(pth string) (string, string) {
return "//" + fullPath[:i], fullPath[i+1:]
}
// splitPath is modified splitPath version that doesn't include the separator
// splitPath is modified splitPath version that doesn't include the seperator
// in the base path
func (f *Fs) splitPath(pth string) (string, string) {
// chop of any leading or trailing '/'
@@ -479,7 +478,7 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
// yes it does take 4 requests if we're uploading to root and 6+ if we're uploading to any subdir :(
// yes it does take take 4 requests if we're uploading to root and 6+ if we're uploading to any subdir :(
// create upload request
opts := rest.Opts{
@@ -757,7 +756,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
if err != nil {
return fmt.Errorf("dirmove: source not found: %w", err)
}
// check if the destination already exists
// check if the destination allready exists
dstPath := f.dirPath(dstRemote)
_, err = f.readMetaDataForPath(ctx, dstPath, &api.MetadataRequestOptions{Limit: 1})
if err == nil {
@@ -782,7 +781,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
needMove := srcBase != dstBase
// if we have to rename we'll have to use a temporary name since
// there could already be a directory with the same name as the src directory
// there could allready be a directory with the same name as the src directory
if needRename {
// rename to a temporary name
tmpName := "rcloneTemp" + random.String(8)

View File

@@ -148,7 +148,7 @@ type Fs struct {
features *fs.Features // optional features
endpoint *url.URL // URL of the host
endpointURL string // endpoint as a string
srv *rest.Client // the connection to the server
srv *rest.Client // the connection to the one drive server
pacer *fs.Pacer // pacer for API calls
precision time.Duration // mod time precision
canStream bool // set if can stream

View File

@@ -1,4 +1,3 @@
// Package api provides types used by the Yandex API.
package api
import (

View File

@@ -1,4 +1,3 @@
// Package yandex provides an interface to the Yandex storage system.
package yandex
import (

View File

@@ -1,4 +1,3 @@
// Package api provides types used by the Zoho API.
package api
import (
@@ -6,7 +5,7 @@ import (
"time"
)
// Time represents date and time information for Zoho
// Time represents represents date and time information for Zoho
// Zoho uses milliseconds since unix epoch (Java currentTimeMillis)
type Time time.Time

View File

@@ -150,8 +150,8 @@ func init() {
return workspace.ID, workspace.Attributes.Name
})
case "workspace_end":
workspaceID := config.Result
m.Set(configRootID, workspaceID)
worksspaceID := config.Result
m.Set(configRootID, worksspaceID)
return nil, nil
}
return nil, fmt.Errorf("unknown state %q", config.State)
@@ -206,7 +206,7 @@ type Fs struct {
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
srv *rest.Client // the connection to the one drive server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
}
@@ -1264,7 +1264,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
// upload was successful, need to delete old object before rename
// upload was successfull, need to delete old object before rename
if err = o.Remove(ctx); err != nil {
return fmt.Errorf("failed to remove old object: %w", err)
}

View File

@@ -15,7 +15,6 @@ else
fi
rclone ${dry_run} -vv -P --checkers 16 --transfers 16 delete \
--fast-list \
--include "/${version}**" \
--include "/branch/*/${version}**" \
--include "/branch/${version}**" \
memstore:beta-rclone-org

View File

@@ -1,4 +1,3 @@
// Package about provides the about command.
package about
import (

View File

@@ -1,4 +1,3 @@
// Package authorize provides the authorize command.
package authorize
import (

View File

@@ -1,4 +1,3 @@
// Package backend provides the backend command.
package backend
import (

View File

@@ -290,7 +290,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
return
}
// excessDeletes checks whether number of deletes is within allowed range
// exccessDeletes checks whether number of deletes is within allowed range
func (ds *deltaSet) excessDeletes() bool {
maxDelete := ds.opt.MaxDelete
maxRatio := float64(maxDelete) / 100.0
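Illustrative only: the kind of percentage check excessDeletes describes, with hypothetical names — report excess when deletes exceed a configured fraction of the files considered:

```go
package main

import "fmt"

// excessDeletes reports whether the number of deletes exceeds the
// allowed percentage of the total files considered.
func excessDeletes(deletes, total, maxDeletePercent int) bool {
	if maxDeletePercent >= 100 || total == 0 {
		return false
	}
	maxRatio := float64(maxDeletePercent) / 100.0
	return float64(deletes)/float64(total) > maxRatio
}

func main() {
	fmt.Println(excessDeletes(10, 40, 50)) // false: 25% <= 50%
	fmt.Println(excessDeletes(30, 40, 50)) // true: 75% > 50%
}
```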

View File

@@ -15,7 +15,7 @@ func makeHelp(help string) string {
return replacer.Replace(help)
}
var shortHelp = `Perform bidirectional synchronization between two paths.`
var shortHelp = `Perform bidirectonal synchronization between two paths.`
var rcHelp = makeHelp(`This takes the following parameters

View File

@@ -1,7 +1,6 @@
//go:build !plan9 && !js
// +build !plan9,!js
// Package cachestats provides the cachestats command.
package cachestats
import (

View File

@@ -1,4 +1,3 @@
// Package cat provides the cat command.
package cat
import (

View File

@@ -1,4 +1,3 @@
// Package check provides the check command.
package check
import (

View File

@@ -1,4 +1,3 @@
// Package checksum provides the checksum command.
package checksum
import (

View File

@@ -1,4 +1,3 @@
// Package cleanup provides the cleanup command.
package cleanup
import (

View File

@@ -1,10 +1,11 @@
// Package cmount implements a FUSE mounting system for rclone remotes.
//
// This uses the cgo based cgofuse library
//go:build cmount && ((linux && cgo) || (darwin && cgo) || (freebsd && cgo) || windows)
// +build cmount
// +build linux,cgo darwin,cgo freebsd,cgo windows
// Package cmount implements a FUSE mounting system for rclone remotes.
//
// This uses the cgo based cgofuse library
package cmount
import (

View File

@@ -1,10 +1,9 @@
// Build for macos with the brew tag to handle the absence
// of fuse and print an appropriate error message
//go:build brew && darwin
// +build brew,darwin
// Package cmount implements a FUSE mounting system for rclone remotes.
//
// Build for macos with the brew tag to handle the absence
// of fuse and print an appropriate error message
package cmount
import (

View File

@@ -3,8 +3,6 @@
// +build linux,cgo darwin,cgo freebsd,cgo windows
// +build !race !windows
// Package cmount implements a FUSE mounting system for rclone remotes.
//
// FIXME this doesn't work with the race detector under Windows either
// hanging or producing lots of differences.

View File

@@ -1,11 +1,10 @@
// Build for cmount for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build !((linux && cgo && cmount) || (darwin && cgo && cmount) || (freebsd && cgo && cmount) || (windows && cmount))
// +build !linux !cgo !cmount
// +build !darwin !cgo !cmount
// +build !freebsd !cgo !cmount
// +build !windows !cmount
// Package cmount implements a FUSE mounting system for rclone remotes.
//
// Build for cmount for unsupported platforms to stop go complaining
// about "no buildable Go source files".
package cmount

View File

@@ -80,7 +80,7 @@ func handleDefaultMountpath() (string, error) {
func handleNetworkShareMountpath(mountpath string, opt *mountlib.Options) (string, error) {
// Assuming mount path is a valid network share path (UNC format, "\\Server\Share").
// Always mount as network drive, regardless of the NetworkMode option.
// Find an unused drive letter to use as mountpoint, the supplied path can
// Find an unused drive letter to use as mountpoint, the the supplied path can
// be used as volume prefix (network share path) instead of mountpoint.
if !opt.NetworkMode {
fs.Debugf(nil, "Forcing --network-mode because mountpoint path is network share UNC format")

View File

@@ -1,4 +1,3 @@
// Package config provides the config command.
package config
import (
@@ -140,7 +139,7 @@ are 100% certain you are already passing obscured passwords then use
|rclone config password| command.
The flag |--non-interactive| is for use by applications that wish to
configure rclone themselves, rather than using rclone's text based
configure rclone themeselves, rather than using rclone's text based
configuration questions. If this flag is set, and rclone needs to ask
the user a question, a JSON blob will be returned with the question in
it.

View File

@@ -1,4 +1,3 @@
// Package copy provides the copy command.
package copy
import (

View File

@@ -1,4 +1,3 @@
// Package copyto provides the copyto command.
package copyto
import (

View File

@@ -1,4 +1,3 @@
// Package copyurl provides the copyurl command.
package copyurl
import (

View File

@@ -1,4 +1,3 @@
// Package cryptcheck provides the cryptcheck command.
package cryptcheck
import (

View File

@@ -1,4 +1,3 @@
// Package cryptdecode provides the cryptdecode command.
package cryptdecode
import (

View File

@@ -1,4 +1,3 @@
// Package dedupe provides the dedupe command.
package dedupe
import (

View File

@@ -1,4 +1,3 @@
// Package delete provides the delete command.
package delete
import (

View File

@@ -1,4 +1,3 @@
// Package deletefile provides the deletefile command.
package deletefile
import (

View File

@@ -1,4 +1,3 @@
// Package genautocomplete provides the genautocomplete command.
package genautocomplete
import (

View File

@@ -1,4 +1,3 @@
// Package gendocs provides the gendocs command.
package gendocs
import (

View File

@@ -1,4 +1,3 @@
// Package hashsum provides the hashsum command.
package hashsum
import (
@@ -99,7 +98,7 @@ For the MD5 and SHA1 algorithms there are also dedicated commands,
This command can also hash data received on standard input (stdin),
by not passing a remote:path, or by passing a hyphen as remote:path
when there is data to read (if not, the hyphen will be treated literally,
when there is data to read (if not, the hypen will be treated literaly,
as a relative path).
Run without a hash to see the list of all supported hashes, e.g.

View File

@@ -343,7 +343,7 @@ func showBackend(name string) {
defaultValue := opt.GetValue()
// Default value and Required are related: Required means option must
// have a value, but if there is a default then a value does not have
// to be explicitly set and then Required makes no difference.
// to be explicitely set and then Required makes no difference.
if defaultValue != "" {
fmt.Printf("- Default: %s\n", quoteString(defaultValue))
} else {

View File

@@ -1,4 +1,3 @@
// Package link provides the link command.
package link
import (

Some files were not shown because too many files have changed in this diff.