docs: fix typos in comments and messages

@@ -77,7 +77,7 @@ The DOI provider can be set when rclone does not automatically recognize a suppo
 Name: "doi_resolver_api_url",
 Help: `The URL of the DOI resolver API to use.
 
-The DOI resolver can be set for testing or for cases when the the canonical DOI resolver API cannot be used.
+The DOI resolver can be set for testing or for cases when the canonical DOI resolver API cannot be used.
 
 Defaults to "https://doi.org/api".`,
 Required: false,
@@ -403,7 +403,7 @@ This is why this flag is not set as the default.
 
 As a rule of thumb if nearly all of your data is under rclone's root
 directory (the |root/directory| in |onedrive:root/directory|) then
-using this flag will be be a big performance win. If your data is
+using this flag will be a big performance win. If your data is
 mostly not under the root then using this flag will be a big
 performance loss.
 
@@ -1258,7 +1258,7 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
 fs.Debugf(f, "Failed to resolve path using RealPath: %v", err)
 cwd, err := c.sftpClient.Getwd()
 if err != nil {
-fs.Debugf(f, "Failed to to read current directory - using relative paths: %v", err)
+fs.Debugf(f, "Failed to read current directory - using relative paths: %v", err)
 } else {
 f.absRoot = path.Join(cwd, f.root)
 fs.Debugf(f, "Relative path joined with current directory to get absolute path %q", f.absRoot)
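
For context, the fallback in this hunk resolves the remote root to an absolute path: if RealPath fails, the relative root is joined onto the server's working directory. A minimal sketch of the same idea, assuming the github.com/pkg/sftp client; the package and helper names below are illustrative, not rclone's code:

package sftppath

import (
	"path"

	"github.com/pkg/sftp"
)

// absRoot tries the server's canonical path first and otherwise joins the
// relative root onto the current working directory, mirroring the fallback
// shown in the diff above. Sketch only.
func absRoot(c *sftp.Client, root string) string {
	if abs, err := c.RealPath(root); err == nil {
		return abs
	}
	cwd, err := c.Getwd()
	if err != nil {
		// No working directory either - keep using the relative path.
		return root
	}
	return path.Join(cwd, root)
}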
@@ -54,7 +54,7 @@ var SharedOptions = []fs.Option{{
 Name: "chunk_size",
 Help: strings.ReplaceAll(`Above this size files will be chunked.
 
-Above this size files will be chunked into a a |`+segmentsContainerSuffix+`| container
+Above this size files will be chunked into a |`+segmentsContainerSuffix+`| container
 or a |`+segmentsDirectory+`| directory. (See the |use_segments_container| option
 for more info). Default for this is 5 GiB which is its maximum value, which
 means only files above this size will be chunked.
@@ -341,7 +341,7 @@ func (h *testState) preconfigureServer() {
 // The `\\?\` prefix tells Windows APIs to pass strings unmodified to the
 // filesystem without additional parsing [1]. Our workaround is roughly to add
 // the prefix to whichever parameter doesn't have it (when the OS is Windows).
-// I'm not sure this generalizes, but it works for the the kinds of inputs we're
+// I'm not sure this generalizes, but it works for the kinds of inputs we're
 // throwing at it.
 //
 // [1]: https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
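
The workaround the comment describes could look roughly like the helper below: add the extended-length prefix to a path that lacks it when running on Windows. This is a hypothetical sketch (it ignores UNC paths, which would need `\\?\UNC\`), not the test's actual code:

package winpath

import (
	"runtime"
	"strings"
)

// addExtendedPrefix prepends the `\\?\` extended-length prefix on Windows
// when the path does not already carry it, so both parameters reach the
// Windows API in the same form. Illustrative only.
func addExtendedPrefix(p string) string {
	if runtime.GOOS != "windows" || strings.HasPrefix(p, `\\?\`) {
		return p
	}
	return `\\?\` + p
}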
@@ -97,7 +97,7 @@ with the following options:
 - If ` + "`--files-only`" + ` is specified then files will be returned only,
 no directories.
 
-If ` + "`--stat`" + ` is set then the the output is not an array of items,
+If ` + "`--stat`" + ` is set then the output is not an array of items,
 but instead a single JSON blob will be returned about the item pointed to.
 This will return an error if the item isn't found, however on bucket based
 backends (like s3, gcs, b2, azureblob etc) if the item isn't found it will
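
This help text appears to belong to a JSON listing command (presumably rclone lsjson; the command name, remote path, and field access below are assumptions). The point of the hunk is that with `--stat` the output decodes as one object rather than an array, so a caller might consume it like this sketch:

package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

func main() {
	// With --stat: a single JSON object describing the pointed-to item.
	out, err := exec.Command("rclone", "lsjson", "--stat", "remote:path/to/item").Output()
	if err != nil {
		panic(err)
	}
	var item map[string]any
	if err := json.Unmarshal(out, &item); err != nil {
		panic(err)
	}
	fmt.Println("single item:", item["Path"])

	// Without --stat: a JSON array of items.
	out, err = exec.Command("rclone", "lsjson", "remote:path").Output()
	if err != nil {
		panic(err)
	}
	var items []map[string]any
	if err := json.Unmarshal(out, &items); err != nil {
		panic(err)
	}
	fmt.Println("entries:", len(items))
}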
@@ -71,7 +71,7 @@ rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint mountType=m
 rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"CacheMode": 2}' mountOpt='{"AllowOther": true}'
 ` + "```" + `
 
-The vfsOpt are as described in options/get and can be seen in the the
+The vfsOpt are as described in options/get and can be seen in the
 "vfs" section when running and the mountOpt can be seen in the "mount" section:
 
 ` + "```console" + `
@@ -764,7 +764,7 @@ func SetCacheDir(path string) (err error) {
 //
 // To override the default we therefore set environment variable TMPDIR
 // on Unix systems, and both TMP and TEMP on Windows (they are almost exclusively
-// aliases for the same path, and programs may refer to to either of them).
+// aliases for the same path, and programs may refer to either of them).
 // This should make all libraries and forked processes use the same.
 func SetTempDir(path string) (err error) {
 var tempDir string
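
A bare-bones sketch of the environment handling this comment describes (illustrative only, not rclone's SetTempDir): point both the Unix and the Windows conventions for the temporary directory at the same path so libraries and child processes agree.

package tmpdir

import (
	"os"
	"runtime"
)

// setTempDirEnv sets TMPDIR on Unix-like systems, and both TMP and TEMP on
// Windows, to the same directory. Hypothetical helper for illustration.
func setTempDirEnv(dir string) error {
	if runtime.GOOS == "windows" {
		if err := os.Setenv("TMP", dir); err != nil {
			return err
		}
		return os.Setenv("TEMP", dir)
	}
	return os.Setenv("TMPDIR", dir)
}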
@@ -31,7 +31,7 @@ func camelToSnake(in string) string {
 //
 // Builtin types are expected to be encoding as their natural
 // stringificatons as produced by fmt.Sprint except for []string which
-// is expected to be encoded a a CSV with empty array encoded as "".
+// is expected to be encoded as a CSV with empty array encoded as "".
 //
 // Any other types are expected to be encoded by their String()
 // methods and decoded by their `Set(s string) error` methods.
@@ -93,7 +93,7 @@ func StringToInterface(def any, in string) (newValue any, err error) {
 //
 // Builtin types are expected to be encoding as their natural
 // stringificatons as produced by fmt.Sprint except for []string which
-// is expected to be encoded a a CSV with empty array encoded as "".
+// is expected to be encoded as a CSV with empty array encoded as "".
 //
 // Any other types are expected to be encoded by their String()
 // methods and decoded by their `Set(s string) error` methods.
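
The encoding convention these two comments describe can be illustrated with a small sketch (hypothetical helper, not the configstruct code): builtins go through fmt.Sprint, while a []string becomes a single CSV record and the empty slice encodes as "".

package encodeexample

import (
	"bytes"
	"encoding/csv"
	"fmt"
	"strings"
)

// encode renders a value per the convention above: []string as one CSV
// record (empty slice -> ""), everything else via fmt.Sprint. Sketch only.
func encode(v any) (string, error) {
	switch x := v.(type) {
	case []string:
		if len(x) == 0 {
			return "", nil
		}
		var buf bytes.Buffer
		w := csv.NewWriter(&buf)
		if err := w.Write(x); err != nil {
			return "", err
		}
		w.Flush()
		if err := w.Error(); err != nil {
			return "", err
		}
		return strings.TrimRight(buf.String(), "\r\n"), nil
	default:
		return fmt.Sprint(v), nil
	}
}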
@@ -180,7 +180,7 @@ func Decrypt(b io.ReadSeeker) (io.Reader, error) {
 
 // GetPasswordCommand gets the password using the --password-command setting
 //
-// If the the --password-command flag was not in use it returns "", nil
+// If the --password-command flag was not in use it returns "", nil
 func GetPasswordCommand(ctx context.Context) (pass string, err error) {
 ci := fs.GetConfig(ctx)
 if len(ci.PasswordCommand) == 0 {
@@ -225,7 +225,7 @@ func fromTypes(set Set) (map[Type]hash.Hash, error) {
 // single multiwriter, where one write will update all
 // the hashers.
 func toMultiWriter(h map[Type]hash.Hash) io.Writer {
-// Convert to to slice
+// Convert to slice
 var w = make([]io.Writer, 0, len(h))
 for _, v := range h {
 w = append(w, v)
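
The pattern in this hunk - collect the hashers into a slice and wrap them in one io.MultiWriter so a single write feeds them all - can be demonstrated with standard-library hashes (standalone example, not rclone's hash package):

package main

import (
	"crypto/md5"
	"crypto/sha1"
	"fmt"
	"hash"
	"io"
)

func main() {
	// Convert the hashers to a slice of writers, as the function above does.
	hashers := []hash.Hash{md5.New(), sha1.New()}
	writers := make([]io.Writer, 0, len(hashers))
	for _, h := range hashers {
		writers = append(writers, h)
	}
	// One write through the multiwriter updates every hasher at once.
	w := io.MultiWriter(writers...)
	if _, err := io.WriteString(w, "hello, world"); err != nil {
		panic(err)
	}
	for _, h := range hashers {
		fmt.Printf("%x\n", h.Sum(nil))
	}
}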
@@ -79,7 +79,7 @@ type Options struct {
 File string `config:"log_file"` // Log everything to this file
 MaxSize fs.SizeSuffix `config:"log_file_max_size"` // Max size of log file
 MaxBackups int `config:"log_file_max_backups"` // Max backups of log file
-MaxAge fs.Duration `config:"log_file_max_age"` // Max age of of log file
+MaxAge fs.Duration `config:"log_file_max_age"` // Max age of log file
 Compress bool `config:"log_file_compress"` // Set to compress log file
 Format logFormat `config:"log_format"` // Comma separated list of log format options
 UseSyslog bool `config:"syslog"` // Use Syslog for logging
@@ -806,7 +806,7 @@ func TestSyncBasedOnCheckSum(t *testing.T) {
 
 // Create a file and sync it. Change the last modified date and the
 // file contents but not the size. If we're only doing sync by size
-// only, we expect nothing to to be transferred on the second sync.
+// only, we expect nothing to be transferred on the second sync.
 func TestSyncSizeOnly(t *testing.T) {
 ctx := context.Background()
 ctx, ci := fs.AddConfig(ctx)
@@ -843,7 +843,7 @@ func TestSyncSizeOnly(t *testing.T) {
 }
 
 // Create a file and sync it. Keep the last modified date but change
-// the size. With --ignore-size we expect nothing to to be
+// the size. With --ignore-size we expect nothing to be
 // transferred on the second sync.
 func TestSyncIgnoreSize(t *testing.T) {
 ctx := context.Background()
@@ -328,7 +328,7 @@ type Flagger interface {
 // satisfy as non-pointers
 //
 // These are from pflag.Value and need to be tested against
-// non-pointer value due the the way the backend flags are inserted
+// non-pointer value due to the way the backend flags are inserted
 // into the flags.
 type FlaggerNP interface {
 String() string
@@ -46,7 +46,7 @@ type Pool struct {
 }
 
 // totalMemory is a semaphore used to control total buffer usage of
-// all Pools. It it may be nil in which case the total buffer usage
+// all Pools. It may be nil in which case the total buffer usage
 // will not be controlled. It counts memory in active use, it does not
 // count memory cached in the pool.
 var totalMemory *semaphore.Weighted
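
A toy version of the scheme this comment describes, using golang.org/x/sync/semaphore: a weighted semaphore caps the total bytes in active use across pools, and a nil semaphore means no cap. The names and the 64 MiB budget below are made up for illustration and are not rclone's pool implementation.

package bufpool

import (
	"context"

	"golang.org/x/sync/semaphore"
)

// totalMemory limits bytes in active use across all buffers; nil disables
// the limit, matching the behaviour described in the comment above.
var totalMemory = semaphore.NewWeighted(64 << 20) // assumed 64 MiB budget

func getBuffer(ctx context.Context, size int) ([]byte, error) {
	if totalMemory != nil {
		if err := totalMemory.Acquire(ctx, int64(size)); err != nil {
			return nil, err
		}
	}
	return make([]byte, size), nil
}

func putBuffer(buf []byte) {
	if totalMemory != nil {
		totalMemory.Release(int64(len(buf)))
	}
}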
@@ -254,7 +254,7 @@ func (wb *WriteBack) SetID(pid *Handle) {
 //
 // Use SetID to create Handles in advance of calling Add.
 //
-// If modified is false then it it doesn't cancel a pending upload if
+// If modified is false then it doesn't cancel a pending upload if
 // there is one as there is no need.
 func (wb *WriteBack) Add(id Handle, name string, size int64, modified bool, putFn PutFn) Handle {
 wb.mu.Lock()