Mirror of https://github.com/rclone/rclone.git, synced 2025-12-15 15:53:41 +00:00
all: fix spelling across the project
* abcdefghijklmnopqrstuvwxyz * accounting * additional * allowed * almost * already * appropriately * arise * bandwidth * behave * bidirectional * brackets * cached * characters * cloud * committing * concatenating * configured * constructs * current * cutoff * deferred * different * directory * disposition * dropbox * either way * error * excess * experiments * explicitly * externally * files * github * gzipped * hierarchies * huffman * hyphen * implicitly * independent * insensitive * integrity * libraries * literally * metadata * mimics * missing * modification * multipart * multiple * nightmare * nonexistent * number * obscure * ourselves * overridden * potatoes * preexisting * priority * received * remote * replacement * represents * reproducibility * response * satisfies * sensitive * separately * separator * specifying * string * successful * synchronization * syncing * šenfeld * take * temporarily * testcontents * that * the * themselves * throttling * timeout * transaction * transferred * unnecessary * using * webbrowser * which * with * workspace

Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com>
@@ -239,7 +239,7 @@ type GetFileInfoRequest struct {
 // If the original source of the file being uploaded has a last
 // modified time concept, Backblaze recommends using
 // src_last_modified_millis as the name, and a string holding the base
-// 10 number number of milliseconds since midnight, January 1, 1970
+// 10 number of milliseconds since midnight, January 1, 1970
 // UTC. This fits in a 64 bit integer such as the type "long" in the
 // programming language Java. It is intended to be compatible with
 // Java's time long. For example, it can be passed directly into the
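The value this comment describes is easy to produce with the standard library; a minimal standalone sketch (not rclone's code):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	modTime := time.Date(2020, 1, 2, 3, 4, 5, 0, time.UTC)
	// Milliseconds since the Unix epoch fit in an int64, matching Java's "long".
	fmt.Printf("src_last_modified_millis=%d\n", modTime.UnixMilli())
}
```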
@@ -14,7 +14,7 @@ const (
 	timeFormat = `"` + time.RFC3339 + `"`
 )
 
-// Time represents represents date and time information for the
+// Time represents date and time information for the
 // box API, by using RFC3339
 type Time time.Time
 
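Because timeFormat includes the surrounding double quotes, JSON (un)marshalling of such a Time becomes a near one-liner. A minimal sketch of the pattern, assuming the Time type and timeFormat constant above (method bodies are illustrative, not copied from rclone):

```go
// MarshalJSON renders the time as a quoted RFC3339 string; the quotes
// come from timeFormat itself, so no extra escaping is needed.
func (t Time) MarshalJSON() ([]byte, error) {
	return []byte(time.Time(t).Format(timeFormat)), nil
}

// UnmarshalJSON parses the quoted RFC3339 string back into a Time.
func (t *Time) UnmarshalJSON(data []byte) error {
	parsed, err := time.Parse(timeFormat, string(data))
	if err != nil {
		return err
	}
	*t = Time(parsed)
	return nil
}
```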
@@ -64,7 +64,7 @@ import (
 // length of 13 decimals it makes a 7-digit base-36 number.
 //
 // When transactions is set to the norename style, data chunks will
-// keep their temporary chunk names (with the transacion identifier
+// keep their temporary chunk names (with the transaction identifier
 // suffix). To distinguish them from temporary chunks, the txn field
 // of the metadata file is set to match the transaction identifier of
 // the data chunks.
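The norename check the comment describes can be illustrated with a small hypothetical helper (names and the suffix separator are invented for illustration and assume `strings` is imported; chunker's real parsing is more involved):

```go
// isCommittedChunk reports whether a chunk that still carries a temporary
// transaction suffix belongs to the committed transaction recorded in the
// metadata file's txn field.
func isCommittedChunk(chunkName, metaTxn string) bool {
	i := strings.LastIndex(chunkName, "_")
	if i < 0 {
		return false // no transaction suffix at all
	}
	return chunkName[i+1:] == metaTxn
}
```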
@@ -1079,7 +1079,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
 
 // readXactID returns the transaction ID stored in the passed metadata object
 func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
-	// if xactID has already been read and cahced return it now
+	// if xactID has already been read and cached return it now
 	if o.xIDCached {
 		return o.xactID, nil
 	}
@@ -1,4 +1,4 @@
-// Package combine implents a backend to combine multipe remotes in a directory tree
+// Package combine implents a backend to combine multiple remotes in a directory tree
 package combine
 
 /*
@@ -90,7 +90,7 @@ Generally -1 (default, equivalent to 5) is recommended.
 Levels 1 to 9 increase compression at the cost of speed. Going past 6
 generally offers very little return.
 
-Level -2 uses Huffmann encoding only. Only use if you know what you
+Level -2 uses Huffman encoding only. Only use if you know what you
 are doing.
 Level 0 turns off compression.`,
 			Default: sgzip.DefaultCompression,
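Level -2 maps to Huffman-only mode in Go's standard compression packages; a minimal standalone sketch of what that mode means:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"log"
)

func main() {
	var buf bytes.Buffer
	// gzip.HuffmanOnly (== -2) applies Huffman entropy coding only,
	// skipping LZ77 match detection entirely.
	zw, err := gzip.NewWriterLevel(&buf, gzip.HuffmanOnly)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := zw.Write(bytes.Repeat([]byte("hello "), 100)); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("600 input bytes -> %d compressed bytes", buf.Len())
}
```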
@@ -130,7 +130,7 @@ type Fs struct {
 	features *fs.Features // optional features
 }
 
-// NewFs contstructs an Fs from the path, container:path
+// NewFs constructs an Fs from the path, container:path
 func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
@@ -451,7 +451,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
 		return f.Fs.Put(ctx, bytes.NewBuffer(buf[:n]), src, options...)
 	}
 
-	// Need to include what we allready read
+	// Need to include what we already read
 	in = &ReadCloserWrapper{
 		Reader: io.MultiReader(bytes.NewReader(buf), in),
 		Closer: in,
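The hunk above uses a common Go pattern: after sniffing the head of a stream, glue the consumed bytes back in front of the remainder so the consumer sees the whole stream. A minimal sketch of that pattern, assuming `bytes` and `io` are imported and a ReadCloserWrapper like the one in the diff:

```go
// ReadCloserWrapper pairs a replacement Reader with the original Closer,
// so closing the wrapper still closes the underlying stream.
type ReadCloserWrapper struct {
	io.Reader
	io.Closer
}

// prepend returns a ReadCloser that yields buf first, then the rest of in.
func prepend(buf []byte, in io.ReadCloser) io.ReadCloser {
	return &ReadCloserWrapper{
		Reader: io.MultiReader(bytes.NewReader(buf), in),
		Closer: in,
	}
}
```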
@@ -731,7 +731,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 	}
 
 	// If our new object is compressed we have to rename it with the correct size.
-	// Uncompressed objects don't store the size in the name so we they'll allready have the correct name.
+	// Uncompressed objects don't store the size in the name so we they'll already have the correct name.
 	if compressible {
 		wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
 		if err != nil {
@@ -742,7 +742,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 	return newObj, nil
 }
 
-// Temporarely disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
+// Temporarily disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
 // will break stuff. Right no I can't think of a way to make this work.
 
 // PutUnchecked uploads the object
@@ -125,7 +125,7 @@ names, or for debugging purposes.`,
 
 This option could help with shortening the encrypted filename. The
 suitable option would depend on the way your remote count the filename
-length and if it's case sensitve.`,
+length and if it's case sensitive.`,
 			Default: "base32",
 			Examples: []fs.OptionExample{
 				{
@@ -3305,7 +3305,7 @@ drives found and a combined drive.
     upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
 
 Adding this to the rclone config file will cause those team drives to
-be accessible with the aliases shown. Any illegal charactes will be
+be accessible with the aliases shown. Any illegal characters will be
 substituted with "_" and duplicate names will have numbers suffixed.
 It will also add a remote called AllDrives which shows all the shared
 drives combined into one directory tree.
@@ -309,7 +309,7 @@ func (b *batcher) Shutdown() {
 	}
 	b.shutOnce.Do(func() {
 		atexit.Unregister(b.atexit)
-		fs.Infof(b.f, "Commiting uploads - please wait...")
+		fs.Infof(b.f, "Committing uploads - please wait...")
 		// show that batcher is shutting down
 		close(b.closed)
 		// quit the commitLoop by sending a quitRequest message
@@ -268,7 +268,7 @@ default based on the batch_mode in use.
 			Advanced: true,
 		}, {
 			Name: "batch_commit_timeout",
-			Help: `Max time to wait for a batch to finish comitting`,
+			Help: `Max time to wait for a batch to finish committing`,
 			Default: fs.Duration(10 * time.Minute),
 			Advanced: true,
 		}, {
@@ -1669,7 +1669,7 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
 			correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset
 			delta := int64(correctOffset) - int64(cursor.Offset)
 			skip += delta
-			what := fmt.Sprintf("incorrect offset error receved: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
+			what := fmt.Sprintf("incorrect offset error received: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
 			if skip < 0 {
 				return false, fmt.Errorf("can't seek backwards to correct offset: %s", what)
 			} else if skip == chunkSize {
@@ -84,7 +84,7 @@ type CopyFileResponse struct {
 	URLs []FileCopy `json:"urls"`
 }
 
-// FileCopy is used in the the CopyFileResponse
+// FileCopy is used in the CopyFileResponse
 type FileCopy struct {
 	FromURL string `json:"from_url"`
 	ToURL   string `json:"to_url"`
@@ -19,7 +19,7 @@ const (
 	timeFormatJSON = `"` + timeFormatParameters + `"`
 )
 
-// Time represents represents date and time information for the
+// Time represents date and time information for the
 // filefabric API
 type Time time.Time
 
@@ -95,7 +95,7 @@ type Status struct {
 	// Warning string `json:"warning"` // obsolete
 }
 
-// Status statisfies the error interface
+// Status satisfies the error interface
 func (e *Status) Error() string {
 	return fmt.Sprintf("%s (%s)", e.Message, e.Code)
 }
@@ -843,7 +843,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 	return f.purgeCheck(ctx, dir, false)
 }
 
-// Wait for the the background task to complete if necessary
+// Wait for the background task to complete if necessary
 func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) {
 	if taskID == "" || taskID == "0" {
 		// No task to wait for
@@ -311,7 +311,7 @@ rclone does if you know the bucket exists already.
 			Help: `If set this will decompress gzip encoded objects.
 
 It is possible to upload objects to GCS with "Content-Encoding: gzip"
-set. Normally rclone will download these files files as compressed objects.
+set. Normally rclone will download these files as compressed objects.
 
 If this flag is set then rclone will decompress these files with
 "Content-Encoding: gzip" as they are received. This means that rclone
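What decompressing "as they are received" amounts to can be sketched with a small hypothetical helper (not rclone's code; assumes `compress/gzip`, `io`, and `net/http` are imported, and ignores the hash and size bookkeeping rclone also has to do):

```go
// bodyReader returns a reader of the decoded content when the object was
// stored with Content-Encoding: gzip, and the raw body otherwise.
func bodyReader(resp *http.Response) (io.ReadCloser, error) {
	if resp.Header.Get("Content-Encoding") != "gzip" {
		return resp.Body, nil
	}
	return gzip.NewReader(resp.Body)
}
```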
@@ -330,7 +330,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, transaction)
 	}
 
-	// Do not allow the root-prefix to be non-existent nor a directory,
+	// Do not allow the root-prefix to be nonexistent nor a directory,
 	// but it can be empty.
 	if f.opt.RootPrefix != "" {
 		item, err := f.fetchMetadataForPath(ctx, f.opt.RootPrefix, api.HiDriveObjectNoMetadataFields)
@@ -623,7 +623,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 // should be retried after the parent-directories of the destination have been created.
 // If so, it will create the parent-directories.
 //
-// If any errors arrise while finding the source or
+// If any errors arise while finding the source or
 // creating the parent-directory those will be returned.
 // Otherwise returns the originalError.
 func (f *Fs) shouldRetryAndCreateParents(ctx context.Context, destinationPath string, sourcePath string, originalError error) (bool, error) {
@@ -961,7 +961,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		} else {
 			_, _, err = o.fs.uploadFileChunked(ctx, resolvedPath, in, modTime, int(o.fs.opt.UploadChunkSize), o.fs.opt.UploadConcurrency)
 		}
-		// Try to check if object was updated, eitherway.
+		// Try to check if object was updated, either way.
 		// Metadata should be updated even if the upload fails.
 		info, metaErr = o.fs.fetchMetadataForPath(ctx, resolvedPath, api.HiDriveObjectWithMetadataFields)
 	} else {
@@ -138,7 +138,7 @@ var testTable = []struct {
 	// pattern describes how to use data to construct the hash-input.
 	// For every entry n at even indices this repeats the data n times.
 	// For every entry m at odd indices this repeats a null-byte m times.
-	// The input-data is constructed by concatinating the results in order.
+	// The input-data is constructed by concatenating the results in order.
 	pattern []int64
 	out     []byte
 	name    string
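The pattern encoding those comments describe can be made concrete with a small helper (hypothetical, not the test's code):

```go
// buildInput expands a pattern: entries at even indices repeat data that many
// times, entries at odd indices insert that many null bytes, and the pieces
// are concatenated in order.
func buildInput(data []byte, pattern []int64) []byte {
	var out []byte
	for i, n := range pattern {
		if i%2 == 0 {
			for ; n > 0; n-- {
				out = append(out, data...)
			}
		} else {
			out = append(out, make([]byte, n)...)
		}
	}
	return out
}
```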
@@ -227,7 +227,7 @@ type Object struct {
 	rawData json.RawMessage
 }
 
-// IAFile reprensents a subset of object in MetadataResponse.Files
+// IAFile represents a subset of object in MetadataResponse.Files
 type IAFile struct {
 	Name string `json:"name"`
 	// Source string `json:"source"`
@@ -243,7 +243,7 @@ type IAFile struct {
 	rawData json.RawMessage
 }
 
-// MetadataResponse reprensents subset of the JSON object returned by (frontend)/metadata/
+// MetadataResponse represents subset of the JSON object returned by (frontend)/metadata/
 type MetadataResponse struct {
 	Files    []IAFile `json:"files"`
 	ItemSize int64    `json:"item_size"`
@@ -1273,7 +1273,7 @@ func trimPathPrefix(s, prefix string, enc encoder.MultiEncoder) string {
 	return enc.ToStandardPath(strings.TrimPrefix(s, prefix+"/"))
 }
 
-// mimicks urllib.parse.quote() on Python; exclude / from url.PathEscape
+// mimics urllib.parse.quote() on Python; exclude / from url.PathEscape
 func quotePath(s string) string {
 	seg := strings.Split(s, "/")
 	newValues := []string{}
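The function continues beyond the context shown; a self-contained sketch of the same idea (escape every segment, keep the / separators) under the stated urllib.parse.quote semantics, not rclone's exact code:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// quotePath escapes each path segment but leaves the "/" separators intact,
// mirroring Python's urllib.parse.quote with its default safe="/".
func quotePath(s string) string {
	seg := strings.Split(s, "/")
	escaped := make([]string, 0, len(seg))
	for _, v := range seg {
		escaped = append(escaped, url.PathEscape(v))
	}
	return strings.Join(escaped, "/")
}

func main() {
	fmt.Println(quotePath("dir name/file#1.txt")) // dir%20name/file%231.txt
}
```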
@@ -1418,7 +1418,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}
 	info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)
 
-	// if destination was a trashed file then after a successfull copy the copied file is still in trash (bug in api?)
+	// if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?)
 	if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
 		fs.Debugf(src, "Server-side copied to trashed destination, restoring")
 		info, err = f.createOrUpdate(ctx, remote, srcObj.modTime, srcObj.size, srcObj.md5)
@@ -668,7 +668,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 //
 // https://app.koofr.net/content/links/39a6cc01-3b23-477a-8059-c0fb3b0f15de/files/get?path=%2F
 //
-// I am not sure about meaning of "path" parameter; in my expriments
+// I am not sure about meaning of "path" parameter; in my experiments
 // it is always "%2F", and omitting it or putting any other value
 // results in 404.
 //
@@ -192,7 +192,7 @@ func TestHashOnUpdate(t *testing.T) {
 	require.NoError(t, err)
 	assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
 
-	// Reupload it with diferent contents but same size and timestamp
+	// Reupload it with different contents but same size and timestamp
 	var b = bytes.NewBufferString("CONTENT")
 	src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
 	err = o.Update(ctx, b, src)
@@ -9,7 +9,7 @@ import (
 
 const haveSetBTime = false
 
-// setBTime changes the the birth time of the file passed in
+// setBTime changes the birth time of the file passed in
 func setBTime(name string, btime time.Time) error {
 	// Does nothing
 	return nil
@@ -11,7 +11,7 @@ import (
 
 const haveSetBTime = true
 
-// setBTime sets the the birth time of the file passed in
+// setBTime sets the birth time of the file passed in
 func setBTime(name string, btime time.Time) (err error) {
 	h, err := syscall.Open(name, os.O_RDWR, 0755)
 	if err != nil {
@@ -347,7 +347,7 @@ func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *
 		}
 	}
 	if err != nil {
-		return nil, fmt.Errorf("internal error: mkdir called with non-existent root node: %w", err)
+		return nil, fmt.Errorf("internal error: mkdir called with nonexistent root node: %w", err)
 	}
 	// i is number of directories to create (may be 0)
 	// node is directory to create them from
@@ -387,7 +387,7 @@ func (f *Fs) findRoot(ctx context.Context, create bool) (*mega.Node, error) {
 		return f._rootNode, nil
 	}
 
-	// Check for pre-existing root
+	// Check for preexisting root
 	absRoot := f.srv.FS.GetRoot()
 	node, err := f.findDir(absRoot, f.root)
 	//log.Printf("findRoot findDir %p %v", node, err)
@@ -118,7 +118,7 @@ type Fs struct {
 	filetype         string            // dir, file or symlink
 	dirscreated      map[string]bool   // if implicit dir has been created already
 	dirscreatedMutex sync.Mutex        // mutex to protect dirscreated
-	statcache        map[string][]File // cache successfull stat requests
+	statcache        map[string][]File // cache successful stat requests
 	statcacheMutex   sync.RWMutex      // RWMutex to protect statcache
 }
 
@@ -424,7 +424,7 @@ func (f *Fs) getFileName(file *File) string {
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 	if f.filetype == "" {
 		// This happens in two scenarios.
-		// 1. NewFs is done on a non-existent object, then later rclone attempts to List/ListR this NewFs.
+		// 1. NewFs is done on a nonexistent object, then later rclone attempts to List/ListR this NewFs.
 		// 2. List/ListR is called from the context of test_all and not the regular rclone binary.
 		err := f.initFs(ctx, dir)
 		if err != nil {
@@ -488,7 +488,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
 	if f.filetype == "" {
 		// This happens in two scenarios.
-		// 1. NewFs is done on a non-existent object, then later rclone attempts to List/ListR this NewFs.
+		// 1. NewFs is done on a nonexistent object, then later rclone attempts to List/ListR this NewFs.
 		// 2. List/ListR is called from the context of test_all and not the regular rclone binary.
 		err := f.initFs(ctx, dir)
 		if err != nil {
@@ -70,7 +70,7 @@ type Drive struct {
 	Quota Quota `json:"quota"`
 }
 
-// Timestamp represents represents date and time information for the
+// Timestamp represents date and time information for the
 // OneDrive API, by using ISO 8601 and is always in UTC time.
 type Timestamp time.Time
 
@@ -13,7 +13,7 @@ const (
 	timeFormat = `"` + time.RFC1123Z + `"`
 )
 
-// Time represents represents date and time information for the
+// Time represents date and time information for the
 // pcloud API, by using RFC1123Z
 type Time time.Time
 
@@ -2009,7 +2009,7 @@ See [the time option docs](/docs/#time-option) for valid formats.
 			Help: `If set this will decompress gzip encoded objects.
 
 It is possible to upload objects to S3 with "Content-Encoding: gzip"
-set. Normally rclone will download these files files as compressed objects.
+set. Normally rclone will download these files as compressed objects.
 
 If this flag is set then rclone will decompress these files with
 "Content-Encoding: gzip" as they are received. This means that rclone
@@ -5199,7 +5199,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		var head s3.HeadObjectOutput
 		//structs.SetFrom(&head, &req)
 		setFrom_s3HeadObjectOutput_s3PutObjectInput(&head, &req)
-		head.ETag = &md5sumHex // doesn't matter quotes are misssing
+		head.ETag = &md5sumHex // doesn't matter quotes are missing
 		head.ContentLength = &size
 		// If we have done a single part PUT request then we can read these
 		if gotEtag != "" {
@@ -78,7 +78,7 @@ func (f *Fs) InternalTestMetadata(t *testing.T) {
 	}
 
 	t.Run("GzipEncoding", func(t *testing.T) {
-		// Test that the gziped file we uploaded can be
+		// Test that the gzipped file we uploaded can be
 		// downloaded with and without decompression
 		checkDownload := func(wantContents string, wantSize int64, wantHash string) {
 			gotContents := fstests.ReadObject(ctx, t, o, -1)
@@ -116,7 +116,7 @@ func (f *Fs) InternalTestNoHead(t *testing.T) {
 	defer func() {
 		assert.NoError(t, obj.Remove(ctx))
 	}()
-	// PutTestcontests checks the received object
+	// PutTestcontents checks the received object
 
 }
 
@@ -16,8 +16,8 @@ func TestInternalUrlEncode(t *testing.T) {
 		want string
 	}{
 		{"", ""},
-		{"abcdefghijklmopqrstuvwxyz", "abcdefghijklmopqrstuvwxyz"},
-		{"ABCDEFGHIJKLMOPQRSTUVWXYZ", "ABCDEFGHIJKLMOPQRSTUVWXYZ"},
+		{"abcdefghijklmnopqrstuvwxyz", "abcdefghijklmnopqrstuvwxyz"},
+		{"ABCDEFGHIJKLMNOPQRSTUVWXYZ", "ABCDEFGHIJKLMNOPQRSTUVWXYZ"},
 		{"0123456789", "0123456789"},
 		{"abc/ABC/123", "abc/ABC/123"},
 		{"   ", "%20%20%20"},
@@ -80,7 +80,7 @@ type UploadInfo struct {
 	} `json:"data"`
 }
 
-// UploadResponse is the respnse to a successful upload
+// UploadResponse is the response to a successful upload
 type UploadResponse struct {
 	Files []struct {
 		Name string `json:"name"`
@@ -163,7 +163,7 @@ func (f *Fs) splitPathFull(pth string) (string, string) {
 	return "//" + fullPath[:i], fullPath[i+1:]
 }
 
-// splitPath is modified splitPath version that doesn't include the seperator
+// splitPath is modified splitPath version that doesn't include the separator
 // in the base path
 func (f *Fs) splitPath(pth string) (string, string) {
 	// chop of any leading or trailing '/'
@@ -479,7 +479,7 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
 	} else if size == 0 {
 		return nil, fs.ErrorCantUploadEmptyFiles
 	}
-	// yes it does take take 4 requests if we're uploading to root and 6+ if we're uploading to any subdir :(
+	// yes it does take 4 requests if we're uploading to root and 6+ if we're uploading to any subdir :(
 
 	// create upload request
 	opts := rest.Opts{
@@ -757,7 +757,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	if err != nil {
 		return fmt.Errorf("dirmove: source not found: %w", err)
 	}
-	// check if the destination allready exists
+	// check if the destination already exists
 	dstPath := f.dirPath(dstRemote)
 	_, err = f.readMetaDataForPath(ctx, dstPath, &api.MetadataRequestOptions{Limit: 1})
 	if err == nil {
@@ -782,7 +782,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	needMove := srcBase != dstBase
 
 	// if we have to rename we'll have to use a temporary name since
-	// there could allready be a directory with the same name as the src directory
+	// there could already be a directory with the same name as the src directory
 	if needRename {
 		// rename to a temporary name
 		tmpName := "rcloneTemp" + random.String(8)
@@ -6,7 +6,7 @@ import (
 	"time"
 )
 
-// Time represents represents date and time information for Zoho
+// Time represents date and time information for Zoho
 // Zoho uses milliseconds since unix epoch (Java currentTimeMillis)
 type Time time.Time
 
@@ -150,8 +150,8 @@ func init() {
 			return workspace.ID, workspace.Attributes.Name
 		})
 	case "workspace_end":
-		worksspaceID := config.Result
-		m.Set(configRootID, worksspaceID)
+		workspaceID := config.Result
+		m.Set(configRootID, workspaceID)
 		return nil, nil
 	}
 	return nil, fmt.Errorf("unknown state %q", config.State)
@@ -1264,7 +1264,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return err
 	}
 
-	// upload was successfull, need to delete old object before rename
+	// upload was successful, need to delete old object before rename
 	if err = o.Remove(ctx); err != nil {
 		return fmt.Errorf("failed to remove old object: %w", err)
 	}