
Compare commits


16 Commits

Author SHA1 Message Date
Nick Craig-Wood
c741c02fb6 dropbox: make sure we wait for previous batch to finish before starting a new one 2020-09-08 15:03:31 +01:00
Nick Craig-Wood
56e8d75cab dropbox: add --dropbox-batch flag to speed up uploading of lots of small files 2020-09-08 15:03:31 +01:00
Nick Craig-Wood
e74f5b8906 dropbox: raise priority of rate limited message to INFO to make it more noticeable
If you exceed rate limits, dropbox tells you to wait for 300 seconds -
this is rather a long time for the user to be waiting for rclone to
finish, so emit a NOTICE level log instead of a DEBUG.
2020-09-08 15:03:31 +01:00
Nick Craig-Wood
e4a7686444 accounting: remove new line from end of --stats-one-line display 2020-09-08 15:03:23 +01:00
Tim Gallant
c3884aafd9 drive: adds special oauth help text - fixes #4555 2020-09-07 12:48:46 +01:00
Nick Craig-Wood
0a9785a4ff build: don't explicitly set ARM version to fix ARMv5 build #4553
This partially reverts commit f71f6c57d7.
2020-09-07 12:39:26 +01:00
Nick Craig-Wood
8140f67092 check: fix docs
See: https://forum.rclone.org/t/possible-issue-with-documention/18926
2020-09-07 12:10:52 +01:00
Nick Craig-Wood
4a001b8a02 check: add back missing --download flag - fixes #4565
This was accidentally removed when refactoring check and cryptcheck in

8b6f2bbb4b check,cryptcheck: add reporting of filenames for same/missing/changed #3264
2020-09-05 09:29:35 +01:00
Nick Craig-Wood
525433e6dd build: fix "Illegal instruction" error for ARMv6 builds - fixes #4553
Before this change we used `go build -i` to build the releases in parallel.

However this causes the ARMv6 and ARMv7 builds to get mixed up somehow,
causing an illegal instruction when running rclone binaries on ARMv6.

See go bug: https://github.com/golang/go/issues/41223

This removes the -i which should have no effect on build times on the
CI and appears to fix the problem.
2020-09-04 16:30:50 +01:00
Nick Craig-Wood
f71f6c57d7 build: explicitly set ARM version to fix build #4553 2020-09-04 16:30:50 +01:00
albertony
e35623c72e docs/jottacloud: mention that uploads from local disk will not need to cache files to disk for md5 calculation 2020-09-04 00:04:09 +02:00
Nick Craig-Wood
344bce7e2a docs: fix formatting of rc docs page
See: https://forum.rclone.org/t/rclone-1-53-release/18880/24
2020-09-03 11:53:24 +01:00
Nick Craig-Wood
3a4322a7ba build: update build for stable branch 2020-09-03 11:30:23 +01:00
Nick Craig-Wood
27b9ae4fc3 vfs: fix spurious error "vfs cache: failed to _ensure cache EOF"
Before this change the error message was produced for every file, which
was confusing users.

After this change we check for EOF and return from ReadAt at that
point.

See: https://forum.rclone.org/t/rclone-1-53-release/18880/10
2020-09-03 10:25:00 +01:00
Nick Craig-Wood
7e2488af10 build: include vendor tar ball in release and fix startdev 2020-09-02 17:53:05 +01:00
Nick Craig-Wood
41ecb586c4 Start v1.54.0-DEV development 2020-09-02 17:52:58 +01:00
15 changed files with 324 additions and 76 deletions

View File

@@ -8,7 +8,8 @@ VERSION := $(shell cat VERSION)
# Last tag on this branch
LAST_TAG := $(shell git describe --tags --abbrev=0)
# Next version
-NEXT_VERSION := $(shell echo $(VERSION) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
+NEXT_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2+1,0}')
+NEXT_PATCH_VERSION := $(shell echo $(VERSION) | awk -F. -v OFS=. '{print $$1,$$2,$$3+1}')
# If we are working on a release, override branch to master
ifdef RELEASE_TAG
BRANCH := master
@@ -164,6 +165,11 @@ validate_website: website
tarball:
git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)
vendorball:
go mod vendor
tar -zcf build/rclone-$(TAG)-vendor.tar.gz vendor
rm -rf vendor
sign_upload:
cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS
@@ -239,7 +245,15 @@ startdev:
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_VERSION)" > VERSION
git commit -m "Start $(VERSION)-DEV development" fs/version.go
git commit -m "Start $(NEXT_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
startstable:
@echo "Version is $(VERSION)"
@echo "Next stable version is $(NEXT_PATCH_VERSION)"
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_PATCH_VERSION)-DEV\"\n" | gofmt > fs/version.go
echo -n "$(NEXT_PATCH_VERSION)" > docs/layouts/partials/version.html
echo "$(NEXT_PATCH_VERSION)" > VERSION
git commit -m "Start $(NEXT_PATCH_VERSION)-DEV development" fs/version.go VERSION docs/layouts/partials/version.html
winzip:
zip -9 rclone-$(TAG).zip rclone.exe
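The awk one-liners make the version arithmetic explicit: split the vMAJOR.MINOR.PATCH tag on dots, bump one field, and rejoin (the perl expression they replace treated the version as a float, which cannot express a patch bump). A minimal Go sketch of the same computation - a hypothetical helper for illustration, not code from the repo:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// bump increments one dot-separated field of a "vMAJOR.MINOR.PATCH"
// string and zeroes everything after it, mirroring the Makefile's awk
// one-liners (field 1 = NEXT_VERSION, field 2 = NEXT_PATCH_VERSION).
func bump(version string, field int) string {
	parts := strings.Split(version, ".")
	n, _ := strconv.Atoi(parts[field])
	parts[field] = strconv.Itoa(n + 1)
	for i := field + 1; i < len(parts); i++ {
		parts[i] = "0"
	}
	return strings.Join(parts, ".")
}

func main() {
	fmt.Println(bump("v1.53.0", 1)) // v1.54.0 - what `make startdev` uses
	fmt.Println(bump("v1.53.0", 2)) // v1.53.1 - what `make startstable` uses
}
```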

View File

@@ -9,7 +9,7 @@ This file describes how to make the various kinds of releases
## Making a release
-* git checkout master
+* git checkout master # see below for stable branch
* git pull
* git status - make sure everything is checked in
* Check GitHub actions build for master is Green
@@ -25,12 +25,13 @@ This file describes how to make the various kinds of releases
* # Wait for the GitHub builds to complete then...
* make fetch_binaries
* make tarball
* make vendorball
* make sign_upload
* make check_sign
* make upload
* make upload_website
* make upload_github
-* make startdev
+* make startdev # make startstable for stable branch
* # announce with forum post, twitter post, patreon post
Early in the next release cycle update the dependencies
@@ -41,62 +42,35 @@ Early in the next release cycle update the dependencies
* git add new files
* git commit -a -v
If `make update` fails with errors like this:
```
# github.com/cpuguy83/go-md2man/md2man
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:11:16: undefined: blackfriday.EXTENSION_NO_INTRA_EMPHASIS
../../../../pkg/mod/github.com/cpuguy83/go-md2man@v1.0.8/md2man/md2man.go:12:16: undefined: blackfriday.EXTENSION_TABLES
```
Can be fixed with
* GO111MODULE=on go get -u github.com/russross/blackfriday@v1.5.2
* GO111MODULE=on go mod tidy
## Making a point release
If rclone needs a point release due to some horrendous bug:
First make the release branch. If this is a second point release then
this will be done already.
Set vars
* BASE_TAG=v1.XX # eg v1.52
* NEW_TAG=${BASE_TAG}.Y # eg v1.52.1
* echo $BASE_TAG $NEW_TAG # v1.52 v1.52.1
First make the release branch. If this is a second point release then
this will be done already.
* git branch ${BASE_TAG} ${BASE_TAG}-stable
* git co ${BASE_TAG}-stable
* make startstable
Now
* FIXME this is now broken with new semver layout - needs fixing
* FIXME the TAG=${NEW_TAG} shouldn't be necessary any more
* git co ${BASE_TAG}-stable
* git cherry-pick any fixes
* Test (see above)
* make NEXT_VERSION=${NEW_TAG} tag
* edit docs/content/changelog.md
* make TAG=${NEW_TAG} doc
* git commit -a -v -m "Version ${NEW_TAG}"
* git tag -d ${NEW_TAG}
* git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
* git push --tags -u origin ${BASE_TAG}-stable
* Wait for builds to complete
* make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
* make TAG=${NEW_TAG} tarball
* make TAG=${NEW_TAG} sign_upload
* make TAG=${NEW_TAG} check_sign
* make TAG=${NEW_TAG} upload
* make TAG=${NEW_TAG} upload_website
* make TAG=${NEW_TAG} upload_github
* NB this overwrites the current beta so we need to do this
* Do the steps as above
* make startstable
* NB this overwrites the current beta so we need to do this - FIXME is this true any more?
* git co master
* make VERSION=${NEW_TAG} startdev
* # cherry pick the changes to the changelog and VERSION
* git checkout ${BASE_TAG}-stable VERSION docs/content/changelog.md
* git commit --amend
* # cherry pick the changes to the changelog
* git checkout ${BASE_TAG}-stable docs/content/changelog.md
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
* git push
* Announce!
## Making a manual build of docker

View File

@@ -1 +1 @@
-v1.53.0
+v1.54.0

View File

@@ -157,6 +157,17 @@ func driveScopesContainsAppFolder(scopes []string) bool {
return false
}
func driveOAuthOptions() []fs.Option {
opts := []fs.Option{}
for _, opt := range oauthutil.SharedOptions {
if opt.Name == config.ConfigClientID {
opt.Help = "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance."
}
opts = append(opts, opt)
}
return opts
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -192,7 +203,7 @@ func init() {
log.Fatalf("Failed to configure team drive: %v", err)
}
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Options: append(driveOAuthOptions(), []fs.Option{{
Name: "scope",
Help: "Scope that rclone should use when requesting access from drive.",
Examples: []fs.OptionExample{{
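One detail worth noting: this works because (assuming oauthutil.SharedOptions is a slice of fs.Option values, as the loop suggests) the range variable opt is a copy, so overriding opt.Help customises drive's copy without touching the shared definitions other backends use. A self-contained illustration of the pattern:

```go
package main

import "fmt"

type Option struct {
	Name string
	Help string
}

var sharedOptions = []Option{
	{Name: "client_id", Help: "OAuth Client Id"},
	{Name: "client_secret", Help: "OAuth Client Secret"},
}

// customised copies the shared options, overriding the help text for
// one of them - the range variable is a copy, so the shared slice that
// other backends use is left untouched.
func customised() []Option {
	opts := []Option{}
	for _, opt := range sharedOptions {
		if opt.Name == "client_id" {
			opt.Help = "Google Application Client Id\nSetting your own is recommended."
		}
		opts = append(opts, opt)
	}
	return opts
}

func main() {
	fmt.Println(customised()[0].Help)  // overridden copy
	fmt.Println(sharedOptions[0].Help) // original untouched
}
```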

View File

@@ -22,6 +22,7 @@ of path_display and all will be well.
*/
import (
"bytes"
"context"
"fmt"
"io"
@@ -29,9 +30,11 @@ import (
"path"
"regexp"
"strings"
"sync"
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/async"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
@@ -47,6 +50,7 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
@@ -61,6 +65,7 @@ const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
maxBatchSize = 1000
// Upload chunk size - setting too small makes uploads slow.
// Chunks are buffered into memory for retries.
//
@@ -142,6 +147,23 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
Help: "Impersonate this user when using a business account.",
Default: "",
Advanced: true,
}, {
Name: "batch",
Help: `Enable batching of files if non-zero.
This sets the batch size of files to upload. It has to be at most 1000. A
sensible setting is probably 1000 if you are using this feature.
Rclone will close any outstanding batches when it exits.
Setting this is a great idea if you are uploading lots of small files as it will
make the uploads much quicker. You can use --transfers 32 to maximise throughput.
It has the downside that rclone can't check the hash of the file after upload,
so using "rclone check" after the transfer completes is recommended.
`,
Default: 0,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -163,6 +185,7 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
Batch int `config:"batch"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -180,6 +203,7 @@ type Fs struct {
slashRootSlash string // root with "/" prefix and postfix, lowercase
pacer *fs.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none
batcher *batcher // batch builder
}
// Object describes a dropbox object
@@ -195,6 +219,165 @@ type Object struct {
// ------------------------------------------------------------
// batcher holds info about the current items waiting for upload
type batcher struct {
f *Fs // Fs this batch is part of
mu sync.Mutex // lock for vars below
commitMu sync.Mutex // lock for waiting for batch
maxBatch int // maximum size for batch
active int // number of batches being sent
items []*files.UploadSessionFinishArg // current uncommitted files
atexit atexit.FnHandle // atexit handle
}
// newBatcher creates a new batcher structure
func newBatcher(f *Fs, maxBatch int) *batcher {
return &batcher{
f: f,
maxBatch: maxBatch,
}
}
// Start starts adding an item to a batch returning true if it was
// successfully started
//
// This should be paired with End
func (b *batcher) Start() bool {
if b.maxBatch <= 0 {
return false
}
b.mu.Lock()
defer b.mu.Unlock()
b.active++
// FIXME set a timer or something
return true
}
// End ends adding an item
func (b *batcher) End(started bool) error {
if !started {
return nil
}
b.mu.Lock()
defer b.mu.Unlock()
b.active--
if len(b.items) < b.maxBatch {
return nil
}
return b._commit(false)
}
// Waits for the batch to complete - call with commitMu held
func (b *batcher) _waitForBatchResult(res *files.UploadSessionFinishBatchLaunch) (batchResult *files.UploadSessionFinishBatchResult, err error) {
if res.AsyncJobId == "" {
return res.Complete, nil
}
var batchStatus *files.UploadSessionFinishBatchJobStatus
sleepTime := time.Second
const maxTries = 120
for try := 1; try <= maxTries; try++ {
err = b.f.pacer.Call(func() (bool, error) {
batchStatus, err = b.f.srv.UploadSessionFinishBatchCheck(&async.PollArg{
AsyncJobId: res.AsyncJobId,
})
return shouldRetry(err)
})
if err != nil {
fs.Errorf(b.f, "failed to wait for batch: %v", err)
break
}
if batchStatus.Tag == "complete" {
break
}
fs.Debugf(b.f, "sleeping for %v to wait for batch to complete, try %d/%d", sleepTime, try, maxTries)
time.Sleep(sleepTime)
}
if batchStatus == nil {
return nil, err
}
return batchStatus.Complete, nil
}
// commit a batch - call with mu held
//
// if finalizing is true then it doesn't unregister Finalize as this
// causes a deadlock during finalization.
func (b *batcher) _commit(finalizing bool) (err error) {
b.commitMu.Lock()
batch := "batch"
if finalizing {
batch = "last batch"
}
fs.Debugf(b.f, "comitting %s length %d", batch, len(b.items))
var arg = &files.UploadSessionFinishBatchArg{
Entries: b.items,
}
var res *files.UploadSessionFinishBatchLaunch
err = b.f.pacer.Call(func() (bool, error) {
res, err = b.f.srv.UploadSessionFinishBatch(arg)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
}
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
if err != nil {
b.commitMu.Unlock()
return err
}
// Clear batch
b.items = nil
// If finalizing, don't unregister or get result
if finalizing {
b.commitMu.Unlock()
return nil
}
// Unregister the atexit since queue is empty
atexit.Unregister(b.atexit)
b.atexit = nil
// Wait for the batch to finish before we proceed in the background
go func() {
defer b.commitMu.Unlock()
_, err = b._waitForBatchResult(res)
if err != nil {
fs.Errorf(b.f, "Error waiting for batch to finish: %v", err)
}
}()
return nil
}
// Add adds a finished item to the batch
func (b *batcher) Add(commitInfo *files.UploadSessionFinishArg) {
fs.Debugf(b.f, "adding %q to batch", commitInfo.Commit.Path)
b.mu.Lock()
defer b.mu.Unlock()
b.items = append(b.items, commitInfo)
if b.atexit == nil {
b.atexit = atexit.Register(b.Finalize)
}
}
// Finalize finishes any pending batches
func (b *batcher) Finalize() {
b.mu.Lock()
defer b.mu.Unlock()
if len(b.items) == 0 {
return
}
err := b._commit(true)
if err != nil {
fs.Errorf(b.f, "Failed to finalize last batch: %v", err)
}
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
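Seen from a caller, the batcher is a small protocol: Start to join a batch (returning false when batching is disabled), Add to queue the finished upload's commit arguments, and End to leave, committing once maxBatch items have accumulated; Finalize, registered via atexit, flushes whatever is left at exit. A stripped-down sketch of that protocol with a generic item type - illustrative only, not the rclone API:

```go
package main

import (
	"fmt"
	"sync"
)

// miniBatcher demonstrates the Start/Add/End protocol used by the
// dropbox batcher: items accumulate under a mutex and are flushed
// once the queue reaches maxBatch.
type miniBatcher struct {
	mu       sync.Mutex
	maxBatch int
	items    []string
}

func (b *miniBatcher) Start() bool { return b.maxBatch > 0 }

func (b *miniBatcher) Add(item string) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.items = append(b.items, item)
}

func (b *miniBatcher) End(started bool) {
	if !started {
		return
	}
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.items) < b.maxBatch {
		return
	}
	fmt.Printf("committing batch of %d items\n", len(b.items))
	b.items = nil
}

func main() {
	b := &miniBatcher{maxBatch: 3}
	for _, name := range []string{"a.txt", "b.txt", "c.txt"} {
		started := b.Start()
		b.Add(name) // in rclone this is the UploadSessionFinishArg
		b.End(started)
	}
}
```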
@@ -230,7 +413,7 @@ func shouldRetry(err error) (bool, error) {
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
-fs.Debugf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
+fs.Logf(baseErrString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
}
return true, err
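The change here is one word: fs.Logf emits at NOTICE level, which rclone shows by default, whereas fs.Debugf is hidden without `-vv`, so a server-mandated 300 second pause is no longer silent. A standalone sketch of the retry-after pattern the surrounding code implements, with stand-in types rather than the real SDK errors or pacer:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// rateLimitError stands in for dropbox's auth.RateLimitAPIError.
type rateLimitError struct{ RetryAfter int }

func (e rateLimitError) Error() string { return "too_many_requests" }

// callWithRetryAfter retries fn, honouring the server-advised delay.
func callWithRetryAfter(fn func() error) error {
	for {
		err := fn()
		var rl rateLimitError
		if !errors.As(err, &rl) {
			return err
		}
		wait := time.Duration(rl.RetryAfter) * time.Second
		// Logged at NOTICE in the fix so users can see why rclone pauses.
		fmt.Printf("Too many requests. Trying again in %v.\n", wait)
		time.Sleep(wait)
	}
}

func main() {
	calls := 0
	err := callWithRetryAfter(func() error {
		calls++
		if calls < 2 {
			return rateLimitError{RetryAfter: 1}
		}
		return nil
	})
	fmt.Println("done after", calls, "calls, err =", err)
}
```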
@@ -273,6 +456,9 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
if err != nil {
return nil, errors.Wrap(err, "dropbox: chunk size")
}
if opt.Batch > maxBatchSize || opt.Batch < 0 {
return nil, errors.Errorf("dropbox: batch must be < %d and >= 0 - it is currently %d", maxBatchSize, opt.Batch)
}
// Convert the old token if it exists. The old token was just
// just a string, the new one is a JSON blob
@@ -297,6 +483,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
opt: *opt,
pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.batcher = newBatcher(f, f.opt.Batch)
config := dropbox.Config{
LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
Client: oAuthClient, // maybe???
@@ -1044,6 +1231,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
// avoidable request to the Dropbox API that does not carry payload.
func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
batching := o.fs.batcher.Start()
defer func() {
batchErr := o.fs.batcher.End(batching)
// report a batch commit error, but don't mask an upload error
if err == nil {
err = batchErr
}
}()
chunkSize := int64(o.fs.opt.ChunkSize)
chunks := 0
if size != -1 {
@@ -1057,11 +1251,15 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
fs.Debugf(o, "Streaming chunk %d/%d", cur, cur)
} else if chunks == 0 {
fs.Debugf(o, "Streaming chunk %d/unknown", cur)
-} else {
+} else if chunks != 1 {
fs.Debugf(o, "Uploading chunk %d/%d", cur, chunks)
}
}
appendArg := files.UploadSessionAppendArg{
Close: chunks == 1,
}
// write the first chunk
fmtChunk(1, false)
var res *files.UploadSessionStartResult
@@ -1071,7 +1269,10 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
if _, err = chunk.Seek(0, io.SeekStart); err != nil {
return false, nil
}
res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, chunk)
arg := files.UploadSessionStartArg{
Close: appendArg.Close,
}
res, err = o.fs.srv.UploadSessionStart(&arg, chunk)
return shouldRetry(err)
})
if err != nil {
@@ -1082,22 +1283,34 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
SessionId: res.SessionId,
Offset: 0,
}
-appendArg := files.UploadSessionAppendArg{
-Cursor: &cursor,
-Close: false,
-}
+appendArg.Cursor = &cursor
-// write more whole chunks (if any)
+// write more whole chunks (if any, and if !batching), if
+// batching write the last chunk also.
currentChunk := 2
for {
if chunks > 0 && currentChunk >= chunks {
// if the size is known, only upload full chunks. Remaining bytes are uploaded with
// the UploadSessionFinish request.
break
} else if chunks == 0 && in.BytesRead()-cursor.Offset < uint64(chunkSize) {
// if the size is unknown, upload as long as we can read full chunks from the reader.
// The UploadSessionFinish request will not contain any payload.
break
if chunks > 0 {
// Size known
if currentChunk == chunks {
// Last chunk
if !batching {
// if the size is known, only upload full chunks. Remaining bytes are uploaded with
// the UploadSessionFinish request.
break
}
appendArg.Close = true
} else if currentChunk > chunks {
break
}
} else {
// Size unknown
lastReadWasShort := in.BytesRead()-cursor.Offset < uint64(chunkSize)
if lastReadWasShort {
// if the size is unknown, upload as long as we can read full chunks from the reader.
// The UploadSessionFinish request will not contain any payload.
// This is also what we want if batching
break
}
}
cursor.Offset = in.BytesRead()
fmtChunk(currentChunk, false)
@@ -1123,6 +1336,26 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
Cursor: &cursor,
Commit: commitInfo,
}
// If we are batching then we should have written all the data now
// store the commit info now for a batch commit
if batching {
// If we haven't closed the session then we need to
if !appendArg.Close {
fs.Debugf(o, "Closing session")
var empty bytes.Buffer
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.srv.UploadSessionAppendV2(&appendArg, &empty)
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
if err != nil {
return nil, err
}
}
o.fs.batcher.Add(args)
return nil, nil
}
fmtChunk(currentChunk, true)
chunk = readers.NewRepeatableReaderBuffer(in, buf)
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1165,7 +1398,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
size := src.Size()
var err error
var entry *files.FileMetadata
if size > int64(o.fs.opt.ChunkSize) || size == -1 {
if size > int64(o.fs.opt.ChunkSize) || size == -1 || o.fs.opt.Batch > 0 {
entry, err = o.uploadChunked(in, commitInfo, size)
} else {
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
@@ -1176,6 +1409,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return errors.Wrap(err, "upload failed")
}
// If we haven't received data back from batch upload then fake it
if entry == nil {
o.bytes = size
o.modTime = commitInfo.ClientModified
o.hash = "" // we don't have this
return nil
}
return o.setMetadataFromEntry(entry)
}
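Two consequences of batching show up in Update: every upload is forced down the chunked path, since only chunked upload sessions can be finished as a batch, and a batched commit returns no metadata, so size and modtime are synthesised and the hash is cleared - which is why the option help recommends running `rclone check` afterwards. A tiny sketch of the dispatch condition (the chunk size value is illustrative):

```go
package main

import "fmt"

// useChunked mirrors the dispatch in Update: the chunked upload path is
// taken for large files, for streams of unknown size (-1), and - the
// new case - whenever batching is enabled, since only chunked upload
// sessions can be committed through UploadSessionFinishBatch.
func useChunked(size, chunkSize int64, batch int) bool {
	return size > chunkSize || size == -1 || batch > 0
}

func main() {
	const chunkSize = 48 << 20 // illustrative value for the chunk size option
	fmt.Println(useChunked(1024, chunkSize, 0))   // false: small file, single direct upload
	fmt.Println(useChunked(-1, chunkSize, 0))     // true: unknown size must be streamed
	fmt.Println(useChunked(1024, chunkSize, 500)) // true: batching forces the chunked path
}
```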

View File

@@ -280,7 +280,7 @@ func stripVersion(goarch string) string {
// build the binary in dir returning success or failure
func compileArch(version, goos, goarch, dir string) bool {
log.Printf("Compiling %s/%s", goos, goarch)
log.Printf("Compiling %s/%s into %s", goos, goarch, dir)
output := filepath.Join(dir, "rclone")
if goos == "windows" {
output += ".exe"
@@ -298,7 +298,6 @@ func compileArch(version, goos, goarch, dir string) bool {
"go", "build",
"--ldflags", "-s -X github.com/rclone/rclone/fs.Version=" + version,
"-trimpath",
"-i",
"-o", output,
"-tags", *tags,
"..",

View File

@@ -29,6 +29,7 @@ var (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &download, "download", "", download, "Check by downloading rather than with hash.")
AddFlags(cmdFlags)
}
@@ -50,7 +51,7 @@ the source match the files in the destination, not the other way
around. This means that extra files in the destination that are not in
the source will not be detected.
-The |--differ|, |--missing-on-dst|, |--missing-on-src|, |--src-only|
+The |--differ|, |--missing-on-dst|, |--missing-on-src|, |--match|
and |--error| flags write paths, one per line, to the file name (or
stdout if it is |-|) supplied. What they write is described in the
help below. For example |--differ| will write all paths which are

View File

@@ -148,8 +148,13 @@ flag.
Note that Jottacloud requires the MD5 hash before upload so if the
source does not have an MD5 checksum then the file will be cached
temporarily on disk (wherever the `TMPDIR` environment variable points
to) before it is uploaded. Small files will be cached in memory - see
to) before it is uploaded. Small files will be cached in memory - see
the [--jottacloud-md5-memory-limit](#jottacloud-md5-memory-limit) flag.
When uploading from local disk the source checksum is always available,
so this does not apply. Starting with rclone version 1.52 the same is
true for crypted remotes (in older versions the crypt backend would not
calculate hashes for uploads from local disk, so the Jottacloud
backend had to do it as described above).
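The caching behaviour described above amounts to hash-while-spooling: read the source once into either memory or a temporary file, feeding an MD5 hasher along the way, then upload from the spool with the now-known checksum. A minimal illustration of that strategy - an assumed sketch, not the Jottacloud backend's actual code:

```go
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
)

// spoolForMD5 illustrates the strategy the docs describe: the MD5 must
// be known before upload, so a source without one is spooled - to
// memory if small, to a temp file otherwise - while being hashed.
func spoolForMD5(in io.Reader, size, memLimit int64) (sum string, spooled io.Reader, cleanup func(), err error) {
	h := md5.New()
	cleanup = func() {}
	if size >= 0 && size <= memLimit {
		var buf bytes.Buffer
		if _, err = io.Copy(io.MultiWriter(h, &buf), in); err != nil {
			return "", nil, cleanup, err
		}
		return hex.EncodeToString(h.Sum(nil)), &buf, cleanup, nil
	}
	tmp, err := os.CreateTemp("", "spool") // honours TMPDIR, as the docs note
	if err != nil {
		return "", nil, cleanup, err
	}
	cleanup = func() { tmp.Close(); os.Remove(tmp.Name()) }
	if _, err = io.Copy(io.MultiWriter(h, tmp), in); err != nil {
		return "", nil, cleanup, err
	}
	if _, err = tmp.Seek(0, io.SeekStart); err != nil {
		return "", nil, cleanup, err
	}
	return hex.EncodeToString(h.Sum(nil)), tmp, cleanup, nil
}

func main() {
	sum, r, cleanup, err := spoolForMD5(strings.NewReader("hello"), 5, 1<<20)
	defer cleanup()
	if err != nil {
		panic(err)
	}
	b, _ := io.ReadAll(r)
	fmt.Println(sum, string(b))
}
```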
#### Restricted filename characters

View File

@@ -537,6 +537,8 @@ OR
"result": "<Raw command line output>"
}
```
**Authentication is required for this call.**
### core/gc: Runs a garbage collection. {#core-gc}
@@ -1212,7 +1214,7 @@ This allows you to remove a plugin using it's name
This takes parameters
-- name: name of the plugin in the format <author>/<plugin_name>
+- name: name of the plugin in the format `author`/`plugin_name`
Eg
@@ -1226,7 +1228,7 @@ This allows you to remove a plugin using it's name
This takes the following parameters
-- name: name of the plugin in the format <author>/<plugin_name>
+- name: name of the plugin in the format `author`/`plugin_name`
Eg

View File

@@ -1 +1 @@
-v1.53.0
+v1.54.0

View File

@@ -272,7 +272,7 @@ func (s *StatsInfo) String() string {
}
}
-_, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s\n",
+_, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s",
dateString,
fs.SizeSuffix(s.bytes),
fs.SizeSuffix(totalSize).Unit("Bytes"),
@@ -283,6 +283,7 @@ func (s *StatsInfo) String() string {
)
if !fs.Config.StatsOneLine {
_, _ = buf.WriteRune('\n')
errorDetails := ""
switch {
case s.fatalError:
@@ -291,6 +292,7 @@ func (s *StatsInfo) String() string {
errorDetails = " (retrying may help)"
case s.errors != 0:
errorDetails = " (no need to retry)"
}
// Add only non zero stats
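The newline moves out of the shared format string and onto the multi-line path only, so --stats-one-line output no longer ends with a stray line break. A toy reproduction of the behaviour - a hypothetical stand-in for StatsInfo.String, not the real accounting code:

```go
package main

import (
	"bytes"
	"fmt"
)

// statsString imitates the change: the shared format string no longer
// ends in \n, and the newline is written only on the multi-line path.
func statsString(oneLine bool) string {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "%10s / %s, 100%%, 1.5M/s, ETA 0s", "10M", "10M")
	if !oneLine {
		buf.WriteRune('\n') // newline moved here from the format string
		buf.WriteString("Errors: 0\n")
	}
	return buf.String()
}

func main() {
	fmt.Printf("one line: %q\n", statsString(true))
	fmt.Printf("full:     %q\n", statsString(false))
}
```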

View File

@@ -379,7 +379,7 @@ OR
"result": "<Raw command line output>"
}
` + "```" + `
`,
})
}

View File

@@ -45,7 +45,7 @@ func init() {
This takes the following parameters
-- name: name of the plugin in the format <author>/<plugin_name>
+- name: name of the plugin in the format ` + "`author`/`plugin_name`" + `
Eg
@@ -212,7 +212,7 @@ func init() {
This takes parameters
-- name: name of the plugin in the format <author>/<plugin_name>
+- name: name of the plugin in the format ` + "`author`/`plugin_name`" + `
Eg

View File

@@ -1,4 +1,4 @@
package fs
// Version of rclone
var Version = "v1.53.0-DEV"
var Version = "v1.54.0-DEV"

View File

@@ -1158,7 +1158,7 @@ func (item *Item) ReadAt(b []byte, off int64) (n int, err error) {
item.preAccess()
n, err = item.readAt(b, off)
item.postAccess()
-if err == nil {
+if err == nil || err == io.EOF {
break
}
fs.Errorf(item.name, "vfs cache: failed to _ensure cache %v", err)
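The contract at work is io.ReaderAt's: a read that hits the end of the file returns the bytes it did get together with io.EOF, so io.EOF is the normal outcome of a short read at the end, not a cache failure worth logging. A minimal demonstration:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	r := strings.NewReader("hello")
	buf := make([]byte, 8)
	// A read that runs past the end returns the bytes it got plus
	// io.EOF - a success case the cache retry loop must not treat
	// as "failed to _ensure cache".
	n, err := r.ReadAt(buf, 2)
	fmt.Printf("n=%d err=%v data=%q\n", n, err, buf[:n])
	if err == nil || err == io.EOF {
		fmt.Println("short read at EOF handled as success")
	}
}
```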