1
0
mirror of https://github.com/rclone/rclone.git synced 2026-02-11 05:53:27 +00:00

Compare commits

...

14 Commits

Author SHA1 Message Date
Anagh Kumar Baranwal
6a86ec70ea Added support for Max Pages
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2020-02-10 14:26:58 +05:30
Nick Craig-Wood
37c55972e3 vfs: add --vfs-read-wait and --vfs-write-wait flags
--vfs-read-wait duration    Time to wait for in-sequence read before seeking. (default 5ms)
    --vfs-write-wait duration   Time to wait for in-sequence write before giving error. (default 1s)

See: https://forum.rclone.org/t/constantly-high-iowait-add-log/14156
2020-02-10 10:30:20 +05:30
Nick Craig-Wood
91aef66881 mount: add --async-read flag to disable asynchronous reads
See: https://forum.rclone.org/t/constantly-high-iowait-add-log/14156
2020-02-10 10:30:20 +05:30
Nick Craig-Wood
90377f5e65 s3: Specify that Minio supports URL encoding in listings
Thanks to @harshavardhana for pointing this out

See #3934 for background
2020-02-09 12:03:20 +00:00
Lars Lehtonen
f1d9bd5eab lib/oauthutil: replace deprecated oauth2.NoContext 2020-02-07 17:49:29 +00:00
Lars Lehtonen
4ee3c21a9d cmd/serve/ftp: replace deprecated os.SEEK_SET with io.SeekStart 2020-02-06 10:58:34 +00:00
Lars Lehtonen
fe6f4135b4 fs/rc: fix dropped error 2020-02-04 11:31:06 +00:00
Nick Craig-Wood
3dfa63b85c onedrive: fix occasional 416 errors on multipart uploads
Before this change, when uploading multipart files, onedrive would
sometimes return an unexpected 416 error and rclone would abort the
transfer.

This is usually after a 500 error which caused rclone to do a retry.

This change checks the upload position on a 416 error and works out how
much of the current chunk to skip, then retries (or skips) the current
chunk as appropriate.

If the position is before the current chunk or after the current chunk
then rclone will abort the transfer.

See: https://forum.rclone.org/t/fragment-overlap-error-with-encrypted-onedrive/14001

Fixes #3131
2020-02-01 21:15:07 +00:00
Gary Kim
ff2343475a docs: Update README.md shields for changed CI
Signed-off-by: Gary Kim <gary@garykim.dev>
2020-02-01 20:17:40 +00:00
Nick Craig-Wood
bffd7f0f14 docs: note how to use GitHub's online editor to edit rclone's docs 2020-02-01 13:44:03 +00:00
Nick Craig-Wood
7c55fafe33 Add Durval Menezes to contributors 2020-02-01 13:41:47 +00:00
Nick Craig-Wood
2e7fe06beb Add Dave Koston to contributors 2020-02-01 13:41:47 +00:00
Durval Menezes
8ff91ff31b docs: Update the "Making your own client_id" section for drive
...so it accurately describes the new "Enhanced Security" Google process to get your own Client ID and Client Secret to use with rclone.
2020-02-01 13:41:18 +00:00
Nick Craig-Wood
4d1c616e97 Start v1.51.0-DEV development 2020-02-01 12:32:21 +00:00
27 changed files with 204 additions and 30 deletions

View File

@@ -203,6 +203,9 @@ don't need to run these when adding a feature.
Documentation for rclone sub commands is with their code, eg
`cmd/ls/ls.go`.
Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
for small changes in the docs which makes it very easy.
## Making a release ##
There are separate instructions for making a release in the RELEASE.md

View File

@@ -8,10 +8,7 @@
[Installation](https://rclone.org/install/) |
[Forum](https://forum.rclone.org/)
[![Build Status](https://travis-ci.org/rclone/rclone.svg?branch=master)](https://travis-ci.org/rclone/rclone)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/github/rclone/rclone?branch=master&passingText=windows%20-%20ok&svg=true)](https://ci.appveyor.com/project/rclone/rclone)
[![Build Status](https://dev.azure.com/rclone/rclone/_apis/build/status/rclone.rclone?branchName=master)](https://dev.azure.com/rclone/rclone/_build/latest?definitionId=2&branchName=master)
[![CircleCI](https://circleci.com/gh/rclone/rclone/tree/master.svg?style=svg)](https://circleci.com/gh/rclone/rclone/tree/master)
[![Build Status](https://github.com/rclone/rclone/workflows/build/badge.svg)](https://github.com/rclone/rclone/actions?query=workflow%3Abuild)
[![Go Report Card](https://goreportcard.com/badge/github.com/rclone/rclone)](https://goreportcard.com/report/github.com/rclone/rclone)
[![GoDoc](https://godoc.org/github.com/rclone/rclone?status.svg)](https://godoc.org/github.com/rclone/rclone)
[![Docker Pulls](https://img.shields.io/docker/pulls/rclone/rclone)](https://hub.docker.com/r/rclone/rclone)

View File

@@ -22,7 +22,7 @@ This file describes how to make the various kinds of releases
* git commit -a -v -m "Version v1.XX.0"
* make retag
* git push --tags origin master
* # Wait for the appveyor and travis builds to complete then...
* # Wait for the GitHub builds to complete then...
* make fetch_binaries
* make tarball
* make sign_upload

View File

@@ -1540,21 +1540,74 @@ func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (re
return response, err
}
// getPosition gets the current position in a multipart upload.
//
// It asks the upload session URL for its status and parses the start
// of the single expected nextExpectedRanges entry ("12345-" style)
// into a byte offset.
func (o *Object) getPosition(ctx context.Context, url string) (pos int64, err error) {
	opts := rest.Opts{
		Method:  "GET",
		RootURL: url,
	}
	var (
		info api.UploadFragmentResponse
		resp *http.Response
	)
	// Query the upload session, retrying via the pacer as needed.
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &info)
		return shouldRetry(resp, err)
	})
	if err != nil {
		return 0, err
	}
	ranges := info.NextExpectedRanges
	if len(ranges) != 1 {
		return 0, errors.Errorf("bad number of ranges in upload position: %v", ranges)
	}
	// The range looks like "start-" or "start-end"; we only need start.
	nextRange := ranges[0]
	dash := strings.IndexByte(nextRange, '-')
	if dash < 0 {
		return 0, errors.Errorf("no '-' in next expected range: %q", nextRange)
	}
	startStr := nextRange[:dash]
	pos, err = strconv.ParseInt(startStr, 10, 64)
	if err != nil {
		return 0, errors.Wrapf(err, "bad expected range: %q", startStr)
	}
	return pos, nil
}
// uploadFragment uploads a part
func (o *Object) uploadFragment(ctx context.Context, url string, start int64, totalSize int64, chunk io.ReadSeeker, chunkSize int64) (info *api.Item, err error) {
opts := rest.Opts{
Method: "PUT",
RootURL: url,
ContentLength: &chunkSize,
ContentRange: fmt.Sprintf("bytes %d-%d/%d", start, start+chunkSize-1, totalSize),
Body: chunk,
}
// var response api.UploadFragmentResponse
var resp *http.Response
var body []byte
var skip = int64(0)
err = o.fs.pacer.Call(func() (bool, error) {
_, _ = chunk.Seek(0, io.SeekStart)
toSend := chunkSize - skip
opts := rest.Opts{
Method: "PUT",
RootURL: url,
ContentLength: &toSend,
ContentRange: fmt.Sprintf("bytes %d-%d/%d", start+skip, start+chunkSize-1, totalSize),
Body: chunk,
}
_, _ = chunk.Seek(skip, io.SeekStart)
resp, err = o.fs.srv.Call(ctx, &opts)
if err != nil && resp != nil && resp.StatusCode == http.StatusRequestedRangeNotSatisfiable {
fs.Debugf(o, "Received 416 error - reading current position from server: %v", err)
pos, posErr := o.getPosition(ctx, url)
if posErr != nil {
fs.Debugf(o, "Failed to read position: %v", posErr)
return false, posErr
}
skip = pos - start
fs.Debugf(o, "Read position %d, chunk is %d..%d, bytes to skip = %d", pos, start, start+chunkSize, skip)
switch {
case skip < 0:
return false, errors.Wrapf(err, "sent block already (skip %d < 0), can't rewind", skip)
case skip > chunkSize:
return false, errors.Wrapf(err, "position is in the future (skip %d > chunkSize %d), can't skip forward", skip, chunkSize)
case skip == chunkSize:
fs.Debugf(o, "Skipping chunk as already sent (skip %d == chunkSize %d)", skip, chunkSize)
return false, nil
}
return true, errors.Wrapf(err, "retry this chunk skipping %d bytes", skip)
}
if err != nil {
return shouldRetry(resp, err)
}

View File

@@ -1338,7 +1338,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
//
// So we enable only on providers we know supports it properly, all others can retry when a
// XML Syntax error is detected.
var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba")
var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba" || f.opt.Provider == "Minio")
for {
// FIXME need to implement ALL loop
req := s3.ListObjectsInput{

View File

@@ -32,12 +32,14 @@ func mountOptions(device string) (options []fuse.MountOption) {
fuse.Subtype("rclone"),
fuse.FSName(device),
fuse.VolumeName(mountlib.VolumeName),
fuse.AsyncRead(),
// Options from benchmarking in the fuse module
//fuse.MaxReadahead(64 * 1024 * 1024),
//fuse.WritebackCache(),
}
if mountlib.AsyncRead {
options = append(options, fuse.AsyncRead())
}
if mountlib.NoAppleDouble {
options = append(options, fuse.NoAppleDouble())
}
@@ -71,6 +73,9 @@ func mountOptions(device string) (options []fuse.MountOption) {
if len(mountlib.ExtraFlags) > 0 {
fs.Errorf(nil, "--fuse-flag not supported with this FUSE backend")
}
if vfsflags.Opt.MaxPages != 0 {
options = append(options, fuse.MaxPages(vfsflags.Opt.MaxPages))
}
return options
}

View File

@@ -36,6 +36,7 @@ var (
NoAppleDouble = true // use noappledouble by default
NoAppleXattr = false // do not use noapplexattr by default
DaemonTimeout time.Duration // OSXFUSE only
AsyncRead = true // do async reads by default
)
// Global constants
@@ -318,6 +319,7 @@ be copied to the vfs cache before opening with --vfs-cache-mode full.
flags.BoolVarP(cmdFlags, &Daemon, "daemon", "", Daemon, "Run mount as a daemon (background mode).")
flags.StringVarP(cmdFlags, &VolumeName, "volname", "", VolumeName, "Set the volume name (not supported by all OSes).")
flags.DurationVarP(cmdFlags, &DaemonTimeout, "daemon-timeout", "", DaemonTimeout, "Time limit for rclone to respond to kernel (not supported by all OSes).")
flags.BoolVarP(cmdFlags, &AsyncRead, "async-read", "", AsyncRead, "Use asynchronous reads.")
if runtime.GOOS == "darwin" {
flags.BoolVarP(cmdFlags, &NoAppleDouble, "noappledouble", "", NoAppleDouble, "Sets the OSXFUSE option noappledouble.")

View File

@@ -442,7 +442,7 @@ func (d *Driver) GetFile(path string, offset int64) (size int64, fr io.ReadClose
if err != nil {
return 0, nil, err
}
_, err = handle.Seek(offset, os.SEEK_SET)
_, err = handle.Seek(offset, io.SeekStart)
if err != nil {
return 0, nil, err
}

View File

@@ -334,3 +334,5 @@ Contributors
* unbelauscht <58393353+unbelauscht@users.noreply.github.com>
* Motonori IWAMURO <vmi@nifty.com>
* Benjapol Worakan <benwrk@live.com>
* Dave Koston <dave.koston@stackpath.com>
* Durval Menezes <DurvalMenezes@users.noreply.github.com>

View File

@@ -1031,14 +1031,34 @@ be the same account as the Google Drive you want to access)
"Google Drive API".
4. Click "Credentials" in the left-side panel (not "Create
credentials", which opens the wizard), then "Create credentials", then
"OAuth client ID". It will prompt you to set the OAuth consent screen
product name, if you haven't set one already.
credentials", which opens the wizard), then "Create credentials"
5. Choose an application type of "other", and click "Create". (the
5. If you already configured an "Oauth Consent Screen", then skip
to the next step; if not, click on "CONFIGURE CONSENT SCREEN" button
(near the top right corner of the right panel), then select "External"
and click on "CREATE"; on the next screen, enter an "Application name"
("rclone" is OK) then click on "Save" (all other data is optional).
Click again on "Credentials" on the left panel to go back to the
"Credentials" screen.
(PS: if you are a GSuite user, you could also select "Internal" instead
of "External" above, but this has not been tested/documented so far).
6. Click on the "+ CREATE CREDENTIALS" button at the top of the screen,
then select "OAuth client ID".
7. Choose an application type of "other", and click "Create". (the
default name is fine)
6. It will show you a client ID and client secret. Use these values
8. It will show you a client ID and client secret. Use these values
in rclone config to add a new remote or edit an existing remote.
Be aware that, due to the "enhanced security" recently introduced by
Google, you are theoretically expected to "submit your app for verification"
and then wait a few weeks(!) for their response; in practice, you can go right
ahead and use the client ID and client secret with rclone, the only issue will
be a very scary confirmation screen shown when you connect via your browser
for rclone to be able to get its token-id (but as this only happens during
the remote configuration, it's not such a big deal).
(Thanks to @balazer on github for these instructions.)

View File

@@ -98,6 +98,14 @@ func IntVarP(flags *pflag.FlagSet, p *int, name, shorthand string, value int, us
setDefaultFromEnv(flags, name)
}
// Uint16VarP defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.Uint16VarP
func Uint16VarP(flags *pflag.FlagSet, p *uint16, name, shorthand string, value uint16, usage string) {
// Register the flag with pflag in the usual way.
flags.Uint16VarP(p, name, shorthand, value, usage)
// Then allow an environment variable to override the default
// (presumably the RCLONE_* convention — see setDefaultFromEnv).
setDefaultFromEnv(flags, name)
}
// Uint32VarP defines a flag which can be overridden by an environment variable
//
// It is a thin wrapper around pflag.Uint32VarP

View File

@@ -44,6 +44,9 @@ func CheckAndDownloadWebGUIRelease(checkUpdate bool, forceUpdate bool, fetchURL
extractPath := filepath.Join(cachePath, "current")
extractPathExist, extractPathStat, err := exists(extractPath)
if err != nil {
return err
}
if extractPathExist && !extractPathStat.IsDir() {
return errors.New("Web GUI path exists, but is a file instead of folder. Please check the path " + extractPath)

View File

@@ -1,4 +1,4 @@
package fs
// Version of rclone
var Version = "v1.51.0"
var Version = "v1.51.0-DEV"

View File

@@ -503,7 +503,7 @@ func doConfig(id, name string, m configmap.Mapper, oauthConfig *oauth2.Config, o
}
// Exchange the code for a token
token, err := oauthConfig.Exchange(oauth2.NoContext, auth.Code)
token, err := oauthConfig.Exchange(context.Background(), auth.Code)
if err != nil {
return errors.Wrap(err, "failed to get token")
}

18
vendor/bazil.org/fuse/fuse.go generated vendored
View File

@@ -156,7 +156,8 @@ func (e *MountpointDoesNotExistError) Error() string {
// progress.
func Mount(dir string, options ...MountOption) (*Conn, error) {
conf := mountConfig{
options: make(map[string]string),
options: make(map[string]string),
maxPages: 32,
}
for _, option := range options {
if err := option(&conf); err != nil {
@@ -238,10 +239,20 @@ func initMount(c *Conn, conf *mountConfig) error {
MaxWrite: maxWrite,
Flags: InitBigWrites | conf.initFlags,
}
setMaxPages(r, s, conf)
r.Respond(s)
return nil
}
// setMaxPages copies the configured max pages value into the init
// response, but only when the kernel advertised support for it by
// setting InitMaxPages in the init request flags.
func setMaxPages(request *InitRequest, response *InitResponse, conf *mountConfig) {
	if request.Flags&InitMaxPages == 0 {
		// Kernel too old to understand MaxPages - leave response untouched.
		return
	}
	response.Flags |= InitMaxPages
	response.MaxPages = conf.maxPages
}
// A Request represents a single FUSE request received from the kernel.
// Use a type switch to determine the specific kind.
// A request of unrecognized type will have concrete type *Header.
@@ -1229,6 +1240,9 @@ type InitResponse struct {
// Maximum size of a single write operation.
// Linux enforces a minimum of 4 KiB.
MaxWrite uint32
// Maximum number of pages in a single write operation.
// Linux enforces a minimum of 32.
MaxPages uint16
}
func (r *InitResponse) String() string {
@@ -1244,12 +1258,14 @@ func (r *InitRequest) Respond(resp *InitResponse) {
out.MaxReadahead = resp.MaxReadahead
out.Flags = uint32(resp.Flags)
out.MaxWrite = resp.MaxWrite
out.MaxPages = resp.MaxPages
// MaxWrite larger than our receive buffer would just lead to
// errors on large writes.
if out.MaxWrite > maxWrite {
out.MaxWrite = maxWrite
}
r.respond(buf)
}

View File

@@ -274,6 +274,8 @@ const (
InitWritebackCache InitFlags = 1 << 16
InitNoOpenSupport InitFlags = 1 << 17
InitMaxPages InitFlags = 1 << 22 // Linux only
InitCaseSensitive InitFlags = 1 << 29 // OS X only
InitVolRename InitFlags = 1 << 30 // OS X only
InitXtimes InitFlags = 1 << 31 // OS X only
@@ -710,6 +712,8 @@ type initOut struct {
Flags uint32
Unused uint32
MaxWrite uint32
_ uint32 // Unused, refers to TimeGran
MaxPages uint16
}
type interruptIn struct {

View File

@@ -4,4 +4,6 @@ package fuse
//
// Linux 4.2.0 has been observed to cap this value at 128kB
// (FUSE_MAX_PAGES_PER_REQ=32, 4kB pages).
const maxWrite = 128 * 1024
// From Linux 4.20, the cap has been increased to 1MiB
// (FUSE_MAX_PAGES_PER_REQ=256, 4kB pages).
const maxWrite = 1 * 1024 * 1024 // 1 MiB

9
vendor/bazil.org/fuse/options.go generated vendored
View File

@@ -16,6 +16,7 @@ type mountConfig struct {
maxReadahead uint32
initFlags InitFlags
osxfuseLocations []OSXFUSEPaths
maxPages uint16
}
func escapeComma(s string) string {
@@ -317,3 +318,11 @@ func AllowNonEmptyMount() MountOption {
return nil
}
}
// MaxPages enables the configuration of the maximum number of pages
// in the request & response from the kernel.
//
// Linux only. Others ignore this option.
//
// On Linux the platform implementation clamps the value to at most
// 256 pages; a count of 0 also selects 256.
func MaxPages(count uint16) MountOption {
return maxPages(count)
}

View File

@@ -38,3 +38,7 @@ func noBrowse(conf *mountConfig) error {
conf.options["nobrowse"] = ""
return nil
}
// maxPages is a no-op on this platform: the kernel max pages setting
// is Linux only, so the option is accepted but ignored.
func maxPages(count uint16) MountOption {
return dummyOption
}

View File

@@ -30,3 +30,7 @@ func exclCreate(conf *mountConfig) error {
func noBrowse(conf *mountConfig) error {
return nil
}
// maxPages is a no-op on this platform: the kernel max pages setting
// is Linux only, so the option is accepted but ignored.
func maxPages(count uint16) MountOption {
return dummyOption
}

View File

@@ -27,3 +27,13 @@ func exclCreate(conf *mountConfig) error {
func noBrowse(conf *mountConfig) error {
return nil
}
// maxPages returns a MountOption which records the requested max
// pages count in the mount configuration. Values of 0 or above 256
// are replaced with 256 before being stored.
func maxPages(count uint16) MountOption {
	return func(conf *mountConfig) error {
		if count == 0 || count > 256 {
			count = 256
		}
		conf.maxPages = count
		return nil
	}
}

View File

@@ -232,9 +232,10 @@ func (fh *ReadFileHandle) readAt(p []byte, off int64) (n int, err error) {
if gap := off - fh.offset; gap > 0 && gap < int64(8*maxBuf) {
// Set a background timer so we don't wait for long
// Waits here potentially affect all seeks so need to keep them short
// This time here was made by finding the smallest when mounting a local backend
// that didn't cause seeks.
const maxWait = 5 * time.Millisecond
// The default time here was made by finding the
// smallest when mounting a local backend that didn't
// cause seeks.
maxWait := fh.file.d.vfs.Opt.ReadWait
timeout := time.NewTimer(maxWait)
done := make(chan struct{})
abort := int32(0)

View File

@@ -53,6 +53,9 @@ var DefaultOpt = Options{
ChunkSizeLimit: -1,
CacheMaxSize: -1,
CaseInsensitive: runtime.GOOS == "windows" || runtime.GOOS == "darwin", // default to true on Windows and Mac, false otherwise
WriteWait: 1000 * time.Millisecond,
ReadWait: 5 * time.Millisecond,
MaxPages: 0, // Only for Linux
}
// Node represents either a directory (*Dir) or a file (*File)
@@ -202,6 +205,9 @@ type Options struct {
CacheMaxSize fs.SizeSuffix
CachePollInterval time.Duration
CaseInsensitive bool
WriteWait time.Duration // time to wait for in-sequence write
ReadWait time.Duration // time to wait for in-sequence read
MaxPages uint16
}
// New creates a new VFS and root directory. If opt is nil, then

View File

@@ -33,5 +33,7 @@ func AddFlags(flagSet *pflag.FlagSet) {
flags.FVarP(flagSet, DirPerms, "dir-perms", "", "Directory permissions")
flags.FVarP(flagSet, FilePerms, "file-perms", "", "File permissions")
flags.BoolVarP(flagSet, &Opt.CaseInsensitive, "vfs-case-insensitive", "", Opt.CaseInsensitive, "If a file name not found, find a case insensitive match.")
flags.DurationVarP(flagSet, &Opt.WriteWait, "vfs-write-wait", "", Opt.WriteWait, "Time to wait for in-sequence write before giving error.")
flags.DurationVarP(flagSet, &Opt.ReadWait, "vfs-read-wait", "", Opt.ReadWait, "Time to wait for in-sequence read before seeking.")
platformFlags(flagSet)
}

View File

@@ -0,0 +1,22 @@
// +build linux
package vfsflags
import (
"github.com/rclone/rclone/fs/config/flags"
"github.com/spf13/pflag"
"golang.org/x/sys/unix"
)
// add any extra platform specific flags
//
// Registers the Linux-only VFS flags (--umask, --uid, --gid,
// --max-pages) and seeds their defaults from the running process.
func platformFlags(flagSet *pflag.FlagSet) {
flags.IntVarP(flagSet, &Opt.Umask, "umask", "", Opt.Umask, "Override the permission bits set by the filesystem.")
// Read the current umask by setting it to 0 and restoring it -
// unix.Umask returns the previous value, there is no read-only API.
// NOTE(review): this runs after the --umask flag is registered above,
// so the default shown in help text may not reflect the process
// umask - confirm the ordering is intended.
Opt.Umask = unix.Umask(0) // read the umask
unix.Umask(Opt.Umask) // set it back to what it was
// Default the uid/gid overrides to the effective user and group.
Opt.UID = uint32(unix.Geteuid())
Opt.GID = uint32(unix.Getegid())
flags.Uint32VarP(flagSet, &Opt.UID, "uid", "", Opt.UID, "Override the uid field set by the filesystem.")
flags.Uint32VarP(flagSet, &Opt.GID, "gid", "", Opt.GID, "Override the gid field set by the filesystem.")
flags.Uint16VarP(flagSet, &Opt.MaxPages, "max-pages", "", Opt.MaxPages, "Set the Max Pages to be passed onto the FUSE library")
}

View File

@@ -1,4 +1,4 @@
// +build linux darwin freebsd
// +build darwin freebsd
package vfsflags

View File

@@ -132,7 +132,8 @@ func (fh *WriteFileHandle) writeAt(p []byte, off int64) (n int, err error) {
}
if fh.offset != off {
// Set a background timer so we don't wait forever
timeout := time.NewTimer(10 * time.Second)
maxWait := fh.file.d.vfs.Opt.WriteWait
timeout := time.NewTimer(maxWait)
done := make(chan struct{})
abort := int32(0)
go func() {