mirror of https://github.com/rclone/rclone.git synced 2026-02-01 17:23:39 +00:00

Compare commits


13 Commits

Author SHA1 Message Date
Nick Craig-Wood
6071db565c onedrive: work around duplicated directory listing entries
When paging big directories, onedrive sometimes duplicates the last
item on one page as the first item of the next page.

This patch detects that and skips the duplicated item with an error
message.

See: https://forum.rclone.org/t/unexpected-duplicates-on-onedrive-with-0s-in-filename/23164
2021-03-31 09:52:47 +01:00
Nick Craig-Wood
d0f8b4f479 fs/cache: fix recreation of backends after they have expired
Before this change, on the first attempt to create a backend we used a
non-canonicalized string. When the backend expired, the second attempt
to create it would use the canonicalized string (because it was in the
remap cache), which would fail because it was now `name{XXXX}:`.

This change makes sure that whenever we create a backend we always use
the non-canonicalized string.

See: https://forum.rclone.org/t/connection-string-inconsistencies-on-beta/23171
2021-03-30 18:46:30 +01:00
Nick Craig-Wood
58d82a5c73 rc: allow fs= params to be a JSON blob 2021-03-30 17:07:27 +01:00
Nick Craig-Wood
c0c74003f2 fs/cache: add --fs-cache-expire-duration to control the fs cache
This commit makes the previously statically configured fs cache configurable.

It introduces two parameters `--fs-cache-expire-duration` and
`--fs-cache-expire-interval` to control the caching of the items.

It also adds new interfaces to lib/cache to set these.
2021-03-30 12:46:47 +01:00
Nick Craig-Wood
60bc7a079a rc: factor rc.Error out of rcserver for re-use in librclone #4891 2021-03-30 12:46:05 +01:00
Nick Craig-Wood
20c5ca08fb test_all: fix crash when using -clean 2021-03-29 23:12:53 +01:00
Nick Craig-Wood
fc57648b75 lib/rest: fix multipart uploads stopping on context cancel
Before this change, when the context was cancelled (due to
--max-duration for example) multipart uploads could deadlock.

This change fixes the problem by introducing another goroutine to
monitor the context and close the pipe with an error when the context
errors.
2021-03-29 19:09:47 +01:00
Nick Craig-Wood
8c5c91e68f lib/readers: add NewContextReader to error on context errors 2021-03-29 19:09:47 +01:00
Nick Craig-Wood
9dd39e8524 Add x0b to contributors 2021-03-29 19:09:47 +01:00
albertony
9c9186183d docs: add short description of configuration file format (#5142)
Fixes #572
2021-03-27 17:26:01 +01:00
Nick Craig-Wood
2ccf416e83 build: add version to android builds and fix upload 2021-03-26 09:18:54 +00:00
x0b
5577c7b760 build: replace xgo with NDK for Android builds 2021-03-26 09:18:54 +00:00
Nick Craig-Wood
f6dbb98a1d cmount: update cgofuse to the latest version to bring in macfuse 4 fix 2021-03-25 09:02:17 +00:00
25 changed files with 673 additions and 56 deletions

View File

@@ -14,6 +14,205 @@ on:
pull_request:
jobs:
build:
timeout-minutes: 60
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows_amd64', 'windows_386', 'other_os', 'go1.13', 'go1.14', 'go1.15']
include:
- job_name: linux
os: ubuntu-latest
go: '1.16.x'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
quicktest: true
racequicktest: true
deploy: true
- job_name: mac_amd64
os: macOS-latest
go: '1.16.x'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
racequicktest: true
deploy: true
- job_name: mac_arm64
os: macOS-latest
go: '1.16.x'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows_amd64
os: windows-latest
go: '1.16.x'
gotags: cmount
build_flags: '-include "^windows/amd64" -cgo'
build_args: '-buildmode exe'
quicktest: true
racequicktest: true
deploy: true
- job_name: windows_386
os: windows-latest
go: '1.16.x'
gotags: cmount
goarch: '386'
cgo: '1'
build_flags: '-include "^windows/386" -cgo'
build_args: '-buildmode exe'
quicktest: true
deploy: true
- job_name: other_os
os: ubuntu-latest
go: '1.16.x'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.13
os: ubuntu-latest
go: '1.13.x'
quicktest: true
- job_name: go1.14
os: ubuntu-latest
go: '1.14.x'
quicktest: true
racequicktest: true
- job_name: go1.15
os: ubuntu-latest
go: '1.15.x'
quicktest: true
racequicktest: true
name: ${{ matrix.job_name }}
runs-on: ${{ matrix.os }}
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Install Go
uses: actions/setup-go@v2
with:
stable: 'false'
go-version: ${{ matrix.go }}
- name: Set environment variables
shell: bash
run: |
echo 'GOTAGS=${{ matrix.gotags }}' >> $GITHUB_ENV
echo 'BUILD_FLAGS=${{ matrix.build_flags }}' >> $GITHUB_ENV
echo 'BUILD_ARGS=${{ matrix.build_args }}' >> $GITHUB_ENV
if [[ "${{ matrix.goarch }}" != "" ]]; then echo 'GOARCH=${{ matrix.goarch }}' >> $GITHUB_ENV ; fi
if [[ "${{ matrix.cgo }}" != "" ]]; then echo 'CGO_ENABLED=${{ matrix.cgo }}' >> $GITHUB_ENV ; fi
- name: Install Libraries on Linux
shell: bash
run: |
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
sudo apt-get install fuse libfuse-dev rpm pkg-config
if: matrix.os == 'ubuntu-latest'
- name: Install Libraries on macOS
shell: bash
run: |
brew update
brew install --cask macfuse
if: matrix.os == 'macOS-latest'
- name: Install Libraries on Windows
shell: powershell
run: |
$ProgressPreference = 'SilentlyContinue'
choco install -y winfsp zip
echo "CPATH=C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
if ($env:GOARCH -eq "386") {
choco install -y mingw --forcex86 --force
echo "C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
}
# Copy mingw32-make.exe to make.exe so the same command line
# can be used on Windows as on macOS and Linux
$path = (get-command mingw32-make.exe).Path
Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
if: matrix.os == 'windows-latest'
- name: Print Go version and environment
shell: bash
run: |
printf "Using go at: $(which go)\n"
printf "Go version: $(go version)\n"
printf "\n\nGo environment:\n\n"
go env
printf "\n\nRclone environment:\n\n"
make vars
printf "\n\nSystem environment:\n\n"
env
- name: Go module cache
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Build rclone
shell: bash
run: |
make
- name: Run tests
shell: bash
run: |
make quicktest
if: matrix.quicktest
- name: Race test
shell: bash
run: |
make racequicktest
if: matrix.racequicktest
- name: Code quality test
shell: bash
run: |
make build_dep
make check
if: matrix.check
- name: Compile all architectures test
shell: bash
run: |
make
make compile_all
if: matrix.compile_all
- name: Deploy built binaries
shell: bash
run: |
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep_linux ; fi
if [[ "${{ matrix.os }}" == "windows-latest" ]]; then make release_dep_windows ; fi
make ci_beta
env:
RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
# working-directory: '$(modulePath)'
# Deploy binaries if enabled in config && not a PR && not a fork
if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
android:
timeout-minutes: 30
name: "android-all"

View File

@@ -897,6 +897,7 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
// Top parameter asks for bigger pages of data
// https://dev.onedrive.com/odata/optional-query-parameters.htm
opts := f.newOptsCall(dirID, "GET", "/children?$top=1000")
lastID := "\x00"
OUTER:
for {
var result api.ListChildrenResponse
@@ -911,6 +912,10 @@ OUTER:
if len(result.Value) == 0 {
break
}
if result.Value[0].ID == lastID {
fs.Errorf(f, "Skipping duplicate entry %q in directory %q", lastID, dirID)
result.Value = result.Value[1:]
}
for i := range result.Value {
item := &result.Value[i]
isFolder := item.GetFolder() != nil
@@ -937,6 +942,9 @@ OUTER:
}
opts.Path = ""
opts.RootURL = result.NextLink
if len(result.Value) > 0 {
lastID = result.Value[len(result.Value)-1].ID
}
}
return
}
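
The guard generalizes to any paged listing API. A minimal standalone sketch of the same technique, with illustrative `item`/`fetch` names that are not rclone's API:

```go
package main

import "fmt"

// item stands in for the API's directory entry; only the ID matters here.
type item struct{ ID string }

// listAll walks a paged listing, dropping an entry that the server
// duplicated across a page boundary, as the onedrive fix above does.
func listAll(fetch func(page int) []item) (out []item) {
	lastID := "\x00" // sentinel that can never be a real ID
	for page := 0; ; page++ {
		items := fetch(page)
		if len(items) == 0 {
			break
		}
		if items[0].ID == lastID {
			fmt.Printf("skipping duplicate entry %q\n", lastID)
			items = items[1:]
		}
		out = append(out, items...)
		if len(items) > 0 {
			lastID = items[len(items)-1].ID
		}
	}
	return out
}

func main() {
	pages := [][]item{
		{{"a"}, {"b"}},
		{{"b"}, {"c"}}, // "b" repeated as the first item of page 2
		{},
	}
	fmt.Println(listAll(func(page int) []item { return pages[page] }))
	// Output: [{a} {b} {c}]
}
```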

View File

@@ -1137,7 +1137,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// opts.Body=0), so upload it as a multipart form POST with
// Content-Length set.
if size == 0 {
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf)
if err != nil {
return errors.Wrap(err, "failed to make multipart upload for 0 length file")
}

View File

@@ -682,7 +682,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath stri
"need_idx_progress": {"true"},
"replace": {"1"},
}
formReader, contentType, _, err := rest.MultipartUpload(ctx, in, parameters, "file", f.opt.Enc.FromStandardName(filename))
if err != nil {
return nil, errors.Wrap(err, "failed to make multipart upload")
}

View File

@@ -647,7 +647,7 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64,
params.Set("filename", name)
params.Set("parent_id", parent)
params.Set("override-name-exist", strconv.FormatBool(true))
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name)
if err != nil {
return nil, errors.Wrap(err, "failed to make multipart upload")
}

View File

@@ -476,3 +476,4 @@ put them back in again.` >}}
* Naveen Honest Raj <naveendurai19@gmail.com>
* Lucas Messenger <lmesseng@cisco.com>
* Manish Kumar <krmanish260@gmail.com>
* x0b <x0bdev@gmail.com>

View File

@@ -8,8 +8,8 @@ Configure
First, you'll need to configure rclone. As the object storage systems
have quite complicated authentication these are kept in a config file.
(See the [`--config`](#config-config-file) entry for how to find the config
file and choose its location.)
The easiest way to make the config is to run rclone with the config
option:
@@ -639,7 +639,7 @@ See `--copy-dest` and `--backup-dir`.
### --config=CONFIG_FILE ###
Specify the location of the rclone configuration file.
Normally the config file is in your home directory as a file called
`.config/rclone/rclone.conf` (or `.rclone.conf` if created with an
@@ -656,8 +656,44 @@ location is for you.
Use this flag to override the config location, e.g. `rclone
--config=".myconfig" .config`.
If the location is set to empty string `""` or the special value
`/notfound`, or the OS null device (represented by the value `NUL` on
Windows and `/dev/null` on Unix systems), then rclone will keep the
config file in memory only.
The file format is basic [INI](https://en.wikipedia.org/wiki/INI_file#Format):
Sections of text, led by a `[section]` header and followed by
`key=value` entries on separate lines. In rclone each remote is
represented by its own section, where the section name defines the
name of the remote. Options are specified as the `key=value` entries,
where the key is the option name without the `--backend-` prefix,
in lowercase and with `_` instead of `-`. E.g. option `--mega-hard-delete`
corresponds to key `hard_delete`. Only backend options can be specified.
A special, and required, key `type` identifies the [storage system](/overview/),
where the value is the internal lowercase name as returned by command
`rclone help backends`. Comments are indicated by `;` or `#` at the
beginning of a line.
Example:
[megaremote]
type = mega
user = you@example.com
pass = PDPcQVVjVtzFY-GTdDFozqBhTdsPg3qH
Note that passwords are in [obscured](/commands/rclone_obscure/)
form. Also, many storage systems use token-based authentication instead
of passwords, and this requires additional steps. It is easier, and safer,
to use the interactive command `rclone config` instead of manually
editing the configuration file.
The configuration file will typically contain login information, and
should therefore have restricted permissions so that only the current user
can read it. Rclone tries to ensure this when it writes the file.
You may also choose to [encrypt](#configuration-encryption) the file.
When token-based authentication is used, the configuration file
must be writable, because rclone needs to update the tokens inside it.
### --contimeout=TIME ###
@@ -751,6 +787,27 @@ triggering follow-on actions if data was copied, or skipping if not.
NB: Enabling this option turns a usually non-fatal error into a potentially
fatal one - please check and adjust your scripts accordingly!
### --fs-cache-expire-duration=TIME
When using rclone via the API rclone caches created remotes for 5
minutes by default in the "fs cache". This means that if you do
repeated actions on the same remote then rclone won't have to build it
again from scratch, which makes it more efficient.
This flag sets the time that the remotes are cached for. If you set it
to `0` (or negative) then rclone won't cache the remotes at all.
Note that if you use some flags, e.g. `--backup-dir`, and this is set
to `0`, rclone may build two remotes (one for the source or destination
and one for the `--backup-dir`) where it may have only built one
before.
### --fs-cache-expire-interval=TIME
This controls how often rclone checks for cached remotes to expire.
See the `--fs-cache-expire-duration` documentation above for more
info. The default is 60s; set it to 0 to disable expiry.
### --header ###
Add an HTTP header for all transactions. The flag can be repeated to

View File

@@ -378,6 +378,55 @@ call and taken by the [options/set](#options-set) calls as well as the
- `BandwidthSpec` - this will be set and returned as a string, eg
"1M".
## Specifying remotes to work on
Remotes are specified with the `fs=`, `srcFs=`, `dstFs=`
parameters depending on the command being used.
The parameters can be a string as per the rest of rclone, eg
`s3:bucket/path` or `:sftp:/my/dir`. They can also be specified as
JSON blobs.
If specifying a JSON blob, it should be an object mapping strings to
strings. These values will be used to configure the remote. There are
3 special values which may be set:
- `type` - set to the backend type to specify a remote called `:type:`
- `_name` - set to the remote name to specify a remote called `name:`
- `_root` - sets the root of the remote - may be empty
One of `_name` or `type` should normally be set. If the `local`
backend is desired then `type` should be set to `local`. If `_root`
isn't specified then it defaults to the root of the remote.
For example this JSON is equivalent to `remote:/tmp`
```
{
"_name": "remote",
"_path": "/tmp"
}
```
And this is equivalent to `:sftp,host='example.com':/tmp`
```
{
"type": "sftp",
"host": "example.com",
"_path": "/tmp"
}
```
And this is equivalent to `/tmp/dir`
```
{
type = "local",
_ path = "/tmp/dir"
}
```
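
As a hedged usage sketch, this is how a client might exercise the new JSON form over HTTP, assuming an rc server started with `rclone rcd --rc-no-auth` on the default `localhost:5572`:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// fs= is a JSON object here rather than a "remote:path" string.
	body, err := json.Marshal(map[string]interface{}{
		"fs": map[string]string{
			"type":  "local",
			"_root": "/tmp",
		},
		"remote": "",
	})
	if err != nil {
		panic(err)
	}
	resp, err := http.Post("http://localhost:5572/operations/list",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out)) // JSON listing of /tmp
}
```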
## Supported commands
{{< rem autogenerated start "- run make rcdocs - don't edit here" >}}
### backend/command: Runs a backend command. {#backend-command}

fs/cache/cache.go (vendored, 37 changes)
View File

@@ -12,14 +12,26 @@ import (
)
var (
once sync.Once // creation
c *cache.Cache
mu sync.Mutex // mutex to protect remap
remap = map[string]string{} // map user supplied names to canonical names
)
// Create the cache just once
func createOnFirstUse() {
once.Do(func() {
ci := fs.GetConfig(context.Background())
c = cache.New()
c.SetExpireDuration(ci.FsCacheExpireDuration)
c.SetExpireInterval(ci.FsCacheExpireInterval)
})
}
// Canonicalize looks up fsString in the mapping from user supplied
names to canonical names and returns the canonical form
func Canonicalize(fsString string) string {
createOnFirstUse()
mu.Lock()
canonicalName, ok := remap[fsString]
mu.Unlock()
@@ -43,10 +55,11 @@ func addMapping(fsString, canonicalName string) {
// GetFn gets an fs.Fs named fsString either from the cache or creates
// it afresh with the create function
func GetFn(ctx context.Context, fsString string, create func(ctx context.Context, fsString string) (fs.Fs, error)) (f fs.Fs, err error) {
createOnFirstUse()
canonicalFsString := Canonicalize(fsString)
created := false
value, err := c.Get(canonicalFsString, func(canonicalFsString string) (f interface{}, ok bool, err error) {
f, err = create(ctx, fsString) // always create the backend with the original non-canonicalised string
ok = err == nil || err == fs.ErrorIsFile
created = ok
return f, ok, err
@@ -58,19 +71,19 @@ func GetFn(ctx context.Context, fsString string, create func(ctx context.Context
// Check we stored the Fs at the canonical name
if created {
canonicalName := fs.ConfigString(f)
if canonicalName != canonicalFsString {
// Note that if err == fs.ErrorIsFile at this moment
// then we can't rename the remote as it will have the
// wrong error status, we need to add a new one.
if err == nil {
fs.Debugf(nil, "fs cache: renaming cache item %q to be canonical %q", fsString, canonicalName)
value, found := c.Rename(fsString, canonicalName)
fs.Debugf(nil, "fs cache: renaming cache item %q to be canonical %q", canonicalFsString, canonicalName)
value, found := c.Rename(canonicalFsString, canonicalName)
if found {
f = value.(fs.Fs)
}
addMapping(canonicalFsString, canonicalName)
} else {
fs.Debugf(nil, "fs cache: adding new entry for parent of %q, %q", fsString, canonicalName)
fs.Debugf(nil, "fs cache: adding new entry for parent of %q, %q", canonicalFsString, canonicalName)
Put(canonicalName, f)
}
}
@@ -80,6 +93,7 @@ func GetFn(ctx context.Context, fsString string, create func(ctx context.Context
// Pin f into the cache until Unpin is called
func Pin(f fs.Fs) {
createOnFirstUse()
c.Pin(fs.ConfigString(f))
}
@@ -97,6 +111,7 @@ func PinUntilFinalized(f fs.Fs, x interface{}) {
// Unpin f from the cache
func Unpin(f fs.Fs) {
createOnFirstUse()
c.Unpin(fs.ConfigString(f))
}
@@ -127,6 +142,7 @@ func GetArr(ctx context.Context, fsStrings []string) (f []fs.Fs, err error) {
// Put puts an fs.Fs named fsString into the cache
func Put(fsString string, f fs.Fs) {
createOnFirstUse()
canonicalName := fs.ConfigString(f)
c.Put(canonicalName, f)
addMapping(fsString, canonicalName)
@@ -136,15 +152,18 @@ func Put(fsString string, f fs.Fs) {
//
// Returns number of entries deleted
func ClearConfig(name string) (deleted int) {
createOnFirstUse()
return c.DeletePrefix(name + ":")
}
// Clear removes everything from the cache
func Clear() {
createOnFirstUse()
c.Clear()
}
// Entries returns the number of entries in the cache
func Entries() int {
createOnFirstUse()
return c.Entries()
}
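
The pattern here is a lazy singleton: construction is deferred behind `sync.Once` so the cache reads `--fs-cache-expire-duration` after flag parsing rather than at package init. A generic sketch of that pattern, with illustrative names:

```go
package main

import (
	"fmt"
	"sync"
)

var (
	once     sync.Once
	registry map[string]string
	// expiry stands in for a value set by flag parsing, which
	// happens after package init but before first use.
	expiry = "5m"
)

// createOnFirstUse builds the singleton exactly once, on the first
// call that needs it, so it sees the final configuration.
func createOnFirstUse() {
	once.Do(func() {
		registry = map[string]string{"expire-duration": expiry}
	})
}

func lookup(key string) string {
	createOnFirstUse()
	return registry[key]
}

func main() {
	expiry = "10m" // simulated flag parsing
	fmt.Println(lookup("expire-duration")) // 10m, not the init-time 5m
}
```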

View File

@@ -33,7 +33,7 @@ func mockNewFs(t *testing.T) (func(), func(ctx context.Context, path string) (fs
panic("unreachable")
}
cleanup := func() {
Clear()
}
return cleanup, create
}
@@ -42,12 +42,12 @@ func TestGet(t *testing.T) {
cleanup, create := mockNewFs(t)
defer cleanup()
assert.Equal(t, 0, Entries())
f, err := GetFn(context.Background(), "mock:/", create)
require.NoError(t, err)
assert.Equal(t, 1, Entries())
f2, err := GetFn(context.Background(), "mock:/", create)
require.NoError(t, err)
@@ -59,13 +59,13 @@ func TestGetFile(t *testing.T) {
cleanup, create := mockNewFs(t)
defer cleanup()
assert.Equal(t, 0, Entries())
f, err := GetFn(context.Background(), "mock:/file.txt", create)
require.Equal(t, fs.ErrorIsFile, err)
require.NotNil(t, f)
assert.Equal(t, 2, Entries())
f2, err := GetFn(context.Background(), "mock:/file.txt", create)
require.Equal(t, fs.ErrorIsFile, err)
@@ -85,13 +85,13 @@ func TestGetFile2(t *testing.T) {
cleanup, create := mockNewFs(t)
defer cleanup()
assert.Equal(t, 0, Entries())
f, err := GetFn(context.Background(), "mock:file.txt", create)
require.Equal(t, fs.ErrorIsFile, err)
require.NotNil(t, f)
assert.Equal(t, 2, Entries())
f2, err := GetFn(context.Background(), "mock:file.txt", create)
require.Equal(t, fs.ErrorIsFile, err)
@@ -111,13 +111,13 @@ func TestGetError(t *testing.T) {
cleanup, create := mockNewFs(t)
defer cleanup()
assert.Equal(t, 0, Entries())
f, err := GetFn(context.Background(), "mock:/error", create)
require.Equal(t, errSentinel, err)
require.Equal(t, nil, f)
assert.Equal(t, 0, Entries())
}
func TestPut(t *testing.T) {
@@ -126,17 +126,17 @@ func TestPut(t *testing.T) {
f := mockfs.NewFs(context.Background(), "mock", "/alien")
assert.Equal(t, 0, Entries())
Put("mock:/alien", f)
assert.Equal(t, 1, Entries())
fNew, err := GetFn(context.Background(), "mock:/alien", create)
require.NoError(t, err)
require.Equal(t, f, fNew)
assert.Equal(t, 1, Entries())
// Check canonicalisation
@@ -146,7 +146,7 @@ func TestPut(t *testing.T) {
require.NoError(t, err)
require.Equal(t, f, fNew)
assert.Equal(t, 1, Entries())
}
@@ -170,7 +170,7 @@ func TestClearConfig(t *testing.T) {
cleanup, create := mockNewFs(t)
defer cleanup()
assert.Equal(t, 0, Entries())
_, err := GetFn(context.Background(), "mock:/file.txt", create)
require.Equal(t, fs.ErrorIsFile, err)
@@ -190,11 +190,11 @@ func TestClear(t *testing.T) {
_, err := GetFn(context.Background(), "mock:/", create)
require.NoError(t, err)
assert.Equal(t, 1, Entries())
Clear()
assert.Equal(t, 0, Entries())
}
func TestEntries(t *testing.T) {

View File

@@ -123,6 +123,8 @@ type ConfigInfo struct {
RefreshTimes bool
NoConsole bool
TrafficClass uint8
FsCacheExpireDuration time.Duration
FsCacheExpireInterval time.Duration
}
// NewConfig creates a new config with everything set to the default
@@ -160,6 +162,8 @@ func NewConfig() *ConfigInfo {
c.MultiThreadStreams = 4
c.TrackRenamesStrategy = "hash"
c.FsCacheExpireDuration = 300 * time.Second
c.FsCacheExpireInterval = 60 * time.Second
return c
}

View File

@@ -128,6 +128,8 @@ func AddFlags(ci *fs.ConfigInfo, flagSet *pflag.FlagSet) {
flags.BoolVarP(flagSet, &ci.RefreshTimes, "refresh-times", "", ci.RefreshTimes, "Refresh the modtime of remote files.")
flags.BoolVarP(flagSet, &ci.NoConsole, "no-console", "", ci.NoConsole, "Hide console window. Supported on Windows only.")
flags.StringVarP(flagSet, &dscp, "dscp", "", "", "Set DSCP value to connections. Can be value or names, eg. CS1, LE, DF, AF21.")
flags.DurationVarP(flagSet, &ci.FsCacheExpireDuration, "fs-cache-expire-duration", "", ci.FsCacheExpireDuration, "cache remotes for this long (0 to disable caching)")
flags.DurationVarP(flagSet, &ci.FsCacheExpireInterval, "fs-cache-expire-interval", "", ci.FsCacheExpireInterval, "interval to check for expired remotes")
}
// ParseHeaders converts the strings passed in via the header flags into HTTPOptions

View File

@@ -450,6 +450,7 @@ func TestRcFsInfo(t *testing.T) {
func TestUploadFile(t *testing.T) {
r, call := rcNewRun(t, "operations/uploadfile")
defer r.Finalise()
ctx := context.Background()
testFileName := "test.txt"
testFileContent := "Hello World"
@@ -460,7 +461,7 @@ func TestUploadFile(t *testing.T) {
currentFile, err := os.Open(path.Join(r.LocalName, testFileName))
require.NoError(t, err)
formReader, contentType, _, err := rest.MultipartUpload(ctx, currentFile, url.Values{}, "file", testFileName)
require.NoError(t, err)
httpReq := httptest.NewRequest("POST", "/", formReader)
@@ -482,7 +483,7 @@ func TestUploadFile(t *testing.T) {
currentFile, err = os.Open(path.Join(r.LocalName, testFileName))
require.NoError(t, err)
formReader, contentType, _, err = rest.MultipartUpload(ctx, currentFile, url.Values{}, "file", testFileName)
require.NoError(t, err)
httpReq = httptest.NewRequest("POST", "/", formReader)

View File

@@ -4,21 +4,64 @@ package rc
import (
"context"
"errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/config/configmap"
)
// GetFsNamed gets an fs.Fs named fsName either from the cache or creates it afresh
func GetFsNamed(ctx context.Context, in Params, fsName string) (f fs.Fs, err error) {
fsString, err := in.GetString(fsName)
if err != nil {
if !IsErrParamInvalid(err) {
return nil, err
}
fsString, err = getConfigMap(in, fsName)
if err != nil {
return nil, err
}
}
return cache.Get(ctx, fsString)
}
// getConfigMap gets the config as a map from in and converts it to a
// config string
//
// It uses the special parameters _name to name the remote and _root
// to set the root of the remote.
func getConfigMap(in Params, fsName string) (fsString string, err error) {
var m configmap.Simple
err = in.GetStruct(fsName, &m)
if err != nil {
return fsString, err
}
pop := func(key string) string {
value := m[key]
delete(m, key)
return value
}
Type := pop("type")
name := pop("_name")
root := pop("_root")
if name != "" {
fsString = name
} else if Type != "" {
fsString = ":" + Type
} else {
return fsString, errors.New(`couldn't find "type" or "_name" in JSON config definition`)
}
config := m.String()
if config != "" {
fsString += ","
fsString += config
}
fsString += ":"
fsString += root
return fsString, nil
}
// GetFs gets an fs.Fs named "fs" either from the cache or creates it afresh
func GetFs(ctx context.Context, in Params) (f fs.Fs, err error) {
return GetFsNamed(ctx, in, "fs")

View File

@@ -2,6 +2,7 @@ package rc
import (
"context"
"fmt"
"testing"
"github.com/rclone/rclone/fs/cache"
@@ -13,6 +14,8 @@ import (
func mockNewFs(t *testing.T) func() {
f := mockfs.NewFs(context.Background(), "mock", "mock")
cache.Put("/", f)
cache.Put("mock:/", f)
cache.Put(":mock:/", f)
return func() {
cache.Clear()
}
@@ -36,6 +39,98 @@ func TestGetFsNamed(t *testing.T) {
assert.Nil(t, f)
}
func TestGetFsNamedStruct(t *testing.T) {
defer mockNewFs(t)()
in := Params{
"potato": Params{
"type": "mock",
"_root": "/",
},
}
f, err := GetFsNamed(context.Background(), in, "potato")
require.NoError(t, err)
assert.NotNil(t, f)
in = Params{
"potato": Params{
"_name": "mock",
"_root": "/",
},
}
f, err = GetFsNamed(context.Background(), in, "potato")
require.NoError(t, err)
assert.NotNil(t, f)
}
func TestGetConfigMap(t *testing.T) {
for _, test := range []struct {
in Params
fsName string
wantFsString string
wantErr string
}{
{
in: Params{
"Fs": Params{},
},
fsName: "Fs",
wantErr: `couldn't find "type" or "_name" in JSON config definition`,
},
{
in: Params{
"Fs": Params{
"notastring": true,
},
},
fsName: "Fs",
wantErr: `cannot unmarshal bool`,
},
{
in: Params{
"Fs": Params{
"_name": "potato",
},
},
fsName: "Fs",
wantFsString: "potato:",
},
{
in: Params{
"Fs": Params{
"type": "potato",
},
},
fsName: "Fs",
wantFsString: ":potato:",
},
{
in: Params{
"Fs": Params{
"type": "sftp",
"_name": "potato",
"parameter": "42",
"parameter2": "true",
"_root": "/path/to/somewhere",
},
},
fsName: "Fs",
wantFsString: "potato,parameter='42',parameter2='true':/path/to/somewhere",
},
} {
gotFsString, gotErr := getConfigMap(test.in, test.fsName)
what := fmt.Sprintf("%+v", test.in)
assert.Equal(t, test.wantFsString, gotFsString, what)
if test.wantErr == "" {
assert.NoError(t, gotErr)
} else {
require.Error(t, gotErr)
assert.Contains(t, gotErr.Error(), test.wantErr)
}
}
}
func TestGetFs(t *testing.T) {
defer mockNewFs(t)()

View File

@@ -279,3 +279,26 @@ func (p Params) GetDuration(key string) (time.Duration, error) {
}
return duration, nil
}
// Error creates the standard response for an errored rc call using an
// rc.Param from a path, input Params, error and a suggested HTTP
// response code.
//
// It returns a Params and an updated status code
func Error(path string, in Params, err error, status int) (Params, int) {
// Adjust the status code for some well known errors
errOrig := errors.Cause(err)
switch {
case errOrig == fs.ErrorDirNotFound || errOrig == fs.ErrorObjectNotFound:
status = http.StatusNotFound
case IsErrParamInvalid(err) || IsErrParamNotFound(err):
status = http.StatusBadRequest
}
result := Params{
"status": status,
"error": err.Error(),
"input": in,
"path": path,
}
return result, status
}

View File

@@ -169,21 +169,9 @@ func (s *Server) Serve() error {
// writeError writes a formatted error to the output
func writeError(path string, in rc.Params, w http.ResponseWriter, err error, status int) {
fs.Errorf(nil, "rc: %q: error: %v", path, err)
params, status := rc.Error(path, in, err, status)
w.WriteHeader(status)
err = rc.WriteJSON(w, params)
if err != nil {
// can't return the error at this point
fs.Errorf(nil, "rc: writeError: failed to write JSON output from %#v: %v", in, err)

View File

Make TestRun have a []string of flags to try - that then makes it generic
*/
import (
"context"
"flag"
"log"
"math/rand"
@@ -22,6 +23,7 @@ import (
"time"
_ "github.com/rclone/rclone/backend/all" // import all fs
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/lib/pacer"
)
@@ -70,6 +72,7 @@ func main() {
log.Println("test_all should be run from the root of the rclone source code")
log.Fatal(err)
}
configfile.LoadConfig(context.Background())
// Seed the random number generator
rand.Seed(time.Now().UTC().UnixNano())

go.mod (2 changes)
View File

@@ -17,7 +17,7 @@ require (
github.com/anacrolix/dms v1.2.0
github.com/atotto/clipboard v0.1.2
github.com/aws/aws-sdk-go v1.37.3
github.com/billziss-gh/cgofuse v1.5.0
github.com/buengese/sgzip v0.1.1
github.com/calebcase/tmpfile v1.0.2 // indirect
github.com/colinmarc/hdfs/v2 v2.2.0

go.sum (2 changes)
View File

@@ -113,6 +113,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/billziss-gh/cgofuse v1.4.0 h1:kju2jDmdNuDDCrxPob2ggmZr5Mj/odCjU1Y8kx0Th9E=
github.com/billziss-gh/cgofuse v1.4.0/go.mod h1:LJjoaUojlVjgo5GQoEJTcJNqZJeRU0nCR84CyxKt2YM=
github.com/billziss-gh/cgofuse v1.5.0 h1:kH516I/s+Ab4diL/Y/ayFeUjjA8ey+JK12xDfBf4HEs=
github.com/billziss-gh/cgofuse v1.5.0/go.mod h1:LJjoaUojlVjgo5GQoEJTcJNqZJeRU0nCR84CyxKt2YM=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=

lib/cache/cache.go (vendored, 31 changes)
View File

@@ -28,6 +28,30 @@ func New() *Cache {
}
}
// SetExpireDuration sets the duration after which cache entries expire
//
// If it is less than or equal to 0 then things are never cached
func (c *Cache) SetExpireDuration(d time.Duration) *Cache {
c.expireDuration = d
return c
}
// returns true if we aren't to cache anything
func (c *Cache) noCache() bool {
return c.expireDuration <= 0
}
// SetExpireInterval sets the interval at which the cache expiry runs
//
// Set to 0 or a negative number to disable
func (c *Cache) SetExpireInterval(d time.Duration) *Cache {
if d <= 0 {
d = 100 * 365 * 24 * time.Hour
}
c.expireInterval = d
return c
}
// cacheEntry is stored in the cache
type cacheEntry struct {
value interface{} // cached item
@@ -69,7 +93,9 @@ func (c *Cache) Get(key string, create CreateFunc) (value interface{}, err error
err: err,
}
c.mu.Lock()
if !c.noCache() {
c.cache[key] = entry
}
}
defer c.mu.Unlock()
c.used(entry)
@@ -100,6 +126,9 @@ func (c *Cache) Unpin(key string) {
func (c *Cache) Put(key string, value interface{}) {
c.mu.Lock()
defer c.mu.Unlock()
if c.noCache() {
return
}
entry := &cacheEntry{
value: value,
key: key,
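
Taken together, the new setters give lib/cache a small chainable API. A usage sketch, assuming the import path `github.com/rclone/rclone/lib/cache` and that `New()` defaults to caching enabled:

```go
package main

import (
	"fmt"
	"time"

	"github.com/rclone/rclone/lib/cache"
)

func main() {
	// Configure expiry up front; the setters chain.
	c := cache.New().
		SetExpireDuration(5 * time.Minute).
		SetExpireInterval(time.Minute)

	// Get creates on a miss via the callback and caches the value.
	v, err := c.Get("key", func(key string) (interface{}, bool, error) {
		return "expensive value", true, nil
	})
	fmt.Println(v, err) // expensive value <nil>

	// A non-positive duration disables caching entirely.
	c.SetExpireDuration(0)
	c.Put("other", "dropped")
	fmt.Println(c.Entries()) // still 1: "key" predates the change
}
```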

View File

@@ -100,7 +100,7 @@ func TestPut(t *testing.T) {
func TestCacheExpire(t *testing.T) {
c, create := setup(t)
c.SetExpireInterval(time.Millisecond)
assert.Equal(t, false, c.expireRunning)
_, err := c.Get("/", create)
@@ -127,6 +127,31 @@ func TestCacheExpire(t *testing.T) {
c.mu.Unlock()
}
func TestCacheNoExpire(t *testing.T) {
c, create := setup(t)
assert.False(t, c.noCache())
c.SetExpireDuration(0)
assert.Equal(t, false, c.expireRunning)
assert.True(t, c.noCache())
f, err := c.Get("/", create)
require.NoError(t, err)
require.NotNil(t, f)
c.mu.Lock()
assert.Equal(t, 0, len(c.cache))
c.mu.Unlock()
c.Put("/alien", "slime")
c.mu.Lock()
assert.Equal(t, 0, len(c.cache))
c.mu.Unlock()
}
func TestCachePin(t *testing.T) {
c, create := setup(t)

lib/readers/context.go (new file, 28 changes)
View File

@@ -0,0 +1,28 @@
package readers
import (
"context"
"io"
)
// NewContextReader creates a reader that returns any errors that ctx gives
func NewContextReader(ctx context.Context, r io.Reader) io.Reader {
return &contextReader{
ctx: ctx,
r: r,
}
}
type contextReader struct {
ctx context.Context
r io.Reader
}
// Read bytes as per io.Reader interface
func (cr *contextReader) Read(p []byte) (n int, err error) {
err = cr.ctx.Err()
if err != nil {
return 0, err
}
return cr.r.Read(p)
}

View File

@@ -0,0 +1,28 @@
package readers
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestContextReader(t *testing.T) {
r := NewPatternReader(100)
ctx, cancel := context.WithCancel(context.Background())
cr := NewContextReader(ctx, r)
var buf = make([]byte, 3)
n, err := cr.Read(buf)
require.NoError(t, err)
assert.Equal(t, 3, n)
assert.Equal(t, []byte{0, 1, 2}, buf)
cancel()
n, err = cr.Read(buf)
assert.Equal(t, context.Canceled, err)
assert.Equal(t, 0, n)
}

View File

@@ -308,7 +308,7 @@ func (api *Client) Call(ctx context.Context, opts *Opts) (resp *http.Response, e
// the int64 returned is the overhead in addition to the file contents, in case Content-Length is required
//
// NB This doesn't allow setting the content type of the attachment
func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, contentName, fileName string) (io.ReadCloser, string, int64, error) {
bodyReader, bodyWriter := io.Pipe()
writer := multipart.NewWriter(bodyWriter)
contentType := writer.FormDataContentType()
@@ -343,8 +343,21 @@ func MultipartUpload(in io.Reader, params url.Values, contentName, fileName stri
multipartLength := int64(buf.Len())
// Make sure we close the pipe writer to release the reader on context cancel
quit := make(chan struct{})
go func() {
select {
case <-quit:
break
case <-ctx.Done():
_ = bodyWriter.CloseWithError(ctx.Err())
}
}()
// Pump the data in the background
go func() {
defer close(quit)
var err error
for key, vals := range params {
@@ -452,7 +465,7 @@ func (api *Client) callCodec(ctx context.Context, opts *Opts, request interface{
opts = opts.Copy()
var overhead int64
opts.Body, opts.ContentType, overhead, err = MultipartUpload(ctx, opts.Body, params, opts.MultipartContentName, opts.MultipartFileName)
if err != nil {
return nil, err
}
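
Extracted from the rclone specifics, the fix is a watchdog goroutine that force-closes the pipe writer with the context's error, so anything blocked on the pipe is released. A standalone sketch:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	bodyReader, bodyWriter := io.Pipe()

	// Watchdog: on context error, close the pipe with that error.
	quit := make(chan struct{})
	go func() {
		select {
		case <-quit: // producer finished normally
		case <-ctx.Done():
			_ = bodyWriter.CloseWithError(ctx.Err())
		}
	}()

	// Producer: stands in for the multipart pump; it blocks because
	// nothing reads the pipe, simulating the original deadlock.
	go func() {
		defer close(quit)
		_, err := bodyWriter.Write([]byte("part"))
		fmt.Println("producer unblocked:", err)
	}()

	time.Sleep(10 * time.Millisecond)
	cancel() // e.g. --max-duration expired

	// The reader now sees the context error instead of hanging.
	_, err := io.ReadAll(bodyReader)
	fmt.Println("reader got:", err)
	time.Sleep(10 * time.Millisecond) // let the producer's print flush
}
```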