mirror of https://github.com/rclone/rclone.git synced 2025-12-15 15:53:41 +00:00

Compare commits


1 Commit

Author SHA1 Message Date
Nick Craig-Wood
1d1d847f18 union: change epff policy to search local disks first
It's always been random which remote epff/ff finds first. Make it so
that we search local disks first, which will save on network resources.

See: https://forum.rclone.org/t/rclone-union-no-longer-preferring-local-copies-windows/32002/3
2022-07-21 17:23:56 +01:00
33 changed files with 383 additions and 511 deletions
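In code terms the policy becomes a two-pass search: probe the upstreams whose backing filesystem is local first, and fall back to the remote ones only when the file was not found. A minimal, self-contained sketch of that idea, using hypothetical `Upstream` and `lookup` stand-ins rather than rclone's types (the real implementation, in the union policy hunk further down, probes the candidates concurrently rather than in a loop):

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("object not found")

// Upstream is a hypothetical stand-in for a union upstream.
type Upstream struct {
	Name  string
	Local bool
	Files map[string]bool
}

// lookup checks one upstream for the file.
func lookup(u *Upstream, path string) error {
	if u.Files[path] {
		return nil
	}
	return errNotFound
}

// findFirst searches only upstreams whose Local flag matches isLocal
// and returns the first one containing path.
func findFirst(ups []*Upstream, path string, isLocal bool) (*Upstream, error) {
	for _, u := range ups {
		if u.Local != isLocal {
			continue
		}
		if lookup(u, path) == nil {
			return u, nil
		}
	}
	return nil, errNotFound
}

// find implements the local-first policy: local disks first, then remotes.
func find(ups []*Upstream, path string) (*Upstream, error) {
	u, err := findFirst(ups, path, true)
	if errors.Is(err, errNotFound) {
		u, err = findFirst(ups, path, false)
	}
	return u, err
}

func main() {
	ups := []*Upstream{
		{Name: "s3", Local: false, Files: map[string]bool{"a.txt": true}},
		{Name: "disk", Local: true, Files: map[string]bool{"a.txt": true}},
	}
	u, _ := find(ups, "a.txt")
	fmt.Println(u.Name) // disk: the local copy wins even though s3 is listed first
}
```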

View File

@@ -25,12 +25,12 @@ jobs:
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.17', 'go1.18']
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.16', 'go1.17']
include:
- job_name: linux
os: ubuntu-latest
go: '1.19.x'
go: '1.18.x'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -41,14 +41,14 @@ jobs:
- job_name: linux_386
os: ubuntu-latest
go: '1.19.x'
go: '1.18.x'
goarch: 386
gotags: cmount
quicktest: true
- job_name: mac_amd64
os: macos-11
go: '1.19.x'
go: '1.18.x'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -57,14 +57,14 @@ jobs:
- job_name: mac_arm64
os: macos-11
go: '1.19.x'
go: '1.18.x'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows
os: windows-latest
go: '1.19.x'
go: '1.18.x'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'
@@ -74,20 +74,20 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '1.19.x'
go: '1.18.x'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.17
- job_name: go1.16
os: ubuntu-latest
go: '1.17.x'
go: '1.16.x'
quicktest: true
racequicktest: true
- job_name: go1.18
- job_name: go1.17
os: ubuntu-latest
go: '1.18.x'
go: '1.17.x'
quicktest: true
racequicktest: true
@@ -227,12 +227,9 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v2
with:
# FIXME temporary until golangci-lint supports go1.19
go-version: 1.18.x
- name: Code quality test
uses: golangci/golangci-lint-action@v3
uses: golangci/golangci-lint-action@v2
with:
# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
version: latest
@@ -253,7 +250,11 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v1
with:
go-version: 1.19.x
go-version: 1.18.x
# Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
- name: Force NDK version
run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;23.1.7779620" | grep -v = || true
- name: Go module cache
uses: actions/cache@v2
@@ -277,29 +278,27 @@ jobs:
go install golang.org/x/mobile/cmd/gobind@latest
go install golang.org/x/mobile/cmd/gomobile@latest
env PATH=$PATH:~/go/bin gomobile init
echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV
- name: arm-v7a gomobile build
run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
- name: arm-v7a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm' >> $GITHUB_ENV
echo 'GOARM=7' >> $GITHUB_ENV
echo 'CGO_ENABLED=1' >> $GITHUB_ENV
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm-v7a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .
- name: arm64-v8a Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=arm64' >> $GITHUB_ENV
@@ -307,12 +306,12 @@ jobs:
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: arm64-v8a build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
- name: x86 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=386' >> $GITHUB_ENV
@@ -320,12 +319,12 @@ jobs:
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x86 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
- name: x64 Set environment variables
shell: bash
run: |
echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
echo 'GOOS=android' >> $GITHUB_ENV
echo 'GOARCH=amd64' >> $GITHUB_ENV
@@ -333,7 +332,7 @@ jobs:
echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
- name: x64 build
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 .
run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .
- name: Upload artifacts
run: |

View File

@@ -5,7 +5,7 @@ linters:
- deadcode
- errcheck
- goimports
- revive
#- revive
- ineffassign
- structcheck
- varcheck

View File

@@ -145,7 +145,6 @@ func (f *Fs) newUpstream(ctx context.Context, dir, remote string) (*upstream, error) {
dir: dir,
pathAdjustment: newAdjustment(f.root, dir),
}
cache.PinUntilFinalized(u.f, u)
return u, nil
}

View File

@@ -1435,7 +1435,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType)
}
if entryPath != "" {
notifyFunc(f.opt.Enc.ToStandardPath(entryPath), entryType)
notifyFunc(entryPath, entryType)
}
}
if !changeList.HasMore {
@@ -1697,9 +1697,6 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *files.CommitInfo
if size > 0 {
// if size is known, check if next chunk is final
appendArg.Close = uint64(size)-in.BytesRead() <= uint64(chunkSize)
if in.BytesRead() > uint64(size) {
return nil, fmt.Errorf("expected %d bytes in input, but have read %d so far", size, in.BytesRead())
}
} else {
// if size is unknown, upload as long as we can read full chunks from the reader
appendArg.Close = in.BytesRead()-cursor.Offset < uint64(chunkSize)
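The added check in this hunk guards against a reader that yields more data than the declared size. A sketch of the kind of byte-counting reader such a check depends on; `countingReader` here is hypothetical, standing in for whatever backs `in.BytesRead()`:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// countingReader wraps an io.Reader and tracks how many bytes have
// been read, like the in.BytesRead() accounting in the hunk above.
type countingReader struct {
	r io.Reader
	n uint64
}

func (c *countingReader) Read(p []byte) (int, error) {
	n, err := c.r.Read(p)
	c.n += uint64(n)
	return n, err
}

func (c *countingReader) BytesRead() uint64 { return c.n }

func main() {
	declared := uint64(5) // caller promised 5 bytes
	in := &countingReader{r: strings.NewReader("hello world")}
	buf := make([]byte, 4)
	for {
		_, err := in.Read(buf)
		if in.BytesRead() > declared {
			// the sanity check from the hunk: fail fast instead of
			// silently uploading more than the commit describes
			fmt.Printf("expected %d bytes in input, but have read %d so far\n",
				declared, in.BytesRead())
			return
		}
		if err != nil {
			return
		}
	}
}
```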

View File

@@ -572,7 +572,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
return "", err
}
bucket, bucketPath := f.split(remote)
return path.Join(f.opt.FrontEndpoint, "/download/", bucket, quotePath(bucketPath)), nil
return path.Join(f.opt.FrontEndpoint, "/download/", bucket, bucketPath), nil
}
// Copy src to this remote using server-side copy operations.
@@ -760,7 +760,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
// make a GET request to (frontend)/download/:item/:path
opts := rest.Opts{
Method: "GET",
Path: path.Join("/download/", o.fs.root, quotePath(o.fs.opt.Enc.FromStandardPath(o.remote))),
Path: path.Join("/download/", o.fs.root, o.fs.opt.Enc.FromStandardPath(o.remote)),
Options: optionsFixed,
}
err = o.fs.pacer.Call(func() (bool, error) {
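Both changes in this file route URL path components through a `quotePath` helper before joining. The helper's body is not shown in the diff; what follows is a plausible per-segment percent-encoding sketch (an assumption based on the call sites, not rclone's implementation):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// quotePath percent-encodes each segment of a slash-separated path
// while leaving the separators intact, which is presumably what a
// helper used inside path.Join on a download URL needs to do.
func quotePath(p string) string {
	segments := strings.Split(p, "/")
	for i, s := range segments {
		segments[i] = url.PathEscape(s)
	}
	return strings.Join(segments, "/")
}

func main() {
	fmt.Println(quotePath("dir with spaces/file#1.txt"))
	// dir%20with%20spaces/file%231.txt
}
```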

View File

@@ -152,7 +152,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
m.Set(configClientSecret, "")
srv := rest.NewClient(fshttp.NewClient(ctx))
token, tokenEndpoint, err := doTokenAuth(ctx, srv, loginToken)
token, tokenEndpoint, username, err := doTokenAuth(ctx, srv, loginToken)
if err != nil {
return nil, fmt.Errorf("failed to get oauth token: %w", err)
}
@@ -161,6 +161,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
if err != nil {
return nil, fmt.Errorf("error while saving token: %w", err)
}
m.Set(configUsername, username)
return fs.ConfigGoto("choose_device")
case "legacy": // configure a jottacloud backend using legacy authentication
m.Set("configVersion", fmt.Sprint(legacyConfigVersion))
@@ -271,21 +272,30 @@ sync or the backup section, for example, you must choose yes.`)
if config.Result != "true" {
m.Set(configDevice, "")
m.Set(configMountpoint, "")
}
username, userOk := m.Get(configUsername)
if userOk && config.Result != "true" {
return fs.ConfigGoto("end")
}
oAuthClient, _, err := getOAuthClient(ctx, name, m)
if err != nil {
return nil, err
}
jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
cust, err := getCustomerInfo(ctx, apiSrv)
if err != nil {
return nil, err
if !userOk {
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
cust, err := getCustomerInfo(ctx, apiSrv)
if err != nil {
return nil, err
}
username = cust.Username
m.Set(configUsername, username)
if config.Result != "true" {
return fs.ConfigGoto("end")
}
}
acc, err := getDriveInfo(ctx, jfsSrv, cust.Username)
jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
acc, err := getDriveInfo(ctx, jfsSrv, username)
if err != nil {
return nil, err
}
@@ -316,14 +326,10 @@ a new by entering a unique name.`, defaultDevice)
return nil, err
}
jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
cust, err := getCustomerInfo(ctx, apiSrv)
if err != nil {
return nil, err
}
username, _ := m.Get(configUsername)
acc, err := getDriveInfo(ctx, jfsSrv, cust.Username)
acc, err := getDriveInfo(ctx, jfsSrv, username)
if err != nil {
return nil, err
}
@@ -338,7 +344,7 @@ a new by entering a unique name.`, defaultDevice)
var dev *api.JottaDevice
if isNew {
fs.Debugf(nil, "Creating new device: %s", device)
dev, err = createDevice(ctx, jfsSrv, path.Join(cust.Username, device))
dev, err = createDevice(ctx, jfsSrv, path.Join(username, device))
if err != nil {
return nil, err
}
@@ -346,7 +352,7 @@ a new by entering a unique name.`, defaultDevice)
m.Set(configDevice, device)
if !isNew {
dev, err = getDeviceInfo(ctx, jfsSrv, path.Join(cust.Username, device))
dev, err = getDeviceInfo(ctx, jfsSrv, path.Join(username, device))
if err != nil {
return nil, err
}
@@ -376,16 +382,11 @@ You may create a new by entering a unique name.`, device)
return nil, err
}
jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
cust, err := getCustomerInfo(ctx, apiSrv)
if err != nil {
return nil, err
}
username, _ := m.Get(configUsername)
device, _ := m.Get(configDevice)
dev, err := getDeviceInfo(ctx, jfsSrv, path.Join(cust.Username, device))
dev, err := getDeviceInfo(ctx, jfsSrv, path.Join(username, device))
if err != nil {
return nil, err
}
@@ -403,7 +404,7 @@ You may create a new by entering a unique name.`, device)
return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device: %w", defaultDevice, err)
}
fs.Debugf(nil, "Creating new mountpoint: %s", mountpoint)
_, err := createMountPoint(ctx, jfsSrv, path.Join(cust.Username, device, mountpoint))
_, err := createMountPoint(ctx, jfsSrv, path.Join(username, device, mountpoint))
if err != nil {
return nil, err
}
@@ -590,10 +591,10 @@ func doLegacyAuth(ctx context.Context, srv *rest.Client, oauthConfig *oauth2.Config
}
// doTokenAuth runs the actual token request for V2 authentication
func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 string) (token oauth2.Token, tokenEndpoint string, err error) {
func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 string) (token oauth2.Token, tokenEndpoint string, username string, err error) {
loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
if err != nil {
return token, "", err
return token, "", "", err
}
// decode login token
@@ -601,7 +602,7 @@ func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 string)
decoder := json.NewDecoder(bytes.NewReader(loginTokenBytes))
err = decoder.Decode(&loginToken)
if err != nil {
return token, "", err
return token, "", "", err
}
// retrieve endpoint urls
@@ -612,7 +613,7 @@ func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 string)
var wellKnown api.WellKnown
_, err = apiSrv.CallJSON(ctx, &opts, nil, &wellKnown)
if err != nil {
return token, "", err
return token, "", "", err
}
// prepare out token request with username and password
@@ -634,14 +635,14 @@ func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 string)
var jsonToken api.TokenJSON
_, err = apiSrv.CallJSON(ctx, &opts, nil, &jsonToken)
if err != nil {
return token, "", err
return token, "", "", err
}
token.AccessToken = jsonToken.AccessToken
token.RefreshToken = jsonToken.RefreshToken
token.TokenType = jsonToken.TokenType
token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
return token, wellKnown.TokenEndpoint, err
return token, wellKnown.TokenEndpoint, loginToken.Username, err
}
// getCustomerInfo queries general information about the account
@@ -943,11 +944,17 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
return err
})
cust, err := getCustomerInfo(ctx, f.apiSrv)
if err != nil {
return nil, err
user, userOk := m.Get(configUsername)
if userOk {
f.user = user
} else {
fs.Infof(nil, "Username not found in config and must be looked up, reconfigure to avoid the extra request")
cust, err := getCustomerInfo(ctx, f.apiSrv)
if err != nil {
return nil, err
}
f.user = cust.Username
}
f.user = cust.Username
f.setEndpoints()
if root != "" && !rootIsDir {

View File

@@ -16,11 +16,14 @@ func init() {
// Given the order of the candidates, act on the first one found where the relative path exists.
type EpFF struct{}
func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath string) (*upstream.Fs, error) {
func (p *EpFF) epffIsLocal(ctx context.Context, upstreams []*upstream.Fs, filePath string, isLocal bool) (*upstream.Fs, error) {
ch := make(chan *upstream.Fs, len(upstreams))
ctx, cancel := context.WithCancel(ctx)
defer cancel()
for _, u := range upstreams {
if u.IsLocal() != isLocal {
continue
}
u := u // Closure
go func() {
rfs := u.RootFs
@@ -32,7 +35,10 @@ func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath string) (*upstream.Fs, error) {
}()
}
var u *upstream.Fs
for range upstreams {
for _, upstream := range upstreams {
if upstream.IsLocal() != isLocal {
continue
}
u = <-ch
if u != nil {
break
@@ -44,6 +50,15 @@ func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath string) (*upstream.Fs, error) {
return u, nil
}
func (p *EpFF) epff(ctx context.Context, upstreams []*upstream.Fs, filePath string) (*upstream.Fs, error) {
// search local disks first
u, err := p.epffIsLocal(ctx, upstreams, filePath, true)
if err == fs.ErrorObjectNotFound {
u, err = p.epffIsLocal(ctx, upstreams, filePath, false)
}
return u, err
}
// Action category policy, governing the modification of files and directories
func (p *EpFF) Action(ctx context.Context, upstreams []*upstream.Fs, path string) ([]*upstream.Fs, error) {
if len(upstreams) == 0 {

View File

@@ -34,6 +34,7 @@ type Fs struct {
Opt *common.Options
writable bool
creatable bool
isLocal bool
usage *fs.Usage // Cache the usage
cacheTime time.Duration // cache duration
cacheMutex sync.RWMutex
@@ -95,6 +96,7 @@ func New(ctx context.Context, remote, root string, opt *common.Options) (*Fs, error) {
return nil, err
}
f.RootFs = rFs
f.isLocal = rFs.Features().IsLocal
rootString := fspath.JoinRootPath(remote, root)
myFs, err := cache.Get(ctx, rootString)
if err != nil && err != fs.ErrorIsFile {
@@ -142,6 +144,11 @@ func (f *Fs) WrapEntry(e fs.DirEntry) (Entry, error) {
}
}
// IsLocal true if the upstream Fs is a local disk
func (f *Fs) IsLocal() bool {
return f.isLocal
}
// UpstreamFs get the upstream Fs the entry is stored in
func (e *Directory) UpstreamFs() *Fs {
return e.f
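With the flag cached on the wrapper at construction time, a policy can cheaply split candidates by locality. A tiny sketch of that partitioning over a hypothetical FS type:

```go
package main

import "fmt"

// FS is a hypothetical upstream carrying the cached locality flag
// from the hunk above.
type FS struct {
	Name    string
	isLocal bool
}

// IsLocal reports whether the upstream is backed by a local disk.
func (f *FS) IsLocal() bool { return f.isLocal }

// partition splits upstreams into local and remote groups so callers
// can try the cheap group first.
func partition(ups []*FS) (local, remote []*FS) {
	for _, u := range ups {
		if u.IsLocal() {
			local = append(local, u)
		} else {
			remote = append(remote, u)
		}
	}
	return local, remote
}

func main() {
	l, r := partition([]*FS{{"disk", true}, {"s3", false}})
	fmt.Println(len(l), len(r)) // 1 1
}
```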

View File

@@ -1,3 +1,6 @@
//go:build go1.17
// +build go1.17
package restic
import (

View File

@@ -1,3 +1,6 @@
//go:build go1.17
// +build go1.17
package restic
import (

View File

@@ -1,4 +1,8 @@
// Package restic serves a remote suitable for use with restic
//go:build go1.17
// +build go1.17
package restic
import (

View File

@@ -1,3 +1,6 @@
//go:build go1.17
// +build go1.17
package restic
import (

View File

@@ -1,3 +1,6 @@
//go:build go1.17
// +build go1.17
package restic
import (

View File

@@ -1,6 +1,9 @@
// Serve restic tests set up a server and run the integration tests
// for restic against it.
//go:build go1.17
// +build go1.17
package restic
import (

View File

@@ -0,0 +1,14 @@
// Build for restic for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build !go1.17
// +build !go1.17
package restic
import (
"github.com/spf13/cobra"
)
// Command definition for cobra
var Command *cobra.Command

View File

@@ -1,3 +1,6 @@
//go:build go1.17
// +build go1.17
package restic
import (

View File

@@ -1,3 +1,6 @@
//go:build go1.17
// +build go1.17
package restic
import (

View File

@@ -191,7 +191,7 @@ kill %1
## Install from source
Make sure you have git and [Go](https://golang.org/) installed.
Go version 1.17 or newer is required, latest release is recommended.
Go version 1.16 or newer is required, latest release is recommended.
You can get it from your package manager, or download it from
[golang.org/dl](https://golang.org/dl/). Then you can run the following:

View File

@@ -2,11 +2,14 @@
package filter
import (
"bufio"
"context"
"errors"
"fmt"
"log"
"os"
"path"
"regexp"
"strings"
"time"
@@ -19,19 +22,80 @@ import (
// This is accessed through GetConfig and AddConfig
var globalConfig = mustNewFilter(nil)
// rule is one filter rule
type rule struct {
Include bool
Regexp *regexp.Regexp
}
// Match returns true if rule matches path
func (r *rule) Match(path string) bool {
return r.Regexp.MatchString(path)
}
// String the rule
func (r *rule) String() string {
c := "-"
if r.Include {
c = "+"
}
return fmt.Sprintf("%s %s", c, r.Regexp.String())
}
// rules is a slice of rules
type rules struct {
rules []rule
existing map[string]struct{}
}
// add adds a rule if it doesn't exist already
func (rs *rules) add(Include bool, re *regexp.Regexp) {
if rs.existing == nil {
rs.existing = make(map[string]struct{})
}
newRule := rule{
Include: Include,
Regexp: re,
}
newRuleString := newRule.String()
if _, ok := rs.existing[newRuleString]; ok {
return // rule already exists
}
rs.rules = append(rs.rules, newRule)
rs.existing[newRuleString] = struct{}{}
}
// clear clears all the rules
func (rs *rules) clear() {
rs.rules = nil
rs.existing = nil
}
// len returns the number of rules
func (rs *rules) len() int {
return len(rs.rules)
}
// FilesMap describes the map of files to transfer
type FilesMap map[string]struct{}
// Opt configures the filter
type Opt struct {
DeleteExcluded bool
RulesOpt
ExcludeFile []string
MetaRules RulesOpt
FilesFrom []string
FilesFromRaw []string
MinAge fs.Duration
MaxAge fs.Duration
MinSize fs.SizeSuffix
MaxSize fs.SizeSuffix
IgnoreCase bool
FilterRule []string
FilterFrom []string
ExcludeRule []string
ExcludeFrom []string
ExcludeFile []string
IncludeRule []string
IncludeFrom []string
FilesFrom []string
FilesFromRaw []string
MinAge fs.Duration
MaxAge fs.Duration
MinSize fs.SizeSuffix
MaxSize fs.SizeSuffix
IgnoreCase bool
}
// DefaultOpt is the default config for the filter
@@ -42,9 +106,6 @@ var DefaultOpt = Opt{
MaxSize: fs.SizeSuffix(-1),
}
// FilesMap describes the map of files to transfer
type FilesMap map[string]struct{}
// Filter describes any filtering in operation
type Filter struct {
Opt Opt
@@ -52,7 +113,6 @@ type Filter struct {
ModTimeTo time.Time
fileRules rules
dirRules rules
metaRules rules
files FilesMap // files if filesFrom
dirs FilesMap // dirs from filesFrom
}
@@ -82,14 +142,57 @@ func NewFilter(opt *Opt) (f *Filter, err error) {
fs.Debugf(nil, "--max-age %v to %v", f.Opt.MaxAge, f.ModTimeFrom)
}
err = parseRules(&f.Opt.RulesOpt, f.Add, f.Clear)
if err != nil {
return nil, err
addImplicitExclude := false
foundExcludeRule := false
for _, rule := range f.Opt.IncludeRule {
err = f.Add(true, rule)
if err != nil {
return nil, err
}
addImplicitExclude = true
}
for _, rule := range f.Opt.IncludeFrom {
err := forEachLine(rule, false, func(line string) error {
return f.Add(true, line)
})
if err != nil {
return nil, err
}
addImplicitExclude = true
}
for _, rule := range f.Opt.ExcludeRule {
err = f.Add(false, rule)
if err != nil {
return nil, err
}
foundExcludeRule = true
}
for _, rule := range f.Opt.ExcludeFrom {
err := forEachLine(rule, false, func(line string) error {
return f.Add(false, line)
})
if err != nil {
return nil, err
}
foundExcludeRule = true
}
err = parseRules(&f.Opt.MetaRules, f.metaRules.Add, f.metaRules.clear)
if err != nil {
return nil, err
if addImplicitExclude && foundExcludeRule {
fs.Errorf(nil, "Using --filter is recommended instead of both --include and --exclude as the order they are parsed in is indeterminate")
}
for _, rule := range f.Opt.FilterRule {
err = f.AddRule(rule)
if err != nil {
return nil, err
}
}
for _, rule := range f.Opt.FilterFrom {
err := forEachLine(rule, false, f.AddRule)
if err != nil {
return nil, err
}
}
inActive := f.InActive()
@@ -122,6 +225,12 @@ func NewFilter(opt *Opt) (f *Filter, err error) {
}
}
if addImplicitExclude {
err = f.Add(false, "/**")
if err != nil {
return nil, err
}
}
if fs.GetConfig(context.Background()).Dump&fs.DumpFilters != 0 {
fmt.Println("--- start filters ---")
fmt.Println(f.DumpFilters())
@@ -200,7 +309,16 @@ func (f *Filter) Add(Include bool, glob string) error {
//
// Line comments may be introduced with '#' or ';'
func (f *Filter) AddRule(rule string) error {
return addRule(rule, f.Add, f.Clear)
switch {
case rule == "!":
f.Clear()
return nil
case strings.HasPrefix(rule, "- "):
return f.Add(false, rule[2:])
case strings.HasPrefix(rule, "+ "):
return f.Add(true, rule[2:])
}
return fmt.Errorf("malformed rule %q", rule)
}
// initAddFile creates f.files and f.dirs
@@ -241,7 +359,6 @@ func (f *Filter) Files() FilesMap {
func (f *Filter) Clear() {
f.fileRules.clear()
f.dirRules.clear()
f.metaRules.clear()
}
// InActive returns false if any filters are active
@@ -253,13 +370,17 @@ func (f *Filter) InActive() bool {
f.Opt.MaxSize < 0 &&
f.fileRules.len() == 0 &&
f.dirRules.len() == 0 &&
f.metaRules.len() == 0 &&
len(f.Opt.ExcludeFile) == 0)
}
// IncludeRemote returns whether this remote passes the filter rules.
func (f *Filter) IncludeRemote(remote string) bool {
return f.fileRules.include(remote)
for _, rule := range f.fileRules.rules {
if rule.Match(remote) {
return rule.Include
}
}
return true
}
// ListContainsExcludeFile checks if exclude file is present in the list.
@@ -302,7 +423,13 @@ func (f *Filter) IncludeDirectory(ctx context.Context, fs fs.Fs) func(string) (bool, error) {
return include, nil
}
remote += "/"
return f.dirRules.include(remote), nil
for _, rule := range f.dirRules.rules {
if rule.Match(remote) {
return rule.Include, nil
}
}
return true, nil
}
}
@@ -326,7 +453,7 @@ func (f *Filter) DirContainsExcludeFile(ctx context.Context, fremote fs.Fs, remote string) (bool, error) {
// Include returns whether this object should be included into the
// sync or not
func (f *Filter) Include(remote string, size int64, modTime time.Time, metadata fs.Metadata) bool {
func (f *Filter) Include(remote string, size int64, modTime time.Time) bool {
// filesFrom takes precedence
if f.files != nil {
_, include := f.files[remote]
@@ -344,15 +471,6 @@ func (f *Filter) Include(remote string, size int64, modTime time.Time, metadata fs.Metadata) bool {
if f.Opt.MaxSize >= 0 && size > int64(f.Opt.MaxSize) {
return false
}
if f.metaRules.len() > 0 && len(metadata) > 0 {
metadatas := make([]string, 0, len(metadata))
for key, value := range metadata {
metadatas = append(metadatas, fmt.Sprintf("%s=%s", key, value))
}
if !f.metaRules.includeMany(metadatas) {
return false
}
}
return f.IncludeRemote(remote)
}
@@ -367,17 +485,39 @@ func (f *Filter) IncludeObject(ctx context.Context, o fs.Object) bool {
} else {
modTime = time.Unix(0, 0)
}
var metadata fs.Metadata
if f.metaRules.len() > 0 {
var err error
metadata, err = fs.GetMetadata(ctx, o)
if err != nil {
fs.Errorf(o, "Failed to read metadata: %v", err)
metadata = nil
}
return f.Include(o.Remote(), o.Size(), modTime)
}
// forEachLine calls fn on every line in the file pointed to by path
//
// It ignores empty lines and lines starting with '#' or ';' if raw is false
func forEachLine(path string, raw bool, fn func(string) error) (err error) {
var scanner *bufio.Scanner
if path == "-" {
scanner = bufio.NewScanner(os.Stdin)
} else {
in, err := os.Open(path)
if err != nil {
return err
}
scanner = bufio.NewScanner(in)
defer fs.CheckClose(in, &err)
}
return f.Include(o.Remote(), o.Size(), modTime, metadata)
for scanner.Scan() {
line := scanner.Text()
if !raw {
line = strings.TrimSpace(line)
if len(line) == 0 || line[0] == '#' || line[0] == ';' {
continue
}
}
err := fn(line)
if err != nil {
return err
}
}
return scanner.Err()
}
// DumpFilters dumps the filters in textual form, 1 per line
@@ -397,12 +537,6 @@ func (f *Filter) DumpFilters() string {
for _, dirRule := range f.dirRules.rules {
rules = append(rules, dirRule.String())
}
if f.metaRules.len() > 0 {
rules = append(rules, "--- Metadata filter rules ---")
for _, metaRule := range f.metaRules.rules {
rules = append(rules, metaRule.String())
}
}
return strings.Join(rules, "\n")
}
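The rules engine shown above uses first-match-wins semantics: the first rule whose pattern matches a path decides inclusion, and a path matched by no rule is included. A self-contained sketch of that evaluation, with regular expressions written by hand where rclone would compile them from globs:

```go
package main

import (
	"fmt"
	"regexp"
)

// rule mirrors the shape in the diff: an include/exclude flag plus a
// compiled pattern.
type rule struct {
	include bool
	re      *regexp.Regexp
}

// includeRemote applies first-match-wins semantics: the first rule
// whose pattern matches decides; with no match the path is included.
func includeRemote(rules []rule, remote string) bool {
	for _, r := range rules {
		if r.re.MatchString(remote) {
			return r.include
		}
	}
	return true
}

func main() {
	rules := []rule{
		{true, regexp.MustCompile(`\.go$`)}, // + *.go
		{false, regexp.MustCompile(`.*`)},   // the implicit "- /**" added for --include
	}
	fmt.Println(includeRemote(rules, "main.go"))   // true
	fmt.Println(includeRemote(rules, "README.md")) // false
}
```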

View File

@@ -208,7 +208,7 @@ type includeTest struct {
func testInclude(t *testing.T, f *Filter, tests []includeTest) {
for _, test := range tests {
got := f.Include(test.in, test.size, time.Unix(test.modTime, 0), nil)
got := f.Include(test.in, test.size, time.Unix(test.modTime, 0))
assert.Equal(t, test.want, got, fmt.Sprintf("in=%q, size=%v, modTime=%v", test.in, test.size, time.Unix(test.modTime, 0)))
}
}
@@ -714,7 +714,7 @@ func TestFilterMatchesFromDocs(t *testing.T) {
require.NoError(t, err)
err = f.Add(false, "*")
require.NoError(t, err)
included := f.Include(test.file, 0, time.Unix(0, 0), nil)
included := f.Include(test.file, 0, time.Unix(0, 0))
if included != test.included {
t.Errorf("%q match %q: want %v got %v", test.glob, test.file, test.included, included)
}

View File

@@ -3,7 +3,6 @@ package filterflags
import (
"context"
"fmt"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/filter"
@@ -27,27 +26,17 @@ func Reload(ctx context.Context) (err error) {
return nil
}
// AddRuleFlags add a set of rules flags with prefix
func AddRuleFlags(flagSet *pflag.FlagSet, Opt *filter.RulesOpt, what, prefix string) {
shortFilter := ""
if prefix == "" {
shortFilter = "f"
}
flags.StringArrayVarP(flagSet, &Opt.FilterRule, prefix+"filter", shortFilter, nil, fmt.Sprintf("Add a %s filtering rule", what))
flags.StringArrayVarP(flagSet, &Opt.FilterFrom, prefix+"filter-from", "", nil, fmt.Sprintf("Read %s filtering patterns from a file (use - to read from stdin)", what))
flags.StringArrayVarP(flagSet, &Opt.ExcludeRule, prefix+"exclude", "", nil, fmt.Sprintf("Exclude %ss matching pattern", what))
flags.StringArrayVarP(flagSet, &Opt.ExcludeFrom, prefix+"exclude-from", "", nil, fmt.Sprintf("Read %s exclude patterns from file (use - to read from stdin)", what))
flags.StringArrayVarP(flagSet, &Opt.IncludeRule, prefix+"include", "", nil, fmt.Sprintf("Include %ss matching pattern", what))
flags.StringArrayVarP(flagSet, &Opt.IncludeFrom, prefix+"include-from", "", nil, fmt.Sprintf("Read %s include patterns from file (use - to read from stdin)", what))
}
// AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) {
rc.AddOptionReload("filter", &Opt, Reload)
flags.BoolVarP(flagSet, &Opt.DeleteExcluded, "delete-excluded", "", false, "Delete files on dest excluded from sync")
AddRuleFlags(flagSet, &Opt.RulesOpt, "file", "")
AddRuleFlags(flagSet, &Opt.MetaRules, "metadata", "metadata-")
flags.StringArrayVarP(flagSet, &Opt.FilterRule, "filter", "f", nil, "Add a file-filtering rule")
flags.StringArrayVarP(flagSet, &Opt.FilterFrom, "filter-from", "", nil, "Read filtering patterns from a file (use - to read from stdin)")
flags.StringArrayVarP(flagSet, &Opt.ExcludeRule, "exclude", "", nil, "Exclude files matching pattern")
flags.StringArrayVarP(flagSet, &Opt.ExcludeFrom, "exclude-from", "", nil, "Read exclude patterns from file (use - to read from stdin)")
flags.StringArrayVarP(flagSet, &Opt.ExcludeFile, "exclude-if-present", "", nil, "Exclude directories if filename is present")
flags.StringArrayVarP(flagSet, &Opt.IncludeRule, "include", "", nil, "Include files matching pattern")
flags.StringArrayVarP(flagSet, &Opt.IncludeFrom, "include-from", "", nil, "Read include patterns from file (use - to read from stdin)")
flags.StringArrayVarP(flagSet, &Opt.FilesFrom, "files-from", "", nil, "Read list of source-file names from file (use - to read from stdin)")
flags.StringArrayVarP(flagSet, &Opt.FilesFromRaw, "files-from-raw", "", nil, "Read list of source-file names from file without any processing of lines (use - to read from stdin)")
flags.FVarP(flagSet, &Opt.MinAge, "min-age", "", "Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y")

View File

@@ -1,253 +0,0 @@
package filter
import (
"bufio"
"fmt"
"os"
"regexp"
"strings"
"github.com/rclone/rclone/fs"
)
// RulesOpt is configuration for a rule set
type RulesOpt struct {
FilterRule []string
FilterFrom []string
ExcludeRule []string
ExcludeFrom []string
IncludeRule []string
IncludeFrom []string
}
// rule is one filter rule
type rule struct {
Include bool
Regexp *regexp.Regexp
}
// Match returns true if rule matches path
func (r *rule) Match(path string) bool {
return r.Regexp.MatchString(path)
}
// String the rule
func (r *rule) String() string {
c := "-"
if r.Include {
c = "+"
}
return fmt.Sprintf("%s %s", c, r.Regexp.String())
}
// rules is a slice of rules
type rules struct {
rules []rule
existing map[string]struct{}
}
type addFn func(Include bool, glob string) error
// add adds a rule if it doesn't exist already
func (rs *rules) add(Include bool, re *regexp.Regexp) {
if rs.existing == nil {
rs.existing = make(map[string]struct{})
}
newRule := rule{
Include: Include,
Regexp: re,
}
newRuleString := newRule.String()
if _, ok := rs.existing[newRuleString]; ok {
return // rule already exists
}
rs.rules = append(rs.rules, newRule)
rs.existing[newRuleString] = struct{}{}
}
// Add adds a filter rule with include or exclude status indicated
func (rs *rules) Add(Include bool, glob string) error {
re, err := GlobToRegexp(glob, false /* f.Opt.IgnoreCase */)
if err != nil {
return err
}
rs.add(Include, re)
return nil
}
type clearFn func()
// clear clears all the rules
func (rs *rules) clear() {
rs.rules = nil
rs.existing = nil
}
// len returns the number of rules
func (rs *rules) len() int {
return len(rs.rules)
}
// include returns whether this remote passes the filter rules.
func (rs *rules) include(remote string) bool {
for _, rule := range rs.rules {
if rule.Match(remote) {
return rule.Include
}
}
return true
}
// include returns whether this collection of strings remote passes
// the filter rules.
//
// the first rule is evaluated on all the remotes and if it matches
// then the result is returned. If not the next rule is tested and so
// on.
func (rs *rules) includeMany(remotes []string) bool {
for _, rule := range rs.rules {
for _, remote := range remotes {
if rule.Match(remote) {
return rule.Include
}
}
}
return true
}
// forEachLine calls fn on every line in the file pointed to by path
//
// It ignores empty lines and lines starting with '#' or ';' if raw is false
func forEachLine(path string, raw bool, fn func(string) error) (err error) {
var scanner *bufio.Scanner
if path == "-" {
scanner = bufio.NewScanner(os.Stdin)
} else {
in, err := os.Open(path)
if err != nil {
return err
}
scanner = bufio.NewScanner(in)
defer fs.CheckClose(in, &err)
}
for scanner.Scan() {
line := scanner.Text()
if !raw {
line = strings.TrimSpace(line)
if len(line) == 0 || line[0] == '#' || line[0] == ';' {
continue
}
}
err := fn(line)
if err != nil {
return err
}
}
return scanner.Err()
}
// AddRule adds a filter rule with include/exclude indicated by the prefix
//
// These are
//
// + glob
// - glob
// !
//
// '+' includes the glob, '-' excludes it and '!' resets the filter list
//
// Line comments may be introduced with '#' or ';'
func addRule(rule string, add addFn, clear clearFn) error {
switch {
case rule == "!":
clear()
return nil
case strings.HasPrefix(rule, "- "):
return add(false, rule[2:])
case strings.HasPrefix(rule, "+ "):
return add(true, rule[2:])
}
return fmt.Errorf("malformed rule %q", rule)
}
// AddRule adds a filter rule with include/exclude indicated by the prefix
//
// These are
//
// + glob
// - glob
// !
//
// '+' includes the glob, '-' excludes it and '!' resets the filter list
//
// Line comments may be introduced with '#' or ';'
func (rs *rules) AddRule(rule string) error {
return addRule(rule, rs.Add, rs.clear)
}
// Parse the rules passed in and add them to the function
func parseRules(opt *RulesOpt, add addFn, clear clearFn) (err error) {
addImplicitExclude := false
foundExcludeRule := false
for _, rule := range opt.IncludeRule {
err = add(true, rule)
if err != nil {
return err
}
addImplicitExclude = true
}
for _, rule := range opt.IncludeFrom {
err := forEachLine(rule, false, func(line string) error {
return add(true, line)
})
if err != nil {
return err
}
addImplicitExclude = true
}
for _, rule := range opt.ExcludeRule {
err = add(false, rule)
if err != nil {
return err
}
foundExcludeRule = true
}
for _, rule := range opt.ExcludeFrom {
err := forEachLine(rule, false, func(line string) error {
return add(false, line)
})
if err != nil {
return err
}
foundExcludeRule = true
}
if addImplicitExclude && foundExcludeRule {
fs.Errorf(nil, "Using --filter is recommended instead of both --include and --exclude as the order they are parsed in is indeterminate")
}
for _, rule := range opt.FilterRule {
err = addRule(rule, add, clear)
if err != nil {
return err
}
}
for _, rule := range opt.FilterFrom {
err := forEachLine(rule, false, func(rule string) error {
return addRule(rule, add, clear)
})
if err != nil {
return err
}
}
if addImplicitExclude {
err = add(false, "/**")
if err != nil {
return err
}
}
return nil
}
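The AddRule grammar documented above ('+ glob' includes, '- glob' excludes, '!' resets the list) is the same one `--filter` and `--filter-from` consume. A sketch wiring the prefix dispatch to a sample filter file, skipping comments and blank lines the way forEachLine does (the add/clear callbacks are hypothetical):

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

// applyRule dispatches one filter line the way AddRule in the diff
// does: "+ glob" includes, "- glob" excludes, "!" clears the list.
func applyRule(line string, add func(bool, string), clear func()) error {
	switch {
	case line == "!":
		clear()
		return nil
	case strings.HasPrefix(line, "- "):
		add(false, line[2:])
		return nil
	case strings.HasPrefix(line, "+ "):
		add(true, line[2:])
		return nil
	}
	return fmt.Errorf("malformed rule %q", line)
}

func main() {
	// A sample --filter-from file; comments and blanks are skipped,
	// as in forEachLine above.
	file := `
# keep sources, drop everything else
+ *.go
- /**
`
	scanner := bufio.NewScanner(strings.NewReader(file))
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" || line[0] == '#' || line[0] == ';' {
			continue
		}
		_ = applyRule(line,
			func(inc bool, glob string) { fmt.Println("add", inc, glob) },
			func() { fmt.Println("clear") })
	}
}
```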

View File

@@ -1515,7 +1515,7 @@ func Rmdirs(ctx context.Context, f fs.Fs, dir string, leaveRoot bool) error {
dir := toDelete[i]
// If a filter matches the directory then that
// directory is a candidate for deletion
if !fi.IncludeRemote(dir + "/") {
if !fi.Include(dir+"/", 0, time.Now()) {
continue
}
err = TryRmdir(ctx, f, dir)
@@ -1952,17 +1952,11 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string, cp bool) (err error) {
return err
}
}
needTransfer := NeedTransfer(ctx, dstObj, srcObj)
if needTransfer {
NoNeedTransfer, err := CompareOrCopyDest(ctx, fdst, dstObj, srcObj, copyDestDir, backupDir)
if err != nil {
return err
}
if NoNeedTransfer {
needTransfer = false
}
NoNeedTransfer, err := CompareOrCopyDest(ctx, fdst, dstObj, srcObj, copyDestDir, backupDir)
if err != nil {
return err
}
if needTransfer {
if !NoNeedTransfer && NeedTransfer(ctx, dstObj, srcObj) {
// If destination already exists, then we must move it into --backup-dir if required
if dstObj != nil && backupDir != nil {
err = MoveBackupDir(ctx, backupDir, dstObj)

View File

@@ -331,17 +331,11 @@ func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, fraction int, wg *sync.WaitGroup) {
tr := accounting.Stats(s.ctx).NewCheckingTransfer(src)
// Check to see if can store this
if src.Storable() {
needTransfer := operations.NeedTransfer(s.ctx, pair.Dst, pair.Src)
if needTransfer {
NoNeedTransfer, err := operations.CompareOrCopyDest(s.ctx, s.fdst, pair.Dst, pair.Src, s.compareCopyDest, s.backupDir)
if err != nil {
s.processError(err)
}
if NoNeedTransfer {
needTransfer = false
}
NoNeedTransfer, err := operations.CompareOrCopyDest(s.ctx, s.fdst, pair.Dst, pair.Src, s.compareCopyDest, s.backupDir)
if err != nil {
s.processError(err)
}
if needTransfer {
if !NoNeedTransfer && operations.NeedTransfer(s.ctx, pair.Dst, pair.Src) {
// If files are treated as immutable, fail if destination exists and does not match
if s.ci.Immutable && pair.Dst != nil {
err := fs.CountError(fserrors.NoRetryError(fs.ErrorImmutableModified))
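This hunk and the operations.go one above are the same reshuffle: one side computes NeedTransfer first and only then consults CompareOrCopyDest, the other asks CompareOrCopyDest first and gates on `!NoNeedTransfer && NeedTransfer(...)`. Assuming both checks would return the same answers in either order, the transfer decision is unchanged; only which check (and its side effects) runs first differs, as this enumeration shows:

```go
package main

import "fmt"

func main() {
	// Enumerate all outcomes of the two predicates that gate a
	// transfer in both hunks; the combined decision is identical,
	// only the evaluation order (and so the side effects) changed.
	for _, need := range []bool{false, true} {
		for _, noNeed := range []bool{false, true} {
			oldOrder := need && !noNeed // NeedTransfer first, then CompareOrCopyDest
			newOrder := !noNeed && need // CompareOrCopyDest first, then NeedTransfer
			fmt.Println(need, noNeed, oldOrder == newOrder) // always true
		}
	}
}
```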

View File

@@ -1,8 +1,8 @@
//go:build !go1.17
// +build !go1.17
//go:build !go1.16
// +build !go1.16
package fs
// Upgrade to Go version 1.17 to compile rclone - latest stable go
// Upgrade to Go version 1.16 to compile rclone - latest stable go
// compiler recommended.
func init() { Go_version_1_17_required_for_compilation() }
func init() { Go_version_1_16_required_for_compilation() }

go.mod (61 changed lines)
View File

@@ -1,6 +1,6 @@
module github.com/rclone/rclone
go 1.17
go 1.16
require (
bazil.org/fuse v0.0.0-20200524192727-fb710f7dfd05
@@ -51,7 +51,7 @@ require (
github.com/spf13/cobra v1.4.0
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.7.2
github.com/t3rm1n4l/go-mega v0.0.0-20220725095014-c4e0c2b5debf
github.com/t3rm1n4l/go-mega v0.0.0-20200416171014-ffad7fcb44b8
github.com/winfsp/cgofuse v1.5.1-0.20220421173602-ce7e5a65cac7
github.com/xanzy/ssh-agent v0.3.1
github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a
@@ -70,63 +70,6 @@ require (
storj.io/uplink v1.9.0
)
require (
cloud.google.com/go/compute v1.6.1 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/anacrolix/log v0.10.1-0.20220126091220-5c1b6f3af59c // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/calebcase/tmpfile v1.0.3 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/gdamore/encoding v1.0.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/googleapis/gax-go/v2 v2.4.0 // indirect
github.com/hashicorp/errwrap v1.0.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-uuid v1.0.2 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
github.com/jcmturner/gofork v1.0.0 // indirect
github.com/jcmturner/goidentity/v6 v6.0.1 // indirect
github.com/jcmturner/rpc/v2 v2.0.3 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/mattn/go-ieproxy v0.0.1 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/onsi/gomega v1.13.0 // indirect
github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/spacemonkeygo/monkit/v3 v3.0.17 // indirect
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.4.0 // indirect
github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
github.com/zeebo/errs v1.3.0 // indirect
go.opencensus.io v0.23.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8 // indirect
google.golang.org/grpc v1.47.0 // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
storj.io/common v0.0.0-20220414110316-a5cb7172d6bf // indirect
storj.io/drpc v0.0.30 // indirect
)
require (
github.com/Microsoft/go-winio v0.5.1 // indirect
github.com/ProtonMail/go-crypto v0.0.0-20220623141421-5afb4c282135

go.sum (2 changed lines)
View File

@@ -593,8 +593,6 @@ github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/t3rm1n4l/go-mega v0.0.0-20200416171014-ffad7fcb44b8 h1:IGJQmLBLYBdAknj21W3JsVof0yjEXfy1Q0K3YZebDOg=
github.com/t3rm1n4l/go-mega v0.0.0-20200416171014-ffad7fcb44b8/go.mod h1:XWL4vDyd3JKmJx+hZWUVgCNmmhZ2dTBcaNDcxH465s0=
github.com/t3rm1n4l/go-mega v0.0.0-20220725095014-c4e0c2b5debf h1:Y43S3e9P1NPs/QF4R5/SdlXj2d31540hP4Gk8VKNvDg=
github.com/t3rm1n4l/go-mega v0.0.0-20220725095014-c4e0c2b5debf/go.mod h1:c+cGNU1qi9bO7ZF4IRMYk+KaZTNiQ/gQrSbyMmGFq1Q=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=

View File

@@ -35,12 +35,12 @@ import (
"github.com/rclone/rclone/librclone/librclone"
_ "github.com/rclone/rclone/backend/all" // import all backends
_ "github.com/rclone/rclone/cmd/cmount" // import cmount
_ "github.com/rclone/rclone/cmd/mount" // import mount
_ "github.com/rclone/rclone/cmd/mount2" // import mount2
_ "github.com/rclone/rclone/fs/operations" // import operations/* rc commands
_ "github.com/rclone/rclone/fs/sync" // import sync/*
_ "github.com/rclone/rclone/lib/plugin" // import plugins
_ "github.com/rclone/rclone/cmd/mount" // import mount
_ "github.com/rclone/rclone/cmd/mount2" // import mount2
_ "github.com/rclone/rclone/cmd/cmount" // import cmount
)
// RcloneInitialize initializes rclone as a library

View File

@@ -39,19 +39,19 @@ type File struct {
inode uint64 // inode number - read only
size int64 // size of file - read and written with atomic int64 - must be 64 bit aligned
muRW sync.Mutex // synchronize RWFileHandle.openPending(), RWFileHandle.close() and File.Remove
mu sync.RWMutex // protects the following
d *Dir // parent directory
dPath string // path of parent directory. NB dir rename means all Files are flushed
o fs.Object // NB o may be nil if file is being written
leaf string // leaf name of the object
writers []Handle // writers for this file
nwriters int32 // len(writers) which is read/updated with atomic
pendingModTime time.Time // will be applied once o becomes available, i.e. after file was written
pendingRenameFun func(ctx context.Context) error // will be run/renamed after all writers close
sys atomic.Value // user defined info to be attached here
nwriters int32 // len(writers) which is read/updated with atomic
appendMode bool // file was opened with O_APPEND
sys atomic.Value // user defined info to be attached here
muRW sync.Mutex // synchronize RWFileHandle.openPending(), RWFileHandle.close() and File.Remove
}
// newFile creates a new File

View File

@@ -20,19 +20,19 @@ type ReadFileHandle struct {
baseHandle
done func(ctx context.Context, err error)
mu sync.Mutex
cond sync.Cond // cond lock for out of sequence reads
cond *sync.Cond // cond lock for out of sequence reads
closed bool // set if handle has been closed
r *accounting.Account
readCalled bool // set if read has been called
size int64 // size of the object (0 for unknown length)
offset int64 // offset of read of o
roffset int64 // offset of Read() calls
file *File
hash *hash.MultiHasher
remote string
closed bool // set if handle has been closed
readCalled bool // set if read has been called
noSeek bool
sizeUnknown bool // set if size of source is not known
file *File
hash *hash.MultiHasher
opened bool
remote string
}
// Check interfaces
@@ -63,7 +63,7 @@ func newReadFileHandle(f *File) (*ReadFileHandle, error) {
size: nonNegative(o.Size()),
sizeUnknown: o.Size() < 0,
}
fh.cond = sync.Cond{L: &fh.mu}
fh.cond = sync.NewCond(&fh.mu)
return fh, nil
}
@@ -267,7 +267,7 @@ func (fh *ReadFileHandle) readAt(p []byte, off int64) (n int, err error) {
maxBuf = len(p)
}
if gap := off - fh.offset; gap > 0 && gap < int64(8*maxBuf) {
waitSequential("read", fh.remote, &fh.cond, fh.file.VFS().Opt.ReadWait, &fh.offset, off)
waitSequential("read", fh.remote, fh.cond, fh.file.VFS().Opt.ReadWait, &fh.offset, off)
}
doSeek := off != fh.offset
if doSeek && fh.noSeek {

View File

@@ -52,7 +52,7 @@ type Cache struct {
avFn AddVirtualFn // if set, can be called to add dir entries
mu sync.Mutex // protects the following variables
cond sync.Cond // cond lock for synchronous cache cleaning
cond *sync.Cond // cond lock for synchronous cache cleaning
item map[string]*Item // files/directories in the cache
errItems map[string]error // items in error state
used int64 // total size of files in the cache
@@ -139,7 +139,7 @@ func New(ctx context.Context, fremote fs.Fs, opt *vfscommon.Options, avFn AddVirtualFn) (*Cache, error) {
// Create a channel for cleaner to be kicked upon out of space condition
c.kick = make(chan struct{}, 1)
c.cond = sync.Cond{L: &c.mu}
c.cond = sync.NewCond(&c.mu)
go c.cleaner(ctx)
@@ -739,17 +739,27 @@ func (c *Cache) clean(kicked bool) {
oldItems, oldUsed := len(c.item), fs.SizeSuffix(c.used)
c.mu.Unlock()
// Remove any files that are over age
c.purgeOld(c.opt.CacheMaxAge)
// loop cleaning the cache until we reach below cache quota
for {
// Remove any files that are over age
c.purgeOld(c.opt.CacheMaxAge)
// If have a maximum cache size...
if int64(c.opt.CacheMaxSize) > 0 {
// Remove files not in use until cache size is below quota starting from the oldest first
if int64(c.opt.CacheMaxSize) <= 0 {
break
}
// Now remove files not in use until cache size is below quota starting from the
// oldest first
c.purgeOverQuota(int64(c.opt.CacheMaxSize))
// Remove cache files that are not dirty if we are still above the max cache size
c.purgeClean(int64(c.opt.CacheMaxSize))
c.retryFailedResets()
used := c.updateUsed()
if used <= int64(c.opt.CacheMaxSize) && len(c.errItems) == 0 {
break
}
}
// Was kicked?
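The reworked clean above loops its purge passes until the cache is below quota and no items remain in an error state, instead of making a single pass. A condensed sketch of that loop shape, with `purge` and `used` as hypothetical stand-ins for the cache's methods:

```go
package main

import "fmt"

// cleanLoop sketches the loop structure of the hunk above: repeat the
// purge passes until usage is under quota, or stop after one pass
// when no quota is configured.
func cleanLoop(maxSize int64, purge func(), used func() int64) {
	for {
		purge() // age-based and over-quota purges go here
		if maxSize <= 0 {
			break // no quota configured: one pass is enough
		}
		if used() <= maxSize {
			break // under quota: done
		}
	}
}

func main() {
	usage := int64(100)
	cleanLoop(60,
		func() { usage -= 25 },
		func() int64 { return usage })
	fmt.Println(usage) // 50: looped twice to get under the 60 quota
}
```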

View File

@@ -57,16 +57,16 @@ type Item struct {
// read only
c *Cache // cache this is part of
mu sync.Mutex // protect the variables
cond sync.Cond // synchronize with cache cleaner
cond *sync.Cond // synchronize with cache cleaner
name string // name in the VFS
opens int // number of times file is open
downloaders *downloaders.Downloaders // a record of the downloaders in action - may be nil
o fs.Object // object we are caching - may be nil
fd *os.File // handle we are using to read and write to the file
modified bool // set if the file has been modified since the last Open
info Info // info about the file to persist to backing store
writeBackID writeback.Handle // id of any writebacks in progress
pendingAccesses int // number of threads - cache reset not allowed if not zero
modified bool // set if the file has been modified since the last Open
beingReset bool // cache cleaner is resetting the cache file, access not allowed
}
@@ -138,7 +138,7 @@ func newItem(c *Cache, name string) (item *Item) {
ATime: now,
},
}
item.cond = sync.Cond{L: &item.mu}
item.cond = sync.NewCond(&item.mu)
// check the cache file exists
osPath := c.toOSPath(name)
fi, statErr := os.Stat(osPath)

View File

@@ -15,17 +15,17 @@ import (
type WriteFileHandle struct {
baseHandle
mu sync.Mutex
cond sync.Cond // cond lock for out of sequence writes
cond *sync.Cond // cond lock for out of sequence writes
closed bool // set if handle has been closed
remote string
pipeWriter *io.PipeWriter
o fs.Object
result chan error
file *File
offset int64
flags int
closed bool // set if handle has been closed
writeCalled bool // set the first time Write() is called
offset int64
opened bool
flags int
truncated bool
}
@@ -43,7 +43,7 @@ func newWriteFileHandle(d *Dir, f *File, remote string, flags int) (*WriteFileHandle, error) {
result: make(chan error, 1),
file: f,
}
fh.cond = sync.Cond{L: &fh.mu}
fh.cond = sync.NewCond(&fh.mu)
fh.file.addWriter(fh)
return fh, nil
}
@@ -130,7 +130,7 @@ func (fh *WriteFileHandle) writeAt(p []byte, off int64) (n int, err error) {
return 0, ECLOSED
}
if fh.offset != off {
waitSequential("write", fh.remote, &fh.cond, fh.file.VFS().Opt.WriteWait, &fh.offset, off)
waitSequential("write", fh.remote, fh.cond, fh.file.VFS().Opt.WriteWait, &fh.offset, off)
}
if fh.offset != off {
fs.Errorf(fh.remote, "WriteFileHandle.Write: can't seek in file without --vfs-cache-mode >= writes")