Mirror of https://github.com/rclone/rclone.git, synced 2026-01-06 10:33:34 +00:00

Compare commits: v1.59.0...fix-6353-m (42 commits)
| SHA1 |
|---|
| c520e52696 |
| 918bd6d3c3 |
| f49be033c6 |
| 2a817e21cb |
| a07d376fb1 |
| e749bc58f4 |
| 821e084f28 |
| 2170376d1b |
| 8125b1cf08 |
| ba60984f33 |
| a875320e37 |
| 639624184d |
| fe84cca1ad |
| 9d3958bd0b |
| 3a8e52de74 |
| 72227a0151 |
| 9f40cb114a |
| 2f461f13e3 |
| 7a24c173f6 |
| fb60aeddae |
| 695736d1e4 |
| f0396070eb |
| f1166757ba |
| 9b76434ad5 |
| 440d0cd179 |
| a047d30eca |
| 03d0f331f7 |
| 049674aeab |
| 50f053cada |
| 140af43c26 |
| f467188876 |
| 4a4379b312 |
| 8c02fe7b89 |
| 11be920e90 |
| 8c19b355a5 |
| 67fd60275a |
| b310490fa5 |
| 0ee0812a2b |
| 55bbff6346 |
| 9c6cfc1ff0 |
| f753d7cd42 |
| f5be1d6b65 |
.github/workflows/build.yml (62 lines changed, vendored)
```diff
@@ -25,12 +25,12 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.16', 'go1.17']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.17', 'go1.18']
 
         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '1.18.x'
+            go: '1.19.x'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -39,9 +39,16 @@ jobs:
             librclonetest: true
             deploy: true
 
+          - job_name: linux_386
+            os: ubuntu-latest
+            go: '1.19.x'
+            goarch: 386
+            gotags: cmount
+            quicktest: true
+
           - job_name: mac_amd64
             os: macos-11
-            go: '1.18.x'
+            go: '1.19.x'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -50,14 +57,14 @@ jobs:
 
           - job_name: mac_arm64
             os: macos-11
-            go: '1.18.x'
+            go: '1.19.x'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true
 
           - job_name: windows
             os: windows-latest
-            go: '1.18.x'
+            go: '1.19.x'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -67,23 +74,23 @@ jobs:
 
           - job_name: other_os
             os: ubuntu-latest
-            go: '1.18.x'
+            go: '1.19.x'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true
 
-          - job_name: go1.16
-            os: ubuntu-latest
-            go: '1.16.x'
-            quicktest: true
-            racequicktest: true
-
           - job_name: go1.17
             os: ubuntu-latest
             go: '1.17.x'
             quicktest: true
             racequicktest: true
 
+          - job_name: go1.18
+            os: ubuntu-latest
+            go: '1.18.x'
+            quicktest: true
+            racequicktest: true
+
     name: ${{ matrix.job_name }}
 
     runs-on: ${{ matrix.os }}
@@ -220,9 +227,12 @@ jobs:
     steps:
       - name: Checkout
         uses: actions/checkout@v2
+        with:
+          # FIXME temporary until golangci-lint supports go1.19
+          go-version: 1.18.x
 
       - name: Code quality test
-        uses: golangci/golangci-lint-action@v2
+        uses: golangci/golangci-lint-action@v3
         with:
           # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
           version: latest
@@ -243,11 +253,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v1
         with:
-          go-version: 1.18.x
-
-      # Upgrade together with Go version. Using a GitHub-provided version saves around 2 minutes.
-      - name: Force NDK version
-        run: echo "y" | sudo ${ANDROID_HOME}/tools/bin/sdkmanager --install "ndk;23.1.7779620" | grep -v = || true
+          go-version: 1.19.x
 
       - name: Go module cache
         uses: actions/cache@v2
@@ -271,27 +277,29 @@ jobs:
           go install golang.org/x/mobile/cmd/gobind@latest
           go install golang.org/x/mobile/cmd/gomobile@latest
           env PATH=$PATH:~/go/bin gomobile init
+          echo "RCLONE_NDK_VERSION=21" >> $GITHUB_ENV
 
       - name: arm-v7a gomobile build
-        run: env PATH=$PATH:~/go/bin gomobile bind -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
+        run: env PATH=$PATH:~/go/bin gomobile bind -androidapi ${RCLONE_NDK_VERSION} -v -target=android/arm -javapkg=org.rclone -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} github.com/rclone/rclone/librclone/gomobile
 
       - name: arm-v7a Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi16-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/armv7a-linux-androideabi${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=arm' >> $GITHUB_ENV
           echo 'GOARM=7' >> $GITHUB_ENV
           echo 'CGO_ENABLED=1' >> $GITHUB_ENV
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
 
       - name: arm-v7a build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-armv7a .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv7a .
 
       - name: arm64-v8a Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android21-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=arm64' >> $GITHUB_ENV
@@ -299,12 +307,12 @@ jobs:
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
 
       - name: arm64-v8a build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-armv8a .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-armv8a .
 
       - name: x86 Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android16-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/i686-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=386' >> $GITHUB_ENV
@@ -312,12 +320,12 @@ jobs:
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
 
       - name: x86 build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-16-x86 .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x86 .
 
       - name: x64 Set environment variables
         shell: bash
         run: |
-          echo "CC=$(echo $ANDROID_HOME/ndk/23.1.7779620/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android21-clang)" >> $GITHUB_ENV
+          echo "CC=$(echo $ANDROID_NDK/toolchains/llvm/prebuilt/linux-x86_64/bin/x86_64-linux-android${RCLONE_NDK_VERSION}-clang)" >> $GITHUB_ENV
           echo "CC_FOR_TARGET=$CC" >> $GITHUB_ENV
           echo 'GOOS=android' >> $GITHUB_ENV
           echo 'GOARCH=amd64' >> $GITHUB_ENV
@@ -325,7 +333,7 @@ jobs:
           echo 'CGO_LDFLAGS=-fuse-ld=lld -s -w' >> $GITHUB_ENV
 
       - name: x64 build
-        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-21-x64 .
+        run: go build -v -tags android -trimpath -ldflags '-s -X github.com/rclone/rclone/fs.Version='${VERSION} -o build/rclone-android-${RCLONE_NDK_VERSION}-x64 .
 
       - name: Upload artifacts
         run: |
```
combine backend:

```diff
@@ -145,6 +145,7 @@ func (f *Fs) newUpstream(ctx context.Context, dir, remote string) (*upstream, er
 		dir:            dir,
 		pathAdjustment: newAdjustment(f.root, dir),
 	}
+	cache.PinUntilFinalized(u.f, u)
 	return u, nil
 }
 
@@ -206,9 +207,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
 				return err
 			}
 			mu.Lock()
-			f.upstreams[dir] = u
+			if _, found := f.upstreams[dir]; found {
+				err = fmt.Errorf("duplicate directory name %q", dir)
+			} else {
+				f.upstreams[dir] = u
+			}
 			mu.Unlock()
-			return nil
+			return err
 		})
 	}
 	err = g.Wait()
```
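The second hunk rejects duplicate upstream directory names instead of silently overwriting the earlier map entry. A minimal standalone sketch of the same check-then-insert pattern under a mutex, using only the standard library (the `registry` type and names are illustrative, not rclone's):

```go
package main

import (
	"fmt"
	"sync"
)

type upstream struct{ dir string }

type registry struct {
	mu        sync.Mutex
	upstreams map[string]*upstream
}

// register inserts u under dir, refusing duplicates instead of
// silently replacing the earlier entry.
func (r *registry) register(dir string, u *upstream) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	if _, found := r.upstreams[dir]; found {
		return fmt.Errorf("duplicate directory name %q", dir)
	}
	r.upstreams[dir] = u
	return nil
}

func main() {
	r := &registry{upstreams: make(map[string]*upstream)}
	fmt.Println(r.register("docs", &upstream{dir: "docs"})) // <nil>
	fmt.Println(r.register("docs", &upstream{dir: "docs"})) // duplicate directory name "docs"
}
```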
Google Drive backend:

```diff
@@ -758,6 +758,9 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
 	} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
 		fs.Errorf(f, "Received download limit error: %v", err)
 		return false, fserrors.FatalError(err)
+	} else if f.opt.StopOnUploadLimit && reason == "quotaExceeded" {
+		fs.Errorf(f, "Received upload limit error: %v", err)
+		return false, fserrors.FatalError(err)
 	} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
 		fs.Errorf(f, "Received Shared Drive file limit error: %v", err)
 		return false, fserrors.FatalError(err)
@@ -3299,7 +3302,7 @@ drives found and a combined drive.
 
     [AllDrives]
     type = combine
-    remote = "My Drive=My Drive:" "Test Drive=Test Drive:"
+    upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
 
 Adding this to the rclone config file will cause those team drives to
 be accessible with the aliases shown. Any illegal charactes will be
```
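The first hunk makes a 403 with reason `quotaExceeded` fatal when the `StopOnUploadLimit` option is set, so rclone stops instead of retrying into the quota. A minimal sketch of how such a reason is pulled out of a `google.golang.org/api/googleapi` error and classified; the `fatal` wrapper stands in for `fserrors.FatalError`, and the rest is illustrative, not rclone's exact logic:

```go
package main

import (
	"fmt"

	"google.golang.org/api/googleapi"
)

func fatal(err error) error { return fmt.Errorf("fatal (no retry): %w", err) }

// classify returns (retry, err): 403s with quota reasons become fatal
// errors instead of being retried; rate limits are retried with backoff.
func classify(err *googleapi.Error, stopOnUploadLimit bool) (bool, error) {
	if err.Code == 403 && len(err.Errors) > 0 {
		switch reason := err.Errors[0].Reason; {
		case stopOnUploadLimit && reason == "quotaExceeded":
			return false, fatal(err)
		case reason == "rateLimitExceeded":
			return true, err // back off and retry
		}
	}
	return false, err
}

func main() {
	e := &googleapi.Error{Code: 403, Errors: []googleapi.ErrorItem{{Reason: "quotaExceeded"}}}
	retry, err := classify(e, true)
	fmt.Println(retry, err)
}
```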
Google Drive internal tests:

```diff
@@ -19,6 +19,7 @@ import (
 	_ "github.com/rclone/rclone/backend/local"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/filter"
+	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/fs/sync"
@@ -28,6 +29,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"google.golang.org/api/drive/v3"
+	"google.golang.org/api/googleapi"
 )
 
 func TestDriveScopes(t *testing.T) {
@@ -190,6 +192,60 @@ func TestExtensionsForImportFormats(t *testing.T) {
 	}
 }
 
+func (f *Fs) InternalTestShouldRetry(t *testing.T) {
+	ctx := context.Background()
+	gatewayTimeout := googleapi.Error{
+		Code: 503,
+	}
+	timeoutRetry, timeoutError := f.shouldRetry(ctx, &gatewayTimeout)
+	assert.True(t, timeoutRetry)
+	assert.Equal(t, &gatewayTimeout, timeoutError)
+	generic403 := googleapi.Error{
+		Code: 403,
+	}
+	rLEItem := googleapi.ErrorItem{
+		Reason:  "rateLimitExceeded",
+		Message: "User rate limit exceeded.",
+	}
+	generic403.Errors = append(generic403.Errors, rLEItem)
+	oldStopUpload := f.opt.StopOnUploadLimit
+	oldStopDownload := f.opt.StopOnDownloadLimit
+	f.opt.StopOnUploadLimit = true
+	f.opt.StopOnDownloadLimit = true
+	defer func() {
+		f.opt.StopOnUploadLimit = oldStopUpload
+		f.opt.StopOnDownloadLimit = oldStopDownload
+	}()
+	expectedRLError := fserrors.FatalError(&generic403)
+	rateLimitRetry, rateLimitErr := f.shouldRetry(ctx, &generic403)
+	assert.False(t, rateLimitRetry)
+	assert.Equal(t, rateLimitErr, expectedRLError)
+	dQEItem := googleapi.ErrorItem{
+		Reason: "downloadQuotaExceeded",
+	}
+	generic403.Errors[0] = dQEItem
+	expectedDQError := fserrors.FatalError(&generic403)
+	downloadQuotaRetry, downloadQuotaError := f.shouldRetry(ctx, &generic403)
+	assert.False(t, downloadQuotaRetry)
+	assert.Equal(t, downloadQuotaError, expectedDQError)
+	tDFLEItem := googleapi.ErrorItem{
+		Reason: "teamDriveFileLimitExceeded",
+	}
+	generic403.Errors[0] = tDFLEItem
+	expectedTDFLError := fserrors.FatalError(&generic403)
+	teamDriveFileLimitRetry, teamDriveFileLimitError := f.shouldRetry(ctx, &generic403)
+	assert.False(t, teamDriveFileLimitRetry)
+	assert.Equal(t, teamDriveFileLimitError, expectedTDFLError)
+	qEItem := googleapi.ErrorItem{
+		Reason: "quotaExceeded",
+	}
+	generic403.Errors[0] = qEItem
+	expectedQuotaError := fserrors.FatalError(&generic403)
+	quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
+	assert.False(t, quotaExceededRetry)
+	assert.Equal(t, quotaExceededError, expectedQuotaError)
+}
+
 func (f *Fs) InternalTestDocumentImport(t *testing.T) {
 	oldAllow := f.opt.AllowImportNameChange
 	f.opt.AllowImportNameChange = true
@@ -545,6 +601,7 @@ func (f *Fs) InternalTest(t *testing.T) {
 	t.Run("UnTrash", f.InternalTestUnTrash)
 	t.Run("CopyID", f.InternalTestCopyID)
 	t.Run("AgeQuery", f.InternalTestAgeQuery)
+	t.Run("ShouldRetry", f.InternalTestShouldRetry)
 }
 
 var _ fstests.InternalTester = (*Fs)(nil)
```
Dropbox backend:

```diff
@@ -304,6 +304,9 @@ outer:
 //
 // Can be called from atexit handler
 func (b *batcher) Shutdown() {
+	if !b.Batching() {
+		return
+	}
 	b.shutOnce.Do(func() {
 		atexit.Unregister(b.atexit)
 		fs.Infof(b.f, "Commiting uploads - please wait...")
```
```diff
@@ -1435,7 +1435,7 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
 		}
 
 		if entryPath != "" {
-			notifyFunc(entryPath, entryType)
+			notifyFunc(f.opt.Enc.ToStandardPath(entryPath), entryType)
 		}
 	}
 	if !changeList.HasMore {
```
```diff
@@ -1697,6 +1697,9 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
 	if size > 0 {
 		// if size is known, check if next chunk is final
 		appendArg.Close = uint64(size)-in.BytesRead() <= uint64(chunkSize)
+		if in.BytesRead() > uint64(size) {
+			return nil, fmt.Errorf("expected %d bytes in input, but have read %d so far", size, in.BytesRead())
+		}
 	} else {
 		// if size is unknown, upload as long as we can read full chunks from the reader
 		appendArg.Close = in.BytesRead()-cursor.Offset < uint64(chunkSize)
```
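The hunk above guards the known-size path: the final chunk is detected by comparing bytes read so far against the declared total, and reading past that total now fails instead of silently producing a corrupt upload. A self-contained sketch of just that bookkeeping (the function and its arguments are illustrative, not Dropbox's API):

```go
package main

import "fmt"

// closeAndCheck reports whether the next chunk should close the upload
// session, and errors out if the reader has produced more bytes than
// the declared size.
func closeAndCheck(size int64, bytesRead, chunkSize uint64) (closeChunk bool, err error) {
	if size > 0 {
		if bytesRead > uint64(size) {
			return false, fmt.Errorf("expected %d bytes in input, but have read %d so far", size, bytesRead)
		}
		// final chunk when what remains fits into one chunk
		closeChunk = uint64(size)-bytesRead <= chunkSize
	}
	return closeChunk, nil
}

func main() {
	fmt.Println(closeAndCheck(100, 60, 50))  // true <nil>: 40 bytes left
	fmt.Println(closeAndCheck(100, 120, 50)) // error: reader longer than declared size
}
```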
Internet Archive backend:

```diff
@@ -42,54 +42,70 @@ func init() {
 			MetadataInfo: &fs.MetadataInfo{
 				System: map[string]fs.MetadataHelp{
 					"name": {
-						Help:    "Full file path, without the bucket part",
-						Type:    "filename",
-						Example: "backend/internetarchive/internetarchive.go",
+						Help:     "Full file path, without the bucket part",
+						Type:     "filename",
+						Example:  "backend/internetarchive/internetarchive.go",
+						ReadOnly: true,
 					},
 					"source": {
-						Help:    "The source of the file",
-						Type:    "string",
-						Example: "original",
+						Help:     "The source of the file",
+						Type:     "string",
+						Example:  "original",
+						ReadOnly: true,
 					},
 					"mtime": {
-						Help:    "Time of last modification, managed by Rclone",
-						Type:    "RFC 3339",
-						Example: "2006-01-02T15:04:05.999999999Z",
+						Help:     "Time of last modification, managed by Rclone",
+						Type:     "RFC 3339",
+						Example:  "2006-01-02T15:04:05.999999999Z",
+						ReadOnly: true,
 					},
 					"size": {
-						Help:    "File size in bytes",
-						Type:    "decimal number",
-						Example: "123456",
+						Help:     "File size in bytes",
+						Type:     "decimal number",
+						Example:  "123456",
+						ReadOnly: true,
 					},
 					"md5": {
-						Help:    "MD5 hash calculated by Internet Archive",
-						Type:    "string",
-						Example: "01234567012345670123456701234567",
+						Help:     "MD5 hash calculated by Internet Archive",
+						Type:     "string",
+						Example:  "01234567012345670123456701234567",
+						ReadOnly: true,
 					},
 					"crc32": {
-						Help:    "CRC32 calculated by Internet Archive",
-						Type:    "string",
-						Example: "01234567",
+						Help:     "CRC32 calculated by Internet Archive",
+						Type:     "string",
+						Example:  "01234567",
+						ReadOnly: true,
 					},
 					"sha1": {
-						Help:    "SHA1 hash calculated by Internet Archive",
-						Type:    "string",
-						Example: "0123456701234567012345670123456701234567",
+						Help:     "SHA1 hash calculated by Internet Archive",
+						Type:     "string",
+						Example:  "0123456701234567012345670123456701234567",
+						ReadOnly: true,
 					},
 					"format": {
-						Help:    "Name of format identified by Internet Archive",
-						Type:    "string",
-						Example: "Comma-Separated Values",
+						Help:     "Name of format identified by Internet Archive",
+						Type:     "string",
+						Example:  "Comma-Separated Values",
+						ReadOnly: true,
 					},
 					"old_version": {
-						Help:    "Whether the file was replaced and moved by keep-old-version flag",
-						Type:    "boolean",
-						Example: "true",
+						Help:     "Whether the file was replaced and moved by keep-old-version flag",
+						Type:     "boolean",
+						Example:  "true",
+						ReadOnly: true,
 					},
 					"viruscheck": {
-						Help:    "The last time viruscheck process was run for the file (?)",
-						Type:    "unixtime",
-						Example: "1654191352",
+						Help:     "The last time viruscheck process was run for the file (?)",
+						Type:     "unixtime",
+						Example:  "1654191352",
+						ReadOnly: true,
 					},
+					"summation": {
+						Help:     "Check https://forum.rclone.org/t/31922 for how it is used",
+						Type:     "string",
+						Example:  "md5",
+						ReadOnly: true,
+					},
 
 					"rclone-ia-mtime": {
@@ -173,7 +189,7 @@ var roMetadataKey = map[string]interface{}{
 	// do not add mtime here, it's a documented exception
 	"name": nil, "source": nil, "size": nil, "md5": nil,
 	"crc32": nil, "sha1": nil, "format": nil, "old_version": nil,
-	"viruscheck": nil,
+	"viruscheck": nil, "summation": nil,
 }
 
 // Options defines the configuration for this backend
@@ -222,6 +238,7 @@ type IAFile struct {
 	Md5       string `json:"md5"`
 	Crc32     string `json:"crc32"`
 	Sha1      string `json:"sha1"`
+	Summation string `json:"summation"`
 
 	rawData json.RawMessage
 }
@@ -555,7 +572,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 		return "", err
 	}
 	bucket, bucketPath := f.split(remote)
-	return path.Join(f.opt.FrontEndpoint, "/download/", bucket, bucketPath), nil
+	return path.Join(f.opt.FrontEndpoint, "/download/", bucket, quotePath(bucketPath)), nil
 }
 
 // Copy src to this remote using server-side copy operations.
@@ -743,7 +760,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	// make a GET request to (frontend)/download/:item/:path
 	opts := rest.Opts{
 		Method:  "GET",
-		Path:    path.Join("/download/", o.fs.root, o.fs.opt.Enc.FromStandardPath(o.remote)),
+		Path:    path.Join("/download/", o.fs.root, quotePath(o.fs.opt.Enc.FromStandardPath(o.remote))),
 		Options: optionsFixed,
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {
@@ -1135,16 +1152,21 @@ func (f *Fs) waitDelete(ctx context.Context, bucket, bucketPath string) (err err
 }
 
 func makeValidObject(f *Fs, remote string, file IAFile, mtime time.Time, size int64) *Object {
-	return &Object{
+	ret := &Object{
 		fs:      f,
 		remote:  remote,
 		modTime: mtime,
 		size:    size,
-		md5:     file.Md5,
-		crc32:   file.Crc32,
-		sha1:    file.Sha1,
 		rawData: file.rawData,
 	}
+	// hashes from _files.xml (where summation != "") is different from one in other files
+	// https://forum.rclone.org/t/internet-archive-md5-tag-in-id-files-xml-interpreted-incorrectly/31922
+	if file.Summation == "" {
+		ret.md5 = file.Md5
+		ret.crc32 = file.Crc32
+		ret.sha1 = file.Sha1
+	}
+	return ret
}
 
 func makeValidObject2(f *Fs, file IAFile, bucket string) *Object {
```
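Two of the hunks above route download paths through `quotePath` before they are joined into a URL, so special characters in file names survive the trip to the front end. A plausible sketch of such a helper, escaping each path segment while keeping the `/` separators, using only `net/url` (rclone's actual `quotePath` may differ):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// quotePath percent-encodes each segment of p without touching the
// separators, so "a b/c#d" becomes "a%20b/c%23d".
func quotePath(p string) string {
	segments := strings.Split(p, "/")
	for i, s := range segments {
		segments[i] = url.PathEscape(s)
	}
	return strings.Join(segments, "/")
}

func main() {
	fmt.Println(quotePath("dir with spaces/file#1.txt"))
	// dir%20with%20spaces/file%231.txt
}
```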
Jottacloud backend (the username is no longer cached in the config; it is always fetched via getCustomerInfo):

```diff
@@ -152,7 +152,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
 		m.Set(configClientSecret, "")
 
 		srv := rest.NewClient(fshttp.NewClient(ctx))
-		token, tokenEndpoint, username, err := doTokenAuth(ctx, srv, loginToken)
+		token, tokenEndpoint, err := doTokenAuth(ctx, srv, loginToken)
 		if err != nil {
 			return nil, fmt.Errorf("failed to get oauth token: %w", err)
 		}
@@ -161,7 +161,6 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
 		if err != nil {
 			return nil, fmt.Errorf("error while saving token: %w", err)
 		}
-		m.Set(configUsername, username)
 		return fs.ConfigGoto("choose_device")
 	case "legacy": // configure a jottacloud backend using legacy authentication
 		m.Set("configVersion", fmt.Sprint(legacyConfigVersion))
@@ -272,30 +271,21 @@ sync or the backup section, for example, you must choose yes.`)
 		if config.Result != "true" {
 			m.Set(configDevice, "")
 			m.Set(configMountpoint, "")
 		}
-		username, userOk := m.Get(configUsername)
-		if userOk && config.Result != "true" {
-			return fs.ConfigGoto("end")
-		}
 		oAuthClient, _, err := getOAuthClient(ctx, name, m)
 		if err != nil {
 			return nil, err
 		}
-		if !userOk {
-			apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
-			cust, err := getCustomerInfo(ctx, apiSrv)
-			if err != nil {
-				return nil, err
-			}
-			username = cust.Username
-			m.Set(configUsername, username)
-		}
-		jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
-		acc, err := getDriveInfo(ctx, jfsSrv, username)
+		if config.Result != "true" {
+			return fs.ConfigGoto("end")
+		}
+		apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
+
+		cust, err := getCustomerInfo(ctx, apiSrv)
+		if err != nil {
+			return nil, err
+		}
+
+		jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
+		acc, err := getDriveInfo(ctx, jfsSrv, cust.Username)
 		if err != nil {
 			return nil, err
 		}
@@ -326,10 +316,14 @@ a new by entering a unique name.`, defaultDevice)
 			return nil, err
 		}
 		jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
+		apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
 
-		username, _ := m.Get(configUsername)
+		cust, err := getCustomerInfo(ctx, apiSrv)
+		if err != nil {
+			return nil, err
+		}
 
-		acc, err := getDriveInfo(ctx, jfsSrv, username)
+		acc, err := getDriveInfo(ctx, jfsSrv, cust.Username)
 		if err != nil {
 			return nil, err
 		}
@@ -344,7 +338,7 @@ a new by entering a unique name.`, defaultDevice)
 		var dev *api.JottaDevice
 		if isNew {
 			fs.Debugf(nil, "Creating new device: %s", device)
-			dev, err = createDevice(ctx, jfsSrv, path.Join(username, device))
+			dev, err = createDevice(ctx, jfsSrv, path.Join(cust.Username, device))
 			if err != nil {
 				return nil, err
 			}
@@ -352,7 +346,7 @@ a new by entering a unique name.`, defaultDevice)
 		m.Set(configDevice, device)
 
 		if !isNew {
-			dev, err = getDeviceInfo(ctx, jfsSrv, path.Join(username, device))
+			dev, err = getDeviceInfo(ctx, jfsSrv, path.Join(cust.Username, device))
 			if err != nil {
 				return nil, err
 			}
@@ -382,11 +376,16 @@ You may create a new by entering a unique name.`, device)
 			return nil, err
 		}
 		jfsSrv := rest.NewClient(oAuthClient).SetRoot(jfsURL)
+		apiSrv := rest.NewClient(oAuthClient).SetRoot(apiURL)
+
+		cust, err := getCustomerInfo(ctx, apiSrv)
+		if err != nil {
+			return nil, err
+		}
 
-		username, _ := m.Get(configUsername)
 		device, _ := m.Get(configDevice)
 
-		dev, err := getDeviceInfo(ctx, jfsSrv, path.Join(username, device))
+		dev, err := getDeviceInfo(ctx, jfsSrv, path.Join(cust.Username, device))
 		if err != nil {
 			return nil, err
 		}
@@ -404,7 +403,7 @@ You may create a new by entering a unique name.`, device)
 			return nil, fmt.Errorf("custom mountpoints not supported on built-in %s device: %w", defaultDevice, err)
 		}
 		fs.Debugf(nil, "Creating new mountpoint: %s", mountpoint)
-		_, err := createMountPoint(ctx, jfsSrv, path.Join(username, device, mountpoint))
+		_, err := createMountPoint(ctx, jfsSrv, path.Join(cust.Username, device, mountpoint))
 		if err != nil {
 			return nil, err
 		}
@@ -591,10 +590,10 @@ func doLegacyAuth(ctx context.Context, srv *rest.Client, oauthConfig *oauth2.Con
 }
 
 // doTokenAuth runs the actual token request for V2 authentication
-func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 string) (token oauth2.Token, tokenEndpoint string, username string, err error) {
+func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 string) (token oauth2.Token, tokenEndpoint string, err error) {
 	loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
 	if err != nil {
-		return token, "", "", err
+		return token, "", err
 	}
 
 	// decode login token
@@ -602,7 +601,7 @@ func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 stri
 	decoder := json.NewDecoder(bytes.NewReader(loginTokenBytes))
 	err = decoder.Decode(&loginToken)
 	if err != nil {
-		return token, "", "", err
+		return token, "", err
 	}
 
 	// retrieve endpoint urls
@@ -613,7 +612,7 @@ func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 stri
 	var wellKnown api.WellKnown
 	_, err = apiSrv.CallJSON(ctx, &opts, nil, &wellKnown)
 	if err != nil {
-		return token, "", "", err
+		return token, "", err
 	}
 
 	// prepare out token request with username and password
@@ -635,14 +634,14 @@ func doTokenAuth(ctx context.Context, apiSrv *rest.Client, loginTokenBase64 stri
 	var jsonToken api.TokenJSON
 	_, err = apiSrv.CallJSON(ctx, &opts, nil, &jsonToken)
 	if err != nil {
-		return token, "", "", err
+		return token, "", err
 	}
 
 	token.AccessToken = jsonToken.AccessToken
 	token.RefreshToken = jsonToken.RefreshToken
 	token.TokenType = jsonToken.TokenType
 	token.Expiry = time.Now().Add(time.Duration(jsonToken.ExpiresIn) * time.Second)
-	return token, wellKnown.TokenEndpoint, loginToken.Username, err
+	return token, wellKnown.TokenEndpoint, err
 }
 
 // getCustomerInfo queries general information about the account
@@ -944,17 +943,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return err
 	})
 
-	user, userOk := m.Get(configUsername)
-	if userOk {
-		f.user = user
-	} else {
-		fs.Infof(nil, "Username not found in config and must be looked up, reconfigure to avoid the extra request")
-		cust, err := getCustomerInfo(ctx, f.apiSrv)
-		if err != nil {
-			return nil, err
-		}
-		f.user = cust.Username
+	cust, err := getCustomerInfo(ctx, f.apiSrv)
+	if err != nil {
+		return nil, err
 	}
+	f.user = cust.Username
 	f.setEndpoints()
 
 	if root != "" && !rootIsDir {
```
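The start of `doTokenAuth` above decodes the pasted login token: base64url without padding, then JSON. A runnable sketch of just those two steps; the `LoginToken` fields shown here are a guess at the subset the code uses, not Jottacloud's full schema:

```go
package main

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// LoginToken is an assumed subset of the decoded token payload.
type LoginToken struct {
	AuthToken     string `json:"auth_token"`
	WellKnownLink string `json:"well_known_link"`
}

func decodeLoginToken(loginTokenBase64 string) (*LoginToken, error) {
	// RawURLEncoding: URL-safe alphabet, no '=' padding.
	loginTokenBytes, err := base64.RawURLEncoding.DecodeString(loginTokenBase64)
	if err != nil {
		return nil, err
	}
	var loginToken LoginToken
	if err := json.NewDecoder(bytes.NewReader(loginTokenBytes)).Decode(&loginToken); err != nil {
		return nil, err
	}
	return &loginToken, nil
}

func main() {
	raw := base64.RawURLEncoding.EncodeToString(
		[]byte(`{"auth_token":"abc","well_known_link":"https://example.com/.well-known"}`))
	tok, err := decodeLoginToken(raw)
	fmt.Println(tok, err)
}
```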
OneDrive backend (21Vianet renamed to Vnet Group):

```diff
@@ -118,7 +118,7 @@ func init() {
 			Help: "Microsoft Cloud Germany",
 		}, {
 			Value: regionCN,
-			Help:  "Azure and Office 365 operated by 21Vianet in China",
+			Help:  "Azure and Office 365 operated by Vnet Group in China",
 		},
 	},
 }, {
@@ -2184,7 +2184,7 @@ func (o *Object) ID() string {
 * 3. To avoid region-related issues, please don't manually build rest.Opts from scratch.
 *    Instead, use these helper function, and customize the URL afterwards if needed.
 *
-* currently, the 21ViaNet's API differs in the following places:
+* currently, the Vnet Group's API differs in the following places:
 *   - https://{Endpoint}/drives/{driveID}/items/{leaf}:/{route}
 *     - this API doesn't work (gives invalid request)
 *     - can be replaced with the following API:
@@ -2233,7 +2233,7 @@ func escapeSingleQuote(str string) string {
 // newOptsCallWithIDPath build the rest.Opts structure with *a normalizedID (driveID#fileID, or simply fileID) and leaf*
 // using url template https://{Endpoint}/drives/{driveID}/items/{leaf}:/{route} (for international OneDrive)
 // or https://{Endpoint}/drives/{driveID}/items/children('{leaf}')/{route}
-// and https://{Endpoint}/drives/{driveID}/items/children('@a1')/{route}?@a1=URLEncode("'{leaf}'") (for 21ViaNet)
+// and https://{Endpoint}/drives/{driveID}/items/children('@a1')/{route}?@a1=URLEncode("'{leaf}'") (for Vnet Group)
 // if isPath is false, this function will only work when the leaf is "" or a child name (i.e. it doesn't accept multi-level leaf)
 // if isPath is true, multi-level leaf like a/b/c can be passed
 func (f *Fs) newOptsCallWithIDPath(normalizedID string, leaf string, isPath bool, method string, route string) (opts rest.Opts, ok bool) {
```
S3 backend:

```diff
@@ -4570,7 +4570,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	// uploaded properly. If size < 0 then we need to do the HEAD.
 	if o.fs.opt.NoHead && size >= 0 {
 		var head s3.HeadObjectOutput
-		structs.SetFrom(&head, req)
+		structs.SetFrom(&head, &req)
 		head.ETag = &md5sumHex // doesn't matter quotes are misssing
 		head.ContentLength = &size
 		// If we have done a single part PUT request then we can read these
```
```diff
@@ -67,8 +67,26 @@ func (f *Fs) InternalTestMetadata(t *testing.T) {
 	}
 }
 
+func (f *Fs) InternalTestNoHead(t *testing.T) {
+	ctx := context.Background()
+	// Set NoHead for this test
+	f.opt.NoHead = true
+	defer func() {
+		f.opt.NoHead = false
+	}()
+	contents := random.String(1000)
+	item := fstest.NewItem("test-no-head", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
+	obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
+	defer func() {
+		assert.NoError(t, obj.Remove(ctx))
+	}()
+	// PutTestContents checks the received object
+
+}
+
 func (f *Fs) InternalTest(t *testing.T) {
 	t.Run("Metadata", f.InternalTestMetadata)
+	t.Run("NoHead", f.InternalTestNoHead)
 }
 
 var _ fstests.InternalTester = (*Fs)(nil)
```
SFTP backend:

```diff
@@ -935,11 +935,22 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
 	// It appears that WS FTP doesn't like relative paths,
 	// and the openssh sftp tool also uses absolute paths.
 	if !path.IsAbs(f.root) {
-		path, err := c.sftpClient.RealPath(f.root)
+		// Trying RealPath first, to perform proper server-side canonicalize.
+		// It may fail (SSH_FX_FAILURE reported on WS FTP) and will then resort
+		// to simple path join with current directory from Getwd (which can work
+		// on WS FTP, even though it is also based on RealPath).
+		absRoot, err := c.sftpClient.RealPath(f.root)
 		if err != nil {
-			fs.Debugf(f, "Failed to resolve path - using relative paths: %v", err)
+			fs.Debugf(f, "Failed to resolve path using RealPath: %v", err)
+			cwd, err := c.sftpClient.Getwd()
+			if err != nil {
+				fs.Debugf(f, "Failed to to read current directory - using relative paths: %v", err)
+			} else {
+				f.absRoot = path.Join(cwd, f.root)
+				fs.Debugf(f, "Relative path joined with current directory to get absolute path %q", f.absRoot)
+			}
 		} else {
-			f.absRoot = path
+			f.absRoot = absRoot
+			fs.Debugf(f, "Relative path resolved to %q", f.absRoot)
 		}
 	}
```
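The fallback above generalizes to: try the server-side canonicalizer first, then degrade to a client-side join with the working directory, and only stay relative when both fail. A compact sketch with function values standing in for the SFTP client methods (the `realPath`/`getwd` parameters are stand-ins, not pkg/sftp's API):

```go
package main

import (
	"errors"
	"fmt"
	"path"
)

// absRoot resolves root to an absolute path, preferring server-side
// canonicalization and falling back to cwd + join.
func absRoot(root string, realPath func(string) (string, error), getwd func() (string, error)) (string, bool) {
	if p, err := realPath(root); err == nil {
		return p, true
	}
	if cwd, err := getwd(); err == nil {
		return path.Join(cwd, root), true
	}
	return root, false // stay relative if both fail
}

func main() {
	failing := func(string) (string, error) { return "", errors.New("SSH_FX_FAILURE") }
	cwd := func() (string, error) { return "/home/user", nil }
	fmt.Println(absRoot("data", failing, cwd)) // /home/user/data true
}
```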
union backend:

```diff
@@ -169,7 +169,11 @@ func (f *Fs) mkdir(ctx context.Context, dir string) ([]*upstream.Fs, error) {
 	if err != nil {
 		return nil, err
 	}
-	return upstreams, nil
+	// If created roots then choose one
+	if dir == "" {
+		upstreams, err = f.create(ctx, dir)
+	}
+	return upstreams, err
 }
 
 // Mkdir makes the root directory of the Fs object
@@ -834,6 +838,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		}
 	}
 
+	root = strings.Trim(root, "/")
 	upstreams := make([]*upstream.Fs, len(opt.Upstreams))
 	errs := Errors(make([]error, len(opt.Upstreams)))
 	multithread(len(opt.Upstreams), func(i int) {
```
```diff
@@ -24,6 +24,10 @@ var (
 
 // Fs is a wrap of any fs and its configs
 type Fs struct {
+	// In order to ensure memory alignment on 32-bit architectures
+	// when this field is accessed through sync/atomic functions,
+	// it must be the first entry in the struct
+	cacheExpiry int64 // usage cache expiry time
 	fs.Fs
 	RootFs   fs.Fs
 	RootPath string
@@ -32,7 +36,6 @@ type Fs struct {
 	creatable   bool
 	usage       *fs.Usage     // Cache the usage
 	cacheTime   time.Duration // cache duration
-	cacheExpiry int64         // usage cache expiry time
 	cacheMutex  sync.RWMutex
 	cacheOnce   sync.Once
 	cacheUpdate bool // if the cache is updating
```
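Why the hunk above moves `cacheExpiry` to the top of the struct: the 64-bit functions in `sync/atomic` require their operand to be 8-byte aligned, and on 32-bit platforms Go only guarantees that for the first word of an allocated struct. A minimal sketch of the safe layout (the `cachedFs` type is illustrative):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type cachedFs struct {
	// 64-bit field first so atomic.LoadInt64/StoreInt64 stay aligned
	// even on 386/arm, where later fields may sit at 4-byte offsets.
	cacheExpiry int64
	name        string
	creatable   bool
}

func main() {
	f := &cachedFs{name: "upstream"}
	atomic.StoreInt64(&f.cacheExpiry, 1654191352)
	fmt.Println(atomic.LoadInt64(&f.cacheExpiry))
}
```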
serve restic (the go1.17 build constraints are dropped and the unsupported-platform stub is deleted):

```diff
@@ -1,6 +1,3 @@
-//go:build go1.17
-// +build go1.17
-
 package restic
 
 import (
@@ -1,6 +1,3 @@
-//go:build go1.17
-// +build go1.17
-
 package restic
 
 import (
@@ -1,8 +1,4 @@
 // Package restic serves a remote suitable for use with restic
-
-//go:build go1.17
-// +build go1.17
-
 package restic
 
 import (
@@ -1,6 +1,3 @@
-//go:build go1.17
-// +build go1.17
-
 package restic
 
 import (
@@ -1,6 +1,3 @@
-//go:build go1.17
-// +build go1.17
-
 package restic
 
 import (
@@ -1,9 +1,6 @@
 // Serve restic tests set up a server and run the integration tests
 // for restic against it.
 
-//go:build go1.17
-// +build go1.17
-
 package restic
 
 import (
@@ -1,14 +0,0 @@
-// Build for restic for unsupported platforms to stop go complaining
-// about "no buildable Go source files "
-
-//go:build !go1.17
-// +build !go1.17
-
-package restic
-
-import (
-	"github.com/spf13/cobra"
-)
-
-// Command definition for cobra
-var Command *cobra.Command
@@ -1,6 +1,3 @@
-//go:build go1.17
-// +build go1.17
-
 package restic
 
 import (
@@ -1,6 +1,3 @@
-//go:build go1.17
-// +build go1.17
-
 package restic
 
 import (
```
sync command docs:

```diff
@@ -49,6 +49,11 @@ extended explanation in the [copy](/commands/rclone_copy/) command if unsure.
 If dest:path doesn't exist, it is created and the source:path contents
 go there.
 
+It is not possible to sync overlapping remotes. However, you may exclude
+the destination from the sync with a filter rule or by putting an
+exclude-if-present file inside the destination directory and sync to a
+destination that is inside the source directory.
+
 **Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics
 
 **Note**: Use the ` + "`rclone dedupe`" + ` command to deal with "Duplicate object/directory found in source/destination - ignoring" errors.
```
authors:

```diff
@@ -626,3 +626,7 @@ put them back in again.` >}}
   * Lorenzo Maiorfi <maiorfi@gmail.com>
   * Claudio Maradonna <penguyman@stronzi.org>
   * Ovidiu Victor Tatar <ovi.tatar@googlemail.com>
+  * Evan Spensley <epspensley@gmail.com>
+  * Yen Hu <61753151+0x59656e@users.noreply.github.com>
+  * Steve Kowalik <steven@wedontsleep.org>
+  * Jordi Gonzalez Muñoz <jordigonzm@gmail.com>
```
Google Drive docs:

```diff
@@ -116,7 +116,7 @@ This would produce something like this:
 
     [AllDrives]
     type = combine
-    remote = "My Drive=My Drive:" "Test Drive=Test Drive:"
+    upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
 
 If you then add that config to your config file (find it with `rclone
 config file`) then you can access all the shared drives in one place
```
--backup-dir docs:

```diff
@@ -582,7 +582,8 @@ been added) in DIR, then it will be overwritten.
 
 The remote in use must support server-side move or copy and you must
 use the same remote as the destination of the sync. The backup
-directory must not overlap the destination directory.
+directory must not overlap the destination directory without it being
+excluded by a filter rule.
 
 For example
```
global flags docs:

```diff
@@ -1643,6 +1644,18 @@ This sets the interval between each retry specified by `--retries`
 
 The default is `0`. Use `0` to disable.
 
+### --server-side-across-configs ###
+
+Allow server-side operations (e.g. copy or move) to work across
+different configurations.
+
+This can be useful if you wish to do a server-side copy or move
+between two remotes which use the same backend but are configured
+differently.
+
+Note that this isn't enabled by default because it isn't easy for
+rclone to tell if it will work between any two configurations.
+
 ### --size-only ###
 
 Normally rclone will look at modification time and size of files to
```
```diff
@@ -1332,7 +1332,7 @@ drives found and a combined drive.
 
     [AllDrives]
     type = combine
-    remote = "My Drive=My Drive:" "Test Drive=Test Drive:"
+    upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
 
 Adding this to the rclone config file will cause those team drives to
 be accessible with the aliases shown. Any illegal charactes will be
```
install docs:

```diff
@@ -191,7 +191,7 @@ kill %1
 ## Install from source
 
 Make sure you have git and [Go](https://golang.org/) installed.
-Go version 1.16 or newer is required, latest release is recommended.
+Go version 1.17 or newer is required, latest release is recommended.
 You can get it from your package manager, or download it from
 [golang.org/dl](https://golang.org/dl/). Then you can run the following:
```
general docs (broken mount links fixed):

```diff
@@ -318,7 +318,7 @@ such as a regular [sync](https://rclone.org/commands/rclone_sync/), you will pro
 to configure your rclone command in your operating system's scheduler. If you need to
 expose *service*-like features, such as [remote control](https://rclone.org/rc/),
 [GUI](https://rclone.org/gui/), [serve](https://rclone.org/commands/rclone_serve/)
-or [mount](https://rclone.org/commands/rclone_move/), you will often want an rclone
+or [mount](https://rclone.org/commands/rclone_mount/), you will often want an rclone
 command always running in the background, and configuring it to run in a service infrastructure
 may be a better option. Below are some alternatives on how to achieve this on
 different operating systems.
@@ -351,7 +351,7 @@ c:\rclone\rclone.exe sync c:\files remote:/files --no-console --log-file c:\rclo
 
 #### User account
 
-As mentioned in the [mount](https://rclone.org/commands/rclone_move/) documentation,
+As mentioned in the [mount](https://rclone.org/commands/rclone_mount/) documentation,
 mounted drives created as Administrator are not visible to other accounts, not even the
 account that was elevated as Administrator. By running the mount command as the
 built-in `SYSTEM` user account, it will create drives accessible for everyone on
```
Internet Archive docs:

```diff
@@ -12,11 +12,10 @@ Refer to [IAS3 API documentation](https://archive.org/services/docs/api/ias3.htm
 Paths are specified as `remote:bucket` (or `remote:` for the `lsd`
 command.) You may put subdirectories in too, e.g. `remote:item/path/to/dir`.
 
-Once you have made a remote (see the provider specific section above)
-you can use it like this:
+Unlike S3, listing up all items uploaded by you isn't supported.
+
+Once you have made a remote, you can use it like this:
 
 Make a new item
 
     rclone mkdir remote:item
@@ -53,6 +52,7 @@ The following are reserved by Internet Archive:
 - `format`
 - `old_version`
 - `viruscheck`
+- `summation`
 
 Trying to set values to these keys is ignored with a warning.
 Only setting `mtime` is an exception. Doing so make it the identical behavior as setting ModTime.
```
```diff
@@ -258,19 +258,20 @@ Here are the possible system metadata items for the internetarchive backend.
 
 | Name | Help | Type | Example | Read Only |
 |------|------|------|---------|-----------|
-| crc32 | CRC32 calculated by Internet Archive | string | 01234567 | N |
-| format | Name of format identified by Internet Archive | string | Comma-Separated Values | N |
-| md5 | MD5 hash calculated by Internet Archive | string | 01234567012345670123456701234567 | N |
-| mtime | Time of last modification, managed by Rclone | RFC 3339 | 2006-01-02T15:04:05.999999999Z | N |
-| name | Full file path, without the bucket part | filename | backend/internetarchive/internetarchive.go | N |
-| old_version | Whether the file was replaced and moved by keep-old-version flag | boolean | true | N |
+| crc32 | CRC32 calculated by Internet Archive | string | 01234567 | **Y** |
+| format | Name of format identified by Internet Archive | string | Comma-Separated Values | **Y** |
+| md5 | MD5 hash calculated by Internet Archive | string | 01234567012345670123456701234567 | **Y** |
+| mtime | Time of last modification, managed by Rclone | RFC 3339 | 2006-01-02T15:04:05.999999999Z | **Y** |
+| name | Full file path, without the bucket part | filename | backend/internetarchive/internetarchive.go | **Y** |
+| old_version | Whether the file was replaced and moved by keep-old-version flag | boolean | true | **Y** |
 | rclone-ia-mtime | Time of last modification, managed by Internet Archive | RFC 3339 | 2006-01-02T15:04:05.999999999Z | N |
 | rclone-mtime | Time of last modification, managed by Rclone | RFC 3339 | 2006-01-02T15:04:05.999999999Z | N |
 | rclone-update-track | Random value used by Rclone for tracking changes inside Internet Archive | string | aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | N |
-| sha1 | SHA1 hash calculated by Internet Archive | string | 0123456701234567012345670123456701234567 | N |
-| size | File size in bytes | decimal number | 123456 | N |
-| source | The source of the file | string | original | N |
-| viruscheck | The last time viruscheck process was run for the file (?) | unixtime | 1654191352 | N |
+| sha1 | SHA1 hash calculated by Internet Archive | string | 0123456701234567012345670123456701234567 | **Y** |
+| size | File size in bytes | decimal number | 123456 | **Y** |
+| source | The source of the file | string | original | **Y** |
+| summation | Check https://forum.rclone.org/t/31922 for how it is used | string | md5 | **Y** |
+| viruscheck | The last time viruscheck process was run for the file (?) | unixtime | 1654191352 | **Y** |
 
 See the [metadata](/docs/#metadata) docs for more info.
```
OneDrive docs:

```diff
@@ -263,7 +263,7 @@ Properties:
     - "de"
         - Microsoft Cloud Germany
     - "cn"
-        - Azure and Office 365 operated by 21Vianet in China
+        - Azure and Office 365 operated by Vnet Group in China
 
 ### Advanced options
```
version file:

```diff
@@ -1 +1 @@
-v1.59.0
+v1.60.0
```
fs/config.go (181 lines changed)
The page lists the whole ConfigInfo struct twice (old and new) because adding the new field re-aligns every line; the substantive change is the one added field at the end:

```diff
@@ -45,96 +45,97 @@ var (
 
 // ConfigInfo is filesystem config options
 type ConfigInfo struct {
 	LogLevel               LogLevel
 	StatsLogLevel          LogLevel
 	UseJSONLog             bool
 	DryRun                 bool
 	Interactive            bool
 	CheckSum               bool
 	SizeOnly               bool
 	IgnoreTimes            bool
 	IgnoreExisting         bool
 	IgnoreErrors           bool
 	ModifyWindow           time.Duration
 	Checkers               int
 	Transfers              int
 	ConnectTimeout         time.Duration // Connect timeout
 	Timeout                time.Duration // Data channel timeout
 	ExpectContinueTimeout  time.Duration
 	Dump                   DumpFlags
 	InsecureSkipVerify     bool // Skip server certificate verification
 	DeleteMode             DeleteMode
 	MaxDelete              int64
 	TrackRenames           bool   // Track file renames.
 	TrackRenamesStrategy   string // Comma separated list of strategies used to track renames
 	LowLevelRetries        int
 	UpdateOlder            bool // Skip files that are newer on the destination
 	NoGzip                 bool // Disable compression
 	MaxDepth               int
 	IgnoreSize             bool
 	IgnoreChecksum         bool
 	IgnoreCaseSync         bool
 	NoTraverse             bool
 	CheckFirst             bool
 	NoCheckDest            bool
 	NoUnicodeNormalization bool
 	NoUpdateModTime        bool
 	DataRateUnit           string
 	CompareDest            []string
 	CopyDest               []string
 	BackupDir              string
 	Suffix                 string
 	SuffixKeepExtension    bool
 	UseListR               bool
 	BufferSize             SizeSuffix
 	BwLimit                BwTimetable
 	BwLimitFile            BwTimetable
 	TPSLimit               float64
 	TPSLimitBurst          int
 	BindAddr               net.IP
 	DisableFeatures        []string
 	UserAgent              string
 	Immutable              bool
 	AutoConfirm            bool
 	StreamingUploadCutoff  SizeSuffix
 	StatsFileNameLength    int
 	AskPassword            bool
 	PasswordCommand        SpaceSepList
 	UseServerModTime       bool
 	MaxTransfer            SizeSuffix
 	MaxDuration            time.Duration
 	CutoffMode             CutoffMode
 	MaxBacklog             int
 	MaxStatsGroups         int
 	StatsOneLine           bool
 	StatsOneLineDate       bool   // If we want a date prefix at all
 	StatsOneLineDateFormat string // If we want to customize the prefix
 	ErrorOnNoTransfer      bool   // Set appropriate exit code if no files transferred
 	Progress               bool
 	ProgressTerminalTitle  bool
 	Cookie                 bool
 	UseMmap                bool
 	CaCert                 string // Client Side CA
 	ClientCert             string // Client Side Cert
 	ClientKey              string // Client Side Key
 	MultiThreadCutoff      SizeSuffix
 	MultiThreadStreams     int
 	MultiThreadSet         bool   // whether MultiThreadStreams was set (set in fs/config/configflags)
 	OrderBy                string // instructions on how to order the transfer
 	UploadHeaders          []*HTTPOption
 	DownloadHeaders        []*HTTPOption
 	Headers                []*HTTPOption
 	MetadataSet            Metadata // extra metadata to write when uploading
 	RefreshTimes           bool
 	NoConsole              bool
 	TrafficClass           uint8
 	FsCacheExpireDuration  time.Duration
 	FsCacheExpireInterval  time.Duration
 	DisableHTTP2           bool
 	HumanReadable          bool
 	KvLockTime             time.Duration // maximum time to keep key-value database locked by process
 	DisableHTTPKeepAlives  bool
 	Metadata               bool
+	ServerSideAcrossConfigs bool
 }
 
 // NewConfig creates a new config with everything set to the default
```
fs/config/configflags:

```diff
@@ -141,6 +141,7 @@ func AddFlags(ci *fs.ConfigInfo, flagSet *pflag.FlagSet) {
 	flags.DurationVarP(flagSet, &ci.KvLockTime, "kv-lock-time", "", ci.KvLockTime, "Maximum time to keep key-value database locked by process")
 	flags.BoolVarP(flagSet, &ci.DisableHTTPKeepAlives, "disable-http-keep-alives", "", ci.DisableHTTPKeepAlives, "Disable HTTP keep-alives and use each connection once.")
 	flags.BoolVarP(flagSet, &ci.Metadata, "metadata", "M", ci.Metadata, "If set, preserve metadata when copying objects")
+	flags.BoolVarP(flagSet, &ci.ServerSideAcrossConfigs, "server-side-across-configs", "", ci.ServerSideAcrossConfigs, "Allow server-side operations (e.g. copy) to work across different configs")
 }
 
 // ParseHeaders converts the strings passed in via the header flags into HTTPOptions
```
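A backend can then consult the new global option through the config it already reads (rclone exposes the global config via `fs.GetConfig(ctx)`, as seen in the filter code below). A hedged sketch of the kind of gate a backend might apply; the `ConfigInfo` stand-in and `canServerSideCopy` helper are illustrative, not rclone's actual call sites:

```go
package main

import "fmt"

// ConfigInfo stands in for the fs.ConfigInfo fields used here.
type ConfigInfo struct {
	ServerSideAcrossConfigs bool
}

// canServerSideCopy allows server-side copy within one config, or
// across configs when the user opted in with
// --server-side-across-configs.
func canServerSideCopy(ci *ConfigInfo, sameConfig bool) bool {
	return sameConfig || ci.ServerSideAcrossConfigs
}

func main() {
	ci := &ConfigInfo{ServerSideAcrossConfigs: true}
	fmt.Println(canServerSideCopy(ci, false)) // true
}
```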
fs/filter (the rule machinery moves into a shared RulesOpt/rules helper, and metadata rules are added):

```diff
@@ -2,14 +2,11 @@
 package filter
 
 import (
-	"bufio"
 	"context"
 	"errors"
 	"fmt"
-	"log"
-	"os"
 	"path"
 	"regexp"
 	"strings"
 	"time"
 
@@ -22,80 +19,19 @@ import (
 // This is accessed through GetConfig and AddConfig
 var globalConfig = mustNewFilter(nil)
 
-// rule is one filter rule
-type rule struct {
-	Include bool
-	Regexp  *regexp.Regexp
-}
-
-// Match returns true if rule matches path
-func (r *rule) Match(path string) bool {
-	return r.Regexp.MatchString(path)
-}
-
-// String the rule
-func (r *rule) String() string {
-	c := "-"
-	if r.Include {
-		c = "+"
-	}
-	return fmt.Sprintf("%s %s", c, r.Regexp.String())
-}
-
-// rules is a slice of rules
-type rules struct {
-	rules    []rule
-	existing map[string]struct{}
-}
-
-// add adds a rule if it doesn't exist already
-func (rs *rules) add(Include bool, re *regexp.Regexp) {
-	if rs.existing == nil {
-		rs.existing = make(map[string]struct{})
-	}
-	newRule := rule{
-		Include: Include,
-		Regexp:  re,
-	}
-	newRuleString := newRule.String()
-	if _, ok := rs.existing[newRuleString]; ok {
-		return // rule already exists
-	}
-	rs.rules = append(rs.rules, newRule)
-	rs.existing[newRuleString] = struct{}{}
-}
-
-// clear clears all the rules
-func (rs *rules) clear() {
-	rs.rules = nil
-	rs.existing = nil
-}
-
-// len returns the number of rules
-func (rs *rules) len() int {
-	return len(rs.rules)
-}
-
-// FilesMap describes the map of files to transfer
-type FilesMap map[string]struct{}
-
 // Opt configures the filter
 type Opt struct {
 	DeleteExcluded bool
-	FilterRule     []string
-	FilterFrom     []string
-	ExcludeRule    []string
-	ExcludeFrom    []string
-	ExcludeFile    []string
-	IncludeRule    []string
-	IncludeFrom    []string
-	FilesFrom      []string
-	FilesFromRaw   []string
-	MinAge         fs.Duration
-	MaxAge         fs.Duration
-	MinSize        fs.SizeSuffix
-	MaxSize        fs.SizeSuffix
-	IgnoreCase     bool
+	RulesOpt
+	ExcludeFile  []string
+	MetaRules    RulesOpt
+	FilesFrom    []string
+	FilesFromRaw []string
+	MinAge       fs.Duration
+	MaxAge       fs.Duration
+	MinSize      fs.SizeSuffix
+	MaxSize      fs.SizeSuffix
+	IgnoreCase   bool
 }
 
 // DefaultOpt is the default config for the filter
@@ -106,6 +42,9 @@ var DefaultOpt = Opt{
 	MaxSize: fs.SizeSuffix(-1),
 }
 
+// FilesMap describes the map of files to transfer
+type FilesMap map[string]struct{}
+
 // Filter describes any filtering in operation
 type Filter struct {
 	Opt Opt
@@ -113,6 +52,7 @@ type Filter struct {
 	ModTimeTo time.Time
 	fileRules rules
 	dirRules  rules
+	metaRules rules
 	files     FilesMap // files if filesFrom
 	dirs      FilesMap // dirs from filesFrom
 }
@@ -142,57 +82,14 @@ func NewFilter(opt *Opt) (f *Filter, err error) {
 		fs.Debugf(nil, "--max-age %v to %v", f.Opt.MaxAge, f.ModTimeFrom)
 	}
 
-	addImplicitExclude := false
-	foundExcludeRule := false
-
-	for _, rule := range f.Opt.IncludeRule {
-		err = f.Add(true, rule)
-		if err != nil {
-			return nil, err
-		}
-		addImplicitExclude = true
-	}
-	for _, rule := range f.Opt.IncludeFrom {
-		err := forEachLine(rule, false, func(line string) error {
-			return f.Add(true, line)
-		})
-		if err != nil {
-			return nil, err
-		}
-		addImplicitExclude = true
-	}
-	for _, rule := range f.Opt.ExcludeRule {
-		err = f.Add(false, rule)
-		if err != nil {
-			return nil, err
-		}
-		foundExcludeRule = true
-	}
-	for _, rule := range f.Opt.ExcludeFrom {
-		err := forEachLine(rule, false, func(line string) error {
-			return f.Add(false, line)
-		})
-		if err != nil {
-			return nil, err
-		}
-		foundExcludeRule = true
-	}
-
-	if addImplicitExclude && foundExcludeRule {
-		fs.Errorf(nil, "Using --filter is recommended instead of both --include and --exclude as the order they are parsed in is indeterminate")
-	}
-
-	for _, rule := range f.Opt.FilterRule {
-		err = f.AddRule(rule)
-		if err != nil {
-			return nil, err
-		}
-	}
-	for _, rule := range f.Opt.FilterFrom {
-		err := forEachLine(rule, false, f.AddRule)
-		if err != nil {
-			return nil, err
-		}
+	err = parseRules(&f.Opt.RulesOpt, f.Add, f.Clear)
+	if err != nil {
+		return nil, err
+	}
+
+	err = parseRules(&f.Opt.MetaRules, f.metaRules.Add, f.metaRules.clear)
+	if err != nil {
+		return nil, err
 	}
 
 	inActive := f.InActive()
@@ -225,12 +122,6 @@ func NewFilter(opt *Opt) (f *Filter, err error) {
 		}
 	}
 
-	if addImplicitExclude {
-		err = f.Add(false, "/**")
-		if err != nil {
-			return nil, err
-		}
-	}
 	if fs.GetConfig(context.Background()).Dump&fs.DumpFilters != 0 {
 		fmt.Println("--- start filters ---")
 		fmt.Println(f.DumpFilters())
@@ -309,16 +200,7 @@ func (f *Filter) Add(Include bool, glob string) error {
 //
 // Line comments may be introduced with '#' or ';'
 func (f *Filter) AddRule(rule string) error {
-	switch {
-	case rule == "!":
-		f.Clear()
-		return nil
-	case strings.HasPrefix(rule, "- "):
-		return f.Add(false, rule[2:])
-	case strings.HasPrefix(rule, "+ "):
-		return f.Add(true, rule[2:])
-	}
-	return fmt.Errorf("malformed rule %q", rule)
+	return addRule(rule, f.Add, f.Clear)
 }
 
 // initAddFile creates f.files and f.dirs
@@ -359,6 +241,7 @@ func (f *Filter) Files() FilesMap {
 func (f *Filter) Clear() {
 	f.fileRules.clear()
 	f.dirRules.clear()
+	f.metaRules.clear()
 }
 
 // InActive returns false if any filters are active
@@ -370,17 +253,13 @@ func (f *Filter) InActive() bool {
 		f.Opt.MaxSize < 0 &&
 		f.fileRules.len() == 0 &&
 		f.dirRules.len() == 0 &&
+		f.metaRules.len() == 0 &&
 		len(f.Opt.ExcludeFile) == 0)
 }
 
 // IncludeRemote returns whether this remote passes the filter rules.
 func (f *Filter) IncludeRemote(remote string) bool {
-	for _, rule := range f.fileRules.rules {
-		if rule.Match(remote) {
-			return rule.Include
-		}
-	}
-	return true
+	return f.fileRules.include(remote)
 }
 
 // ListContainsExcludeFile checks if exclude file is present in the list.
@@ -423,13 +302,7 @@ func (f *Filter) IncludeDirectory(ctx context.Context, fs fs.Fs) func(string) (b
 			return include, nil
 		}
 		remote += "/"
-		for _, rule := range f.dirRules.rules {
-			if rule.Match(remote) {
-				return rule.Include, nil
-			}
-		}
-
-		return true, nil
+		return f.dirRules.include(remote), nil
 	}
 }
 
@@ -453,7 +326,7 @@ func (f *Filter) DirContainsExcludeFile(ctx context.Context, fremote fs.Fs, remo
 
 // Include returns whether this object should be included into the
 // sync or not
-func (f *Filter) Include(remote string, size int64, modTime time.Time) bool {
+func (f *Filter) Include(remote string, size int64, modTime time.Time, metadata fs.Metadata) bool {
```
|
||||
// filesFrom takes precedence
|
||||
if f.files != nil {
|
||||
_, include := f.files[remote]
|
||||
@@ -471,6 +344,15 @@ func (f *Filter) Include(remote string, size int64, modTime time.Time) bool {
|
||||
if f.Opt.MaxSize >= 0 && size > int64(f.Opt.MaxSize) {
|
||||
return false
|
||||
}
|
||||
if f.metaRules.len() > 0 && len(metadata) > 0 {
|
||||
metadatas := make([]string, 0, len(metadata))
|
||||
for key, value := range metadata {
|
||||
metadatas = append(metadatas, fmt.Sprintf("%s=%s", key, value))
|
||||
}
|
||||
if !f.metaRules.includeMany(metadatas) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return f.IncludeRemote(remote)
|
||||
}
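
Editorial note on the metadata matching added above: `Include` flattens the object's metadata map into plain `key=value` strings and hands them to the metadata rule set, so metadata filters are written against that flattened form. A minimal standalone sketch of the flattening (names here are illustrative, not part of the diff):

```go
package main

import "fmt"

func main() {
	metadata := map[string]string{ // stand-in for fs.Metadata
		"content-type": "text/plain",
		"mtime":        "2022-08-01T10:00:00Z",
	}
	metadatas := make([]string, 0, len(metadata))
	for key, value := range metadata {
		metadatas = append(metadatas, fmt.Sprintf("%s=%s", key, value))
	}
	fmt.Println(metadatas) // e.g. [content-type=text/plain mtime=2022-08-01T10:00:00Z]
}
```

A metadata rule such as `+ content-type=text/*` is then matched against each of these strings in turn (see `includeMany` in the new rules.go below).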
@@ -485,39 +367,17 @@ func (f *Filter) IncludeObject(ctx context.Context, o fs.Object) bool {
     } else {
         modTime = time.Unix(0, 0)
     }
-
-    return f.Include(o.Remote(), o.Size(), modTime)
-}
-
-// forEachLine calls fn on every line in the file pointed to by path
-//
-// It ignores empty lines and lines starting with '#' or ';' if raw is false
-func forEachLine(path string, raw bool, fn func(string) error) (err error) {
-    var scanner *bufio.Scanner
-    if path == "-" {
-        scanner = bufio.NewScanner(os.Stdin)
-    } else {
-        in, err := os.Open(path)
-        if err != nil {
-            return err
-        }
-        scanner = bufio.NewScanner(in)
-        defer fs.CheckClose(in, &err)
-    }
-    for scanner.Scan() {
-        line := scanner.Text()
-        if !raw {
-            line = strings.TrimSpace(line)
-            if len(line) == 0 || line[0] == '#' || line[0] == ';' {
-                continue
-            }
-        }
-        err := fn(line)
-        if err != nil {
-            return err
-        }
-    }
-    return scanner.Err()
+    var metadata fs.Metadata
+    if f.metaRules.len() > 0 {
+        var err error
+        metadata, err = fs.GetMetadata(ctx, o)
+        if err != nil {
+            fs.Errorf(o, "Failed to read metadata: %v", err)
+            metadata = nil
+        }
+    }
+    return f.Include(o.Remote(), o.Size(), modTime, metadata)
 }
 
 // DumpFilters dumps the filters in textual form, 1 per line
@@ -537,6 +397,12 @@ func (f *Filter) DumpFilters() string {
     for _, dirRule := range f.dirRules.rules {
         rules = append(rules, dirRule.String())
     }
+    if f.metaRules.len() > 0 {
+        rules = append(rules, "--- Metadata filter rules ---")
+        for _, metaRule := range f.metaRules.rules {
+            rules = append(rules, metaRule.String())
+        }
+    }
     return strings.Join(rules, "\n")
 }
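
A usage sketch for the extended dump, assuming only the API visible in this diff (`NewFilter`, `DumpFilters`, and the new `RulesOpt`/`MetaRules` fields); with metadata rules set, the output gains the `--- Metadata filter rules ---` section appended above:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/filter"
)

func main() {
	opt := filter.DefaultOpt
	opt.RulesOpt.FilterRule = []string{"+ *.jpg", "- *"}
	opt.MetaRules.FilterRule = []string{"+ content-type=text/*"}
	f, err := filter.NewFilter(&opt)
	if err != nil {
		panic(err)
	}
	fmt.Println(f.DumpFilters()) // file/dir rules, then the metadata section
}
```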

fs/filter/filter_test.go

@@ -208,7 +208,7 @@ type includeTest struct {
 
 func testInclude(t *testing.T, f *Filter, tests []includeTest) {
     for _, test := range tests {
-        got := f.Include(test.in, test.size, time.Unix(test.modTime, 0))
+        got := f.Include(test.in, test.size, time.Unix(test.modTime, 0), nil)
         assert.Equal(t, test.want, got, fmt.Sprintf("in=%q, size=%v, modTime=%v", test.in, test.size, time.Unix(test.modTime, 0)))
     }
 }
@@ -714,7 +714,7 @@ func TestFilterMatchesFromDocs(t *testing.T) {
         require.NoError(t, err)
         err = f.Add(false, "*")
         require.NoError(t, err)
-        included := f.Include(test.file, 0, time.Unix(0, 0))
+        included := f.Include(test.file, 0, time.Unix(0, 0), nil)
         if included != test.included {
             t.Errorf("%q match %q: want %v got %v", test.glob, test.file, test.included, included)
         }

fs/filter/filterflags/filterflags.go

@@ -3,6 +3,7 @@ package filterflags
 
 import (
     "context"
+    "fmt"
 
     "github.com/rclone/rclone/fs/config/flags"
     "github.com/rclone/rclone/fs/filter"
@@ -26,17 +27,27 @@ func Reload(ctx context.Context) (err error) {
     return nil
 }
 
+// AddRuleFlags add a set of rules flags with prefix
+func AddRuleFlags(flagSet *pflag.FlagSet, Opt *filter.RulesOpt, what, prefix string) {
+    shortFilter := ""
+    if prefix == "" {
+        shortFilter = "f"
+    }
+    flags.StringArrayVarP(flagSet, &Opt.FilterRule, prefix+"filter", shortFilter, nil, fmt.Sprintf("Add a %s filtering rule", what))
+    flags.StringArrayVarP(flagSet, &Opt.FilterFrom, prefix+"filter-from", "", nil, fmt.Sprintf("Read %s filtering patterns from a file (use - to read from stdin)", what))
+    flags.StringArrayVarP(flagSet, &Opt.ExcludeRule, prefix+"exclude", "", nil, fmt.Sprintf("Exclude %ss matching pattern", what))
+    flags.StringArrayVarP(flagSet, &Opt.ExcludeFrom, prefix+"exclude-from", "", nil, fmt.Sprintf("Read %s exclude patterns from file (use - to read from stdin)", what))
+    flags.StringArrayVarP(flagSet, &Opt.IncludeRule, prefix+"include", "", nil, fmt.Sprintf("Include %ss matching pattern", what))
+    flags.StringArrayVarP(flagSet, &Opt.IncludeFrom, prefix+"include-from", "", nil, fmt.Sprintf("Read %s include patterns from file (use - to read from stdin)", what))
+}
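
To make the prefix mechanics concrete, here is a hedged sketch of what the two `AddRuleFlags` calls in `AddFlags` (just below) register; the import path for `filterflags` is assumed from the package name:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/filter"
	"github.com/rclone/rclone/fs/filter/filterflags"
	"github.com/spf13/pflag"
)

func main() {
	var opt filter.Opt
	flagSet := pflag.NewFlagSet("example", pflag.ContinueOnError)
	// prefix "": --filter/-f, --filter-from, --exclude, --exclude-from,
	// --include, --include-from (the file rule flags keep their old names)
	filterflags.AddRuleFlags(flagSet, &opt.RulesOpt, "file", "")
	// prefix "metadata-": --metadata-filter, --metadata-filter-from,
	// --metadata-exclude, --metadata-exclude-from, --metadata-include,
	// --metadata-include-from
	filterflags.AddRuleFlags(flagSet, &opt.MetaRules, "metadata", "metadata-")
	flagSet.VisitAll(func(f *pflag.Flag) { fmt.Println("--" + f.Name) })
}
```

Only the unprefixed `--filter` keeps the `-f` shorthand, since `shortFilter` is set only when the prefix is empty.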

 // AddFlags adds the non filing system specific flags to the command
 func AddFlags(flagSet *pflag.FlagSet) {
     rc.AddOptionReload("filter", &Opt, Reload)
     flags.BoolVarP(flagSet, &Opt.DeleteExcluded, "delete-excluded", "", false, "Delete files on dest excluded from sync")
-    flags.StringArrayVarP(flagSet, &Opt.FilterRule, "filter", "f", nil, "Add a file-filtering rule")
-    flags.StringArrayVarP(flagSet, &Opt.FilterFrom, "filter-from", "", nil, "Read filtering patterns from a file (use - to read from stdin)")
-    flags.StringArrayVarP(flagSet, &Opt.ExcludeRule, "exclude", "", nil, "Exclude files matching pattern")
-    flags.StringArrayVarP(flagSet, &Opt.ExcludeFrom, "exclude-from", "", nil, "Read exclude patterns from file (use - to read from stdin)")
+    AddRuleFlags(flagSet, &Opt.RulesOpt, "file", "")
+    AddRuleFlags(flagSet, &Opt.MetaRules, "metadata", "metadata-")
     flags.StringArrayVarP(flagSet, &Opt.ExcludeFile, "exclude-if-present", "", nil, "Exclude directories if filename is present")
-    flags.StringArrayVarP(flagSet, &Opt.IncludeRule, "include", "", nil, "Include files matching pattern")
-    flags.StringArrayVarP(flagSet, &Opt.IncludeFrom, "include-from", "", nil, "Read include patterns from file (use - to read from stdin)")
     flags.StringArrayVarP(flagSet, &Opt.FilesFrom, "files-from", "", nil, "Read list of source-file names from file (use - to read from stdin)")
     flags.StringArrayVarP(flagSet, &Opt.FilesFromRaw, "files-from-raw", "", nil, "Read list of source-file names from file without any processing of lines (use - to read from stdin)")
     flags.FVarP(flagSet, &Opt.MinAge, "min-age", "", "Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y")

fs/filter/rules.go (new file, 253 lines)

@@ -0,0 +1,253 @@
package filter

import (
    "bufio"
    "fmt"
    "os"
    "regexp"
    "strings"

    "github.com/rclone/rclone/fs"
)

// RulesOpt is configuration for a rule set
type RulesOpt struct {
    FilterRule  []string
    FilterFrom  []string
    ExcludeRule []string
    ExcludeFrom []string
    IncludeRule []string
    IncludeFrom []string
}

// rule is one filter rule
type rule struct {
    Include bool
    Regexp  *regexp.Regexp
}

// Match returns true if rule matches path
func (r *rule) Match(path string) bool {
    return r.Regexp.MatchString(path)
}

// String the rule
func (r *rule) String() string {
    c := "-"
    if r.Include {
        c = "+"
    }
    return fmt.Sprintf("%s %s", c, r.Regexp.String())
}

// rules is a slice of rules
type rules struct {
    rules    []rule
    existing map[string]struct{}
}

type addFn func(Include bool, glob string) error

// add adds a rule if it doesn't exist already
func (rs *rules) add(Include bool, re *regexp.Regexp) {
    if rs.existing == nil {
        rs.existing = make(map[string]struct{})
    }
    newRule := rule{
        Include: Include,
        Regexp:  re,
    }
    newRuleString := newRule.String()
    if _, ok := rs.existing[newRuleString]; ok {
        return // rule already exists
    }
    rs.rules = append(rs.rules, newRule)
    rs.existing[newRuleString] = struct{}{}
}

// Add adds a filter rule with include or exclude status indicated
func (rs *rules) Add(Include bool, glob string) error {
    re, err := GlobToRegexp(glob, false /* f.Opt.IgnoreCase */)
    if err != nil {
        return err
    }
    rs.add(Include, re)
    return nil
}

type clearFn func()

// clear clears all the rules
func (rs *rules) clear() {
    rs.rules = nil
    rs.existing = nil
}

// len returns the number of rules
func (rs *rules) len() int {
    return len(rs.rules)
}

// include returns whether this remote passes the filter rules.
func (rs *rules) include(remote string) bool {
    for _, rule := range rs.rules {
        if rule.Match(remote) {
            return rule.Include
        }
    }
    return true
}

// includeMany returns whether this collection of strings passes
// the filter rules.
//
// The first rule is evaluated on all the remotes and if it matches
// then the result is returned. If not the next rule is tested and so
// on.
func (rs *rules) includeMany(remotes []string) bool {
    for _, rule := range rs.rules {
        for _, remote := range remotes {
            if rule.Match(remote) {
                return rule.Include
            }
        }
    }
    return true
}
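
The ordering in `includeMany` matters: rules are tried in order, and the first rule that matches any of the strings decides the outcome, so an early exclude beats a later include even when both match. A standalone sketch of that semantics (the types here are illustrative, not the package's own):

```go
package main

import (
	"fmt"
	"regexp"
)

type rule struct {
	include bool
	re      *regexp.Regexp
}

// includeMany mirrors the rule-first evaluation order above.
func includeMany(rules []rule, remotes []string) bool {
	for _, r := range rules {
		for _, remote := range remotes {
			if r.re.MatchString(remote) {
				return r.include
			}
		}
	}
	return true // no rule matched: included by default
}

func main() {
	rules := []rule{
		{false, regexp.MustCompile(`^draft=true$`)},          // exclude first
		{true, regexp.MustCompile(`^content-type=text/.*$`)}, // include second
	}
	meta := []string{"content-type=text/plain", "draft=true"}
	fmt.Println(includeMany(rules, meta)) // false - the exclude rule wins
}
```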

// forEachLine calls fn on every line in the file pointed to by path
//
// It ignores empty lines and lines starting with '#' or ';' if raw is false
func forEachLine(path string, raw bool, fn func(string) error) (err error) {
    var scanner *bufio.Scanner
    if path == "-" {
        scanner = bufio.NewScanner(os.Stdin)
    } else {
        in, err := os.Open(path)
        if err != nil {
            return err
        }
        scanner = bufio.NewScanner(in)
        defer fs.CheckClose(in, &err)
    }
    for scanner.Scan() {
        line := scanner.Text()
        if !raw {
            line = strings.TrimSpace(line)
            if len(line) == 0 || line[0] == '#' || line[0] == ';' {
                continue
            }
        }
        err := fn(line)
        if err != nil {
            return err
        }
    }
    return scanner.Err()
}

// addRule adds a filter rule with include/exclude indicated by the prefix
//
// These are
//
//	+ glob
//	- glob
//	!
//
// '+' includes the glob, '-' excludes it and '!' resets the filter list
//
// Line comments may be introduced with '#' or ';'
func addRule(rule string, add addFn, clear clearFn) error {
    switch {
    case rule == "!":
        clear()
        return nil
    case strings.HasPrefix(rule, "- "):
        return add(false, rule[2:])
    case strings.HasPrefix(rule, "+ "):
        return add(true, rule[2:])
    }
    return fmt.Errorf("malformed rule %q", rule)
}
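
A small usage sketch of the rule syntax `addRule` accepts, driven through the exported `Filter.AddRule` shown earlier (which now delegates here); the globs are illustrative:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/filter"
)

func main() {
	opt := filter.DefaultOpt
	f, err := filter.NewFilter(&opt)
	if err != nil {
		panic(err)
	}
	_ = f.AddRule("+ *.jpg")   // '+ glob' includes
	_ = f.AddRule("- secret*") // '- glob' excludes
	_ = f.AddRule("!")         // '!' resets the rule list
	// Anything without one of the three prefixes is rejected:
	fmt.Println(f.AddRule("*.png")) // malformed rule "*.png"
}
```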

// AddRule adds a filter rule with include/exclude indicated by the prefix
//
// These are
//
//	+ glob
//	- glob
//	!
//
// '+' includes the glob, '-' excludes it and '!' resets the filter list
//
// Line comments may be introduced with '#' or ';'
func (rs *rules) AddRule(rule string) error {
    return addRule(rule, rs.Add, rs.clear)
}

// parseRules parses the rules passed in, adding them via the add function
func parseRules(opt *RulesOpt, add addFn, clear clearFn) (err error) {
    addImplicitExclude := false
    foundExcludeRule := false

    for _, rule := range opt.IncludeRule {
        err = add(true, rule)
        if err != nil {
            return err
        }
        addImplicitExclude = true
    }
    for _, rule := range opt.IncludeFrom {
        err := forEachLine(rule, false, func(line string) error {
            return add(true, line)
        })
        if err != nil {
            return err
        }
        addImplicitExclude = true
    }
    for _, rule := range opt.ExcludeRule {
        err = add(false, rule)
        if err != nil {
            return err
        }
        foundExcludeRule = true
    }
    for _, rule := range opt.ExcludeFrom {
        err := forEachLine(rule, false, func(line string) error {
            return add(false, line)
        })
        if err != nil {
            return err
        }
        foundExcludeRule = true
    }

    if addImplicitExclude && foundExcludeRule {
        fs.Errorf(nil, "Using --filter is recommended instead of both --include and --exclude as the order they are parsed in is indeterminate")
    }

    for _, rule := range opt.FilterRule {
        err = addRule(rule, add, clear)
        if err != nil {
            return err
        }
    }
    for _, rule := range opt.FilterFrom {
        err := forEachLine(rule, false, func(rule string) error {
            return addRule(rule, add, clear)
        })
        if err != nil {
            return err
        }
    }

    if addImplicitExclude {
        err = add(false, "/**")
        if err != nil {
            return err
        }
    }

    return nil
}
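
One behaviour worth calling out: when only include rules are given, `parseRules` appends an implicit `- /**`, so everything not explicitly included is excluded. A hedged sketch of the effect using the public API from this diff:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/filter"
)

func main() {
	// Equivalent to `--include *.jpg` ...
	opt := filter.DefaultOpt
	opt.RulesOpt.IncludeRule = []string{"*.jpg"}
	f, err := filter.NewFilter(&opt)
	if err != nil {
		panic(err)
	}
	// ... which behaves like `--filter "+ *.jpg" --filter "- /**"`:
	fmt.Println(f.IncludeRemote("photo.jpg")) // true
	fmt.Println(f.IncludeRemote("notes.txt")) // false, caught by the implicit "- /**"
}
```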

fs/fs.go

@@ -40,7 +40,7 @@ var (
     ErrorNotAFile            = errors.New("is not a regular file")
     ErrorNotDeleting         = errors.New("not deleting files as there were IO errors")
     ErrorNotDeletingDirs     = errors.New("not deleting directories as there were IO errors")
-    ErrorOverlapping         = errors.New("can't sync or move files on overlapping remotes")
+    ErrorOverlapping         = errors.New("can't sync or move files on overlapping remotes (try excluding the destination with a filter rule)")
     ErrorDirectoryNotEmpty   = errors.New("directory not empty")
     ErrorImmutableModified   = errors.New("immutable file modified")
     ErrorPermissionDenied    = errors.New("permission denied")

fs/operations/operations.go

@@ -424,7 +424,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
             return nil, accounting.ErrorMaxTransferLimitReachedGraceful
         }
     }
-    if doCopy := f.Features().Copy; doCopy != nil && (SameConfig(src.Fs(), f) || (SameRemoteType(src.Fs(), f) && f.Features().ServerSideAcrossConfigs)) {
+    if doCopy := f.Features().Copy; doCopy != nil && (SameConfig(src.Fs(), f) || (SameRemoteType(src.Fs(), f) && (f.Features().ServerSideAcrossConfigs || ci.ServerSideAcrossConfigs))) {
         in := tr.Account(ctx, nil) // account the transfer
         in.ServerSideCopyStart()
         newDst, err = doCopy(ctx, src, remote)
@@ -604,6 +604,7 @@ func SameObject(src, dst fs.Object) bool {
 // It returns the destination object if possible. Note that this may
 // be nil.
 func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
+    ci := fs.GetConfig(ctx)
     tr := accounting.Stats(ctx).NewCheckingTransfer(src)
     defer func() {
         if err == nil {
@@ -618,7 +619,7 @@ func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.
         return newDst, nil
     }
     // See if we have Move available
-    if doMove := fdst.Features().Move; doMove != nil && (SameConfig(src.Fs(), fdst) || (SameRemoteType(src.Fs(), fdst) && fdst.Features().ServerSideAcrossConfigs)) {
+    if doMove := fdst.Features().Move; doMove != nil && (SameConfig(src.Fs(), fdst) || (SameRemoteType(src.Fs(), fdst) && (fdst.Features().ServerSideAcrossConfigs || ci.ServerSideAcrossConfigs))) {
         // Delete destination if it exists and is not the same file as src (could be same file while seemingly different if the remote is case insensitive)
         if dst != nil && !SameObject(src, dst) {
             err = DeleteFile(ctx, dst)
@@ -814,17 +815,6 @@ func fixRoot(f fs.Info) string {
     return s
 }
 
-// Overlapping returns true if fdst and fsrc point to the same
-// underlying Fs and they overlap.
-func Overlapping(fdst, fsrc fs.Info) bool {
-    if !SameConfig(fdst, fsrc) {
-        return false
-    }
-    fdstRoot := fixRoot(fdst)
-    fsrcRoot := fixRoot(fsrc)
-    return strings.HasPrefix(fdstRoot, fsrcRoot) || strings.HasPrefix(fsrcRoot, fdstRoot)
-}
-
 // OverlappingFilterCheck returns true if fdst and fsrc point to the same
 // underlying Fs and they overlap without fdst being excluded by any filter rule.
 func OverlappingFilterCheck(ctx context.Context, fdst fs.Fs, fsrc fs.Fs) bool {
@@ -1525,7 +1515,7 @@ func Rmdirs(ctx context.Context, f fs.Fs, dir string, leaveRoot bool) error {
         dir := toDelete[i]
         // If a filter matches the directory then that
         // directory is a candidate for deletion
-        if !fi.Include(dir+"/", 0, time.Now()) {
+        if !fi.IncludeRemote(dir + "/") {
             continue
         }
         err = TryRmdir(ctx, f, dir)
@@ -1848,10 +1838,10 @@ func BackupDir(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, srcFileName string)
             return nil, fserrors.FatalError(errors.New("parameter to --backup-dir has to be on the same remote as destination"))
         }
         if srcFileName == "" {
-            if Overlapping(fdst, backupDir) {
+            if OverlappingFilterCheck(ctx, backupDir, fdst) {
                 return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't overlap"))
             }
-            if Overlapping(fsrc, backupDir) {
+            if OverlappingFilterCheck(ctx, backupDir, fsrc) {
                 return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't overlap"))
             }
         } else {
@@ -1962,11 +1952,17 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
             return err
         }
     }
-    NoNeedTransfer, err := CompareOrCopyDest(ctx, fdst, dstObj, srcObj, copyDestDir, backupDir)
-    if err != nil {
-        return err
+    needTransfer := NeedTransfer(ctx, dstObj, srcObj)
+    if needTransfer {
+        NoNeedTransfer, err := CompareOrCopyDest(ctx, fdst, dstObj, srcObj, copyDestDir, backupDir)
+        if err != nil {
+            return err
+        }
+        if NoNeedTransfer {
+            needTransfer = false
+        }
     }
-    if !NoNeedTransfer && NeedTransfer(ctx, dstObj, srcObj) {
+    if needTransfer {
         // If destination already exists, then we must move it into --backup-dir if required
         if dstObj != nil && backupDir != nil {
             err = MoveBackupDir(ctx, backupDir, dstObj)

@@ -1243,35 +1243,6 @@ func TestSame(t *testing.T) {
     }
 }
 
-func TestOverlapping(t *testing.T) {
-    a := &testFsInfo{name: "name", root: "root"}
-    slash := string(os.PathSeparator) // native path separator
-    for _, test := range []struct {
-        name     string
-        root     string
-        expected bool
-    }{
-        {"name", "root", true},
-        {"namey", "root", false},
-        {"name", "rooty", false},
-        {"namey", "rooty", false},
-        {"name", "roo", false},
-        {"name", "root/toot", true},
-        {"name", "root/toot/", true},
-        {"name", "root" + slash + "toot", true},
-        {"name", "root" + slash + "toot" + slash, true},
-        {"name", "", true},
-        {"name", "/", true},
-    } {
-        b := &testFsInfo{name: test.name, root: test.root}
-        what := fmt.Sprintf("(%q,%q) vs (%q,%q)", a.name, a.root, b.name, b.root)
-        actual := operations.Overlapping(a, b)
-        assert.Equal(t, test.expected, actual, what)
-        actual = operations.Overlapping(b, a)
-        assert.Equal(t, test.expected, actual, what)
-    }
-}
-
 // testFs is for unit testing fs.Fs
 type testFs struct {
     testFsInfo

fs/rc/jobs/job.go

@@ -406,3 +406,34 @@ func rcJobStop(ctx context.Context, in rc.Params) (out rc.Params, err error) {
     job.Stop()
     return out, nil
 }
+
+func init() {
+    rc.Add(rc.Call{
+        Path:  "job/stopgroup",
+        Fn:    rcGroupStop,
+        Title: "Stop all running jobs in a group",
+        Help: `Parameters:
+
+- group - name of the group (string).
+`,
+    })
+}
+
+// Stops all running jobs in a group
+func rcGroupStop(ctx context.Context, in rc.Params) (out rc.Params, err error) {
+    group, err := in.GetString("group")
+    if err != nil {
+        return nil, err
+    }
+    running.mu.RLock()
+    defer running.mu.RUnlock()
+    for _, job := range running.jobs {
+        if job.Group == group {
+            job.mu.Lock()
+            job.Stop()
+            job.mu.Unlock()
+        }
+    }
+    out = make(rc.Params)
+    return out, nil
+}
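
For completeness, a hypothetical remote invocation of the new `job/stopgroup` call: with an rc server running locally without auth (for example `rclone rcd --rc-no-auth`, default port 5572), any HTTP client can POST the `group` parameter as JSON. The URL, port, and group name below are assumptions, not part of the diff:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	body := bytes.NewBufferString(`{"group": "myparty"}`)
	resp, err := http.Post("http://localhost:5572/job/stopgroup", "application/json", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out)) // expect 200 OK and "{}" on success
}
```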

fs/rc/jobs/job_test.go

@@ -452,6 +452,48 @@ func TestRcSyncJobStop(t *testing.T) {
     assert.Equal(t, false, out["success"])
 }
 
+func TestRcJobStopGroup(t *testing.T) {
+    ctx := context.Background()
+    jobID = 0
+    _, _, err := NewJob(ctx, ctxFn, rc.Params{
+        "_async": true,
+        "_group": "myparty",
+    })
+    require.NoError(t, err)
+    _, _, err = NewJob(ctx, ctxFn, rc.Params{
+        "_async": true,
+        "_group": "myparty",
+    })
+    require.NoError(t, err)
+
+    call := rc.Calls.Get("job/stopgroup")
+    assert.NotNil(t, call)
+    in := rc.Params{"group": "myparty"}
+    out, err := call.Fn(context.Background(), in)
+    require.NoError(t, err)
+    require.Empty(t, out)
+
+    in = rc.Params{}
+    _, err = call.Fn(context.Background(), in)
+    require.Error(t, err)
+    assert.Contains(t, err.Error(), "Didn't find key")
+
+    time.Sleep(10 * time.Millisecond)
+
+    call = rc.Calls.Get("job/status")
+    assert.NotNil(t, call)
+    for i := 1; i <= 2; i++ {
+        in = rc.Params{"jobid": i}
+        out, err = call.Fn(context.Background(), in)
+        require.NoError(t, err)
+        require.NotNil(t, out)
+        assert.Equal(t, "myparty", out["group"])
+        assert.Equal(t, "context canceled", out["error"])
+        assert.Equal(t, true, out["finished"])
+        assert.Equal(t, false, out["success"])
+    }
+}
+
 func TestOnFinish(t *testing.T) {
     jobID = 0
     done := make(chan struct{})

fs/sync/sync.go

@@ -331,11 +331,17 @@ func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, fraction int, wg *sync.W
         tr := accounting.Stats(s.ctx).NewCheckingTransfer(src)
         // Check to see if can store this
         if src.Storable() {
-            NoNeedTransfer, err := operations.CompareOrCopyDest(s.ctx, s.fdst, pair.Dst, pair.Src, s.compareCopyDest, s.backupDir)
-            if err != nil {
-                s.processError(err)
+            needTransfer := operations.NeedTransfer(s.ctx, pair.Dst, pair.Src)
+            if needTransfer {
+                NoNeedTransfer, err := operations.CompareOrCopyDest(s.ctx, s.fdst, pair.Dst, pair.Src, s.compareCopyDest, s.backupDir)
+                if err != nil {
+                    s.processError(err)
+                }
+                if NoNeedTransfer {
+                    needTransfer = false
+                }
             }
-            if !NoNeedTransfer && operations.NeedTransfer(s.ctx, pair.Dst, pair.Src) {
+            if needTransfer {
                 // If files are treated as immutable, fail if destination exists and does not match
                 if s.ci.Immutable && pair.Dst != nil {
                     err := fs.CountError(fserrors.NoRetryError(fs.ErrorImmutableModified))

fs/versioncheck.go

@@ -1,8 +1,8 @@
-//go:build !go1.16
-// +build !go1.16
+//go:build !go1.17
+// +build !go1.17
 
 package fs
 
-// Upgrade to Go version 1.16 to compile rclone - latest stable go
+// Upgrade to Go version 1.17 to compile rclone - latest stable go
 // compiler recommended.
-func init() { Go_version_1_16_required_for_compilation() }
+func init() { Go_version_1_17_required_for_compilation() }

fs/versiontag.go

@@ -1,4 +1,4 @@
 package fs
 
 // VersionTag of rclone
-var VersionTag = "v1.59.0"
+var VersionTag = "v1.60.0"

go.mod

@@ -1,6 +1,6 @@
 module github.com/rclone/rclone
 
-go 1.16
+go 1.17
 
 require (
     bazil.org/fuse v0.0.0-20200524192727-fb710f7dfd05
@@ -51,7 +51,7 @@ require (
     github.com/spf13/cobra v1.4.0
     github.com/spf13/pflag v1.0.5
     github.com/stretchr/testify v1.7.2
-    github.com/t3rm1n4l/go-mega v0.0.0-20200416171014-ffad7fcb44b8
+    github.com/t3rm1n4l/go-mega v0.0.0-20220725095014-c4e0c2b5debf
     github.com/winfsp/cgofuse v1.5.1-0.20220421173602-ce7e5a65cac7
     github.com/xanzy/ssh-agent v0.3.1
     github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a
@@ -70,6 +70,63 @@ require (
     storj.io/uplink v1.9.0
 )
 
+require (
+    cloud.google.com/go/compute v1.6.1 // indirect
+    github.com/Azure/go-autorest v14.2.0+incompatible // indirect
+    github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
+    github.com/Azure/go-autorest/logger v0.2.1 // indirect
+    github.com/Azure/go-autorest/tracing v0.6.0 // indirect
+    github.com/anacrolix/log v0.10.1-0.20220126091220-5c1b6f3af59c // indirect
+    github.com/beorn7/perks v1.0.1 // indirect
+    github.com/calebcase/tmpfile v1.0.3 // indirect
+    github.com/cespare/xxhash/v2 v2.1.2 // indirect
+    github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
+    github.com/davecgh/go-spew v1.1.1 // indirect
+    github.com/gdamore/encoding v1.0.0 // indirect
+    github.com/go-ole/go-ole v1.2.6 // indirect
+    github.com/gogo/protobuf v1.3.2 // indirect
+    github.com/golang/protobuf v1.5.2 // indirect
+    github.com/googleapis/gax-go/v2 v2.4.0 // indirect
+    github.com/hashicorp/errwrap v1.0.0 // indirect
+    github.com/hashicorp/go-multierror v1.1.1 // indirect
+    github.com/hashicorp/go-uuid v1.0.2 // indirect
+    github.com/inconshreveable/mousetrap v1.0.0 // indirect
+    github.com/jcmturner/aescts/v2 v2.0.0 // indirect
+    github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
+    github.com/jcmturner/gofork v1.0.0 // indirect
+    github.com/jcmturner/goidentity/v6 v6.0.1 // indirect
+    github.com/jcmturner/rpc/v2 v2.0.3 // indirect
+    github.com/jmespath/go-jmespath v0.4.0 // indirect
+    github.com/kr/fs v0.1.0 // indirect
+    github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
+    github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
+    github.com/mattn/go-ieproxy v0.0.1 // indirect
+    github.com/mattn/go-isatty v0.0.14 // indirect
+    github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
+    github.com/onsi/gomega v1.13.0 // indirect
+    github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 // indirect
+    github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
+    github.com/prometheus/client_model v0.2.0 // indirect
+    github.com/prometheus/common v0.32.1 // indirect
+    github.com/prometheus/procfs v0.7.3 // indirect
+    github.com/rivo/uniseg v0.2.0 // indirect
+    github.com/russross/blackfriday/v2 v2.1.0 // indirect
+    github.com/spacemonkeygo/monkit/v3 v3.0.17 // indirect
+    github.com/tklauser/go-sysconf v0.3.10 // indirect
+    github.com/tklauser/numcpus v0.4.0 // indirect
+    github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3 // indirect
+    github.com/yusufpapurcu/wmi v1.2.2 // indirect
+    github.com/zeebo/errs v1.3.0 // indirect
+    go.opencensus.io v0.23.0 // indirect
+    google.golang.org/appengine v1.6.7 // indirect
+    google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8 // indirect
+    google.golang.org/grpc v1.47.0 // indirect
+    google.golang.org/protobuf v1.28.0 // indirect
+    gopkg.in/yaml.v3 v3.0.1 // indirect
+    storj.io/common v0.0.0-20220414110316-a5cb7172d6bf // indirect
+    storj.io/drpc v0.0.30 // indirect
+)
+
 require (
     github.com/Microsoft/go-winio v0.5.1 // indirect
     github.com/ProtonMail/go-crypto v0.0.0-20220623141421-5afb4c282135

go.sum

@@ -593,6 +593,8 @@ github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8
 github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
 github.com/t3rm1n4l/go-mega v0.0.0-20200416171014-ffad7fcb44b8 h1:IGJQmLBLYBdAknj21W3JsVof0yjEXfy1Q0K3YZebDOg=
 github.com/t3rm1n4l/go-mega v0.0.0-20200416171014-ffad7fcb44b8/go.mod h1:XWL4vDyd3JKmJx+hZWUVgCNmmhZ2dTBcaNDcxH465s0=
+github.com/t3rm1n4l/go-mega v0.0.0-20220725095014-c4e0c2b5debf h1:Y43S3e9P1NPs/QF4R5/SdlXj2d31540hP4Gk8VKNvDg=
+github.com/t3rm1n4l/go-mega v0.0.0-20220725095014-c4e0c2b5debf/go.mod h1:c+cGNU1qi9bO7ZF4IRMYk+KaZTNiQ/gQrSbyMmGFq1Q=
 github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
 github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
 github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
librclone/README.md

@@ -217,6 +217,14 @@ This needs expanding and submitting to pypi...
 
 Rust bindings are available in the `librclone` crate: https://crates.io/crates/librclone
 
+## PHP
+
+The `php` subdirectory shows how to use the C library librclone in PHP through the
+foreign function interface (FFI).
+
+Useful docs:
+- [PHP / FFI](https://www.php.net/manual/en/book.ffi.php)
+
 ## TODO
 
 - Async jobs must currently be cancelled manually - RcloneFinalize doesn't do it.

librclone/librclone.go

@@ -35,12 +35,12 @@ import (
     "github.com/rclone/rclone/librclone/librclone"
 
     _ "github.com/rclone/rclone/backend/all"   // import all backends
-    _ "github.com/rclone/rclone/cmd/cmount"    // import cmount
-    _ "github.com/rclone/rclone/cmd/mount"     // import mount
-    _ "github.com/rclone/rclone/cmd/mount2"    // import mount2
     _ "github.com/rclone/rclone/fs/operations" // import operations/* rc commands
     _ "github.com/rclone/rclone/fs/sync"       // import sync/*
     _ "github.com/rclone/rclone/lib/plugin"    // import plugins
+    _ "github.com/rclone/rclone/cmd/mount"     // import mount
+    _ "github.com/rclone/rclone/cmd/mount2"    // import mount2
+    _ "github.com/rclone/rclone/cmd/cmount"    // import cmount
 )
 
 // RcloneInitialize initializes rclone as a library

librclone/php/rclone.php (new file, 53 lines)

@@ -0,0 +1,53 @@
<?php
/*
PHP interface to librclone.so, using FFI ( Foreign Function Interface )

Create an rclone object

$rc = new Rclone( __DIR__ . '/librclone.so' );

Then call rpc calls on it

$rc->rpc( "config/listremotes", "{}" );

When finished, close it

$rc->close();
*/

class Rclone {

    protected $rclone;
    private $out;

    public function __construct( $libshared )
    {
        $this->rclone = \FFI::cdef("
            struct RcloneRPCResult {
                char* Output;
                int Status;
            };
            extern void RcloneInitialize();
            extern void RcloneFinalize();
            extern struct RcloneRPCResult RcloneRPC(char* method, char* input);
            extern void RcloneFreeString(char* str);
        ", $libshared);
        $this->rclone->RcloneInitialize();
    }

    public function rpc( $method, $input ): array
    {
        $this->out = $this->rclone->RcloneRPC( $method, $input );
        $response = [
            'output' => \FFI::string( $this->out->Output ),
            'status' => $this->out->Status
        ];
        $this->rclone->RcloneFreeString( $this->out->Output );
        return $response;
    }

    public function close( ): void
    {
        $this->rclone->RcloneFinalize();
    }
}

librclone/php/test.php (new file, 55 lines)

@@ -0,0 +1,55 @@
<?php
/*
Test program for librclone
*/

include_once ( "rclone.php" );

const REMOTE = 'gdrive:/';
const FOLDER = "rcloneTest";
const FILE = "testFile.txt";

$rc = new Rclone( __DIR__ . '/librclone.so' );

$response = $rc->rpc( "config/listremotes", "{}" );
print_r( $response );

$response = $rc->rpc("operations/mkdir",
    json_encode( [
        'fs' => REMOTE,
        'remote'=> FOLDER
    ]));
print_r( $response );

$response = $rc->rpc("operations/list",
    json_encode( [
        'fs' => REMOTE,
        'remote'=> ''
    ]));
print_r( $response );

file_put_contents("./" . FILE, "Success!!!");
$response = $rc->rpc("operations/copyfile",
    json_encode( [
        'srcFs' => getcwd(),
        'srcRemote'=> FILE,
        'dstFs' => REMOTE . FOLDER,
        'dstRemote' => FILE
    ]));
print_r( $response );

$response = $rc->rpc("operations/list",
    json_encode( [
        'fs' => REMOTE . FOLDER,
        'remote'=> ''
    ]));
print_r( $response );
if ( $response['output'] ) {
    $array = @json_decode( $response['output'], true );
    if ( $response['status'] == 200 && $array['list'] ?? 0 ) {
        $valid = $array['list'][0]['Name'] == FILE ? "SUCCESS" : "FAIL";
        print_r("The test seems: " . $valid . "\n");
    }
}

$rc->close();

vfs/file.go

@@ -39,19 +39,19 @@ type File struct {
     inode uint64 // inode number - read only
     size  int64  // size of file - read and written with atomic int64 - must be 64 bit aligned
 
-    muRW sync.Mutex // synchronize RWFileHandle.openPending(), RWFileHandle.close() and File.Remove
-
     mu               sync.RWMutex // protects the following
     d                *Dir         // parent directory
     dPath            string       // path of parent directory. NB dir rename means all Files are flushed
     o                fs.Object    // NB o may be nil if file is being written
     leaf             string       // leaf name of the object
     writers          []Handle     // writers for this file
-    nwriters         int32        // len(writers) which is read/updated with atomic
     pendingModTime   time.Time    // will be applied once o becomes available, i.e. after file was written
     pendingRenameFun func(ctx context.Context) error // will be run/renamed after all writers close
-    appendMode       bool         // file was opened with O_APPEND
     sys              atomic.Value // user defined info to be attached here
+
+    muRW sync.Mutex // synchronize RWFileHandle.openPending(), RWFileHandle.close() and File.Remove
+
+    nwriters   int32 // len(writers) which is read/updated with atomic
+    appendMode bool  // file was opened with O_APPEND
 }
 
 // newFile creates a new File

vfs/read.go

@@ -20,19 +20,19 @@ type ReadFileHandle struct {
     baseHandle
     done        func(ctx context.Context, err error)
     mu          sync.Mutex
-    cond        *sync.Cond // cond lock for out of sequence reads
-    closed      bool       // set if handle has been closed
+    cond        sync.Cond // cond lock for out of sequence reads
     r           *accounting.Account
-    readCalled  bool  // set if read has been called
     size        int64 // size of the object (0 for unknown length)
     offset      int64 // offset of read of o
     roffset     int64 // offset of Read() calls
-    noSeek      bool
-    sizeUnknown bool // set if size of source is not known
     file        *File
     hash        *hash.MultiHasher
-    opened      bool
     remote      string
+    closed      bool // set if handle has been closed
+    readCalled  bool // set if read has been called
+    noSeek      bool
+    sizeUnknown bool // set if size of source is not known
+    opened      bool
 }
 
 // Check interfaces
@@ -63,7 +63,7 @@ func newReadFileHandle(f *File) (*ReadFileHandle, error) {
         size:        nonNegative(o.Size()),
         sizeUnknown: o.Size() < 0,
     }
-    fh.cond = sync.NewCond(&fh.mu)
+    fh.cond = sync.Cond{L: &fh.mu}
     return fh, nil
 }
||||
@@ -267,7 +267,7 @@ func (fh *ReadFileHandle) readAt(p []byte, off int64) (n int, err error) {
|
||||
maxBuf = len(p)
|
||||
}
|
||||
if gap := off - fh.offset; gap > 0 && gap < int64(8*maxBuf) {
|
||||
waitSequential("read", fh.remote, fh.cond, fh.file.VFS().Opt.ReadWait, &fh.offset, off)
|
||||
waitSequential("read", fh.remote, &fh.cond, fh.file.VFS().Opt.ReadWait, &fh.offset, off)
|
||||
}
|
||||
doSeek := off != fh.offset
|
||||
if doSeek && fh.noSeek {
|
||||
|
||||
@@ -52,7 +52,7 @@ type Cache struct {
|
||||
avFn AddVirtualFn // if set, can be called to add dir entries
|
||||
|
||||
mu sync.Mutex // protects the following variables
|
||||
cond *sync.Cond // cond lock for synchronous cache cleaning
|
||||
cond sync.Cond // cond lock for synchronous cache cleaning
|
||||
item map[string]*Item // files/directories in the cache
|
||||
errItems map[string]error // items in error state
|
||||
used int64 // total size of files in the cache
|
||||
@@ -139,7 +139,7 @@ func New(ctx context.Context, fremote fs.Fs, opt *vfscommon.Options, avFn AddVir
|
||||
|
||||
// Create a channel for cleaner to be kicked upon out of space con
|
||||
c.kick = make(chan struct{}, 1)
|
||||
c.cond = sync.NewCond(&c.mu)
|
||||
c.cond = sync.Cond{L: &c.mu}
|
||||
|
||||
go c.cleaner(ctx)
|
||||
|
||||
@@ -739,27 +739,17 @@ func (c *Cache) clean(kicked bool) {
|
||||
oldItems, oldUsed := len(c.item), fs.SizeSuffix(c.used)
|
||||
c.mu.Unlock()
|
||||
|
||||
// loop cleaning the cache until we reach below cache quota
|
||||
for {
|
||||
// Remove any files that are over age
|
||||
c.purgeOld(c.opt.CacheMaxAge)
|
||||
// Remove any files that are over age
|
||||
c.purgeOld(c.opt.CacheMaxAge)
|
||||
|
||||
if int64(c.opt.CacheMaxSize) <= 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Now remove files not in use until cache size is below quota starting from the
|
||||
// oldest first
|
||||
// If have a maximum cache size...
|
||||
if int64(c.opt.CacheMaxSize) > 0 {
|
||||
// Remove files not in use until cache size is below quota starting from the oldest first
|
||||
c.purgeOverQuota(int64(c.opt.CacheMaxSize))
|
||||
|
||||
// Remove cache files that are not dirty if we are still above the max cache size
|
||||
c.purgeClean(int64(c.opt.CacheMaxSize))
|
||||
c.retryFailedResets()
|
||||
|
||||
used := c.updateUsed()
|
||||
if used <= int64(c.opt.CacheMaxSize) && len(c.errItems) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Was kicked?
|
||||
|
||||
@@ -57,16 +57,16 @@ type Item struct {
|
||||
// read only
|
||||
c *Cache // cache this is part of
|
||||
mu sync.Mutex // protect the variables
|
||||
cond *sync.Cond // synchronize with cache cleaner
|
||||
cond sync.Cond // synchronize with cache cleaner
|
||||
name string // name in the VFS
|
||||
opens int // number of times file is open
|
||||
downloaders *downloaders.Downloaders // a record of the downloaders in action - may be nil
|
||||
o fs.Object // object we are caching - may be nil
|
||||
fd *os.File // handle we are using to read and write to the file
|
||||
modified bool // set if the file has been modified since the last Open
|
||||
info Info // info about the file to persist to backing store
|
||||
writeBackID writeback.Handle // id of any writebacks in progress
|
||||
pendingAccesses int // number of threads - cache reset not allowed if not zero
|
||||
modified bool // set if the file has been modified since the last Open
|
||||
beingReset bool // cache cleaner is resetting the cache file, access not allowed
|
||||
}
|
||||
|
||||
@@ -138,7 +138,7 @@ func newItem(c *Cache, name string) (item *Item) {
|
||||
ATime: now,
|
||||
},
|
||||
}
|
||||
item.cond = sync.NewCond(&item.mu)
|
||||
item.cond = sync.Cond{L: &item.mu}
|
||||
// check the cache file exists
|
||||
osPath := c.toOSPath(name)
|
||||
fi, statErr := os.Stat(osPath)
|
||||
|
||||
12
vfs/write.go
12
vfs/write.go
@@ -15,17 +15,17 @@ import (
|
||||
type WriteFileHandle struct {
|
||||
baseHandle
|
||||
mu sync.Mutex
|
||||
cond *sync.Cond // cond lock for out of sequence writes
|
||||
closed bool // set if handle has been closed
|
||||
cond sync.Cond // cond lock for out of sequence writes
|
||||
remote string
|
||||
pipeWriter *io.PipeWriter
|
||||
o fs.Object
|
||||
result chan error
|
||||
file *File
|
||||
writeCalled bool // set the first time Write() is called
|
||||
offset int64
|
||||
opened bool
|
||||
flags int
|
||||
closed bool // set if handle has been closed
|
||||
writeCalled bool // set the first time Write() is called
|
||||
opened bool
|
||||
truncated bool
|
||||
}
|
||||
|
||||
@@ -43,7 +43,7 @@ func newWriteFileHandle(d *Dir, f *File, remote string, flags int) (*WriteFileHa
|
||||
result: make(chan error, 1),
|
||||
file: f,
|
||||
}
|
||||
fh.cond = sync.NewCond(&fh.mu)
|
||||
fh.cond = sync.Cond{L: &fh.mu}
|
||||
fh.file.addWriter(fh)
|
||||
return fh, nil
|
||||
}
|
||||
@@ -130,7 +130,7 @@ func (fh *WriteFileHandle) writeAt(p []byte, off int64) (n int, err error) {
|
||||
return 0, ECLOSED
|
||||
}
|
||||
if fh.offset != off {
|
||||
waitSequential("write", fh.remote, fh.cond, fh.file.VFS().Opt.WriteWait, &fh.offset, off)
|
||||
waitSequential("write", fh.remote, &fh.cond, fh.file.VFS().Opt.WriteWait, &fh.offset, off)
|
||||
}
|
||||
if fh.offset != off {
|
||||
fs.Errorf(fh.remote, "WriteFileHandle.Write: can't seek in file without --vfs-cache-mode >= writes")
|
||||
|
||||