Mirror of https://github.com/rclone/rclone.git, synced 2026-01-08 19:43:58 +00:00

Compare commits: 1 commit, pr-7205-pr ... fix-linux-

| Author | SHA1 | Date |
|---|---|---|
|  | f26e41d1c5 |  |
22 .github/workflows/build.yml (vendored)
@@ -32,7 +32,7 @@ jobs:
include:
- job_name: linux
os: ubuntu-latest
go: '1.21'
go: '1.21.0-rc.3'
gotags: cmount
build_flags: '-include "^linux/"'
check: true

@@ -43,14 +43,14 @@ jobs:
- job_name: linux_386
os: ubuntu-latest
go: '1.21'
go: '1.21.0-rc.3'
goarch: 386
gotags: cmount
quicktest: true

- job_name: mac_amd64
os: macos-11
go: '1.21'
go: '1.21.0-rc.3'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true

@@ -59,14 +59,14 @@ jobs:
- job_name: mac_arm64
os: macos-11
go: '1.21'
go: '1.21.0-rc.3'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true

- job_name: windows
os: windows-latest
go: '1.21'
go: '1.21.0-rc.3'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'

@@ -76,7 +76,7 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '1.21'
go: '1.21.0-rc.3'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true

@@ -99,7 +99,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0

@@ -232,7 +232,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3

- name: Code quality test
uses: golangci/golangci-lint-action@v3

@@ -244,7 +244,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: '1.21'
go-version: '1.21.0-rc.3'
check-latest: true

- name: Install govulncheck

@@ -261,7 +261,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0

@@ -269,7 +269,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: '1.21'
go-version: '1.21.0-rc.3'

- name: Go module cache
uses: actions/cache@v3

@@ -11,7 +11,7 @@ jobs:
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Login to Docker Hub

@@ -11,7 +11,7 @@ jobs:
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Get actual patch version

@@ -40,7 +40,7 @@ jobs:
name: Build docker plugin job
steps:
- name: Checkout master
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build and publish docker plugin
2 .github/workflows/winget.yml (vendored)

@@ -5,7 +5,7 @@ on:
jobs:
publish:
runs-on: ubuntu-latest
runs-on: windows-latest # Action can only run on Windows
steps:
- uses: vedantmgoyal2009/winget-releaser@v2
with:
2 .gitignore (vendored)

@@ -8,10 +8,10 @@ rclone.iml
.idea
.history
*.test
*.log
*.iml
fuzz-build.zip
*.orig
*.rej
Thumbs.db
__pycache__
.DS_Store
@@ -33,67 +33,23 @@ issues:
- staticcheck
text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'

# don't disable the revive messages about comments on exported functions
include:
- EXC0012
- EXC0013
- EXC0014
- EXC0015

run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 10m

linters-settings:
revive:
# setting rules seems to disable all the rules, so re-enable them here
rules:
- name: blank-imports
disabled: false
- name: context-as-argument
disabled: false
- name: context-keys-type
disabled: false
- name: dot-imports
disabled: false
- name: empty-block
disabled: true
- name: error-naming
disabled: false
- name: error-return
disabled: false
- name: error-strings
disabled: false
- name: errorf
disabled: false
- name: exported
disabled: false
- name: increment-decrement
disabled: true
- name: indent-error-flow
disabled: false
- name: package-comments
disabled: false
- name: range
disabled: false
- name: receiver-naming
disabled: false
- name: redefines-builtin-id
disabled: true
- name: superfluous-else
disabled: true
- name: time-naming
disabled: false
- name: unexported-return
disabled: false
- name: unreachable-code
disabled: true
- name: unused-parameter
disabled: true
- name: var-declaration
disabled: false
- name: var-naming
disabled: false
- name: empty-block
disabled: true
- name: redefines-builtin-id
disabled: true
- name: superfluous-else
disabled: true
stylecheck:
# Only enable the checks performed by the staticcheck stand-alone tool,
# as documented here: https://staticcheck.io/docs/configuration/options/#checks
@@ -419,7 +419,7 @@ remote or an fs.

Research

* Look at the interfaces defined in fs/types.go
* Look at the interfaces defined in fs/fs.go
* Study one or more of the existing remotes

Getting going

@@ -428,19 +428,14 @@ Getting going
* box is a good one to start from if you have a directory-based remote
* b2 is a good one to start from if you have a bucket-based remote
* Add your remote to the imports in backend/all/all.go
* HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good go SDK then use that instead.
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* Try to implement as many optional methods as possible as it makes the remote more usable.
* Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and rclone info to help determine the encodings needed
* Use lib/encoder to make sure we can encode any path name and rclone info to help determine the encodings needed
* rclone purge -v TestRemote:rclone-info
* rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info
* go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json
* open remote.csv in a spreadsheet and examine

Important:

* Please use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend. It makes maintenance much easier.
* If your backend is HTTP based then please use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) - this adds features like --dump bodies, --tpslimit, --user-agent without you having to code anything!

Unit tests

* Create a config entry called TestRemote for the unit tests to use
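The lib/rest and fs/fshttp advice quoted above is the core of most HTTP backends. Below is a minimal, hedged sketch of how the two packages fit together; the backend name, base URL, /user endpoint and userInfo fields are invented for illustration, while the rclone calls (fshttp.NewClient, rest.NewClient, SetRoot, CallJSON) are the real ones the guide refers to.

```go
package example

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/lib/rest"
)

// userInfo is a hypothetical JSON response from the remote's API.
type userInfo struct {
	Name  string `json:"name"`
	Quota int64  `json:"quota"`
}

// newAPIClient builds a rest.Client on top of rclone's HTTP client, so the
// backend automatically honours --dump bodies, --tpslimit, --user-agent etc.
func newAPIClient(ctx context.Context) *rest.Client {
	httpClient := fshttp.NewClient(ctx) // rclone-configured *http.Client
	return rest.NewClient(httpClient).SetRoot("https://api.example.com/v1") // assumed base URL
}

// fetchUser shows the typical CallJSON pattern used for JSON REST APIs.
func fetchUser(ctx context.Context, srv *rest.Client) (*userInfo, error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   "/user", // hypothetical endpoint
	}
	var result userInfo
	_, err := srv.CallJSON(ctx, &opts, nil, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to read user info: %w", err)
	}
	return &result, nil
}
```

The rest of a backend is then implementing the fs.Fs and fs.Object interfaces on top of helpers like this one.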
@@ -18,9 +18,6 @@ Current active maintainers of rclone are:
| Caleb Case | @calebcase | storj backend |
| wiserain | @wiserain | pikpak backend |
| albertony | @albertony | |
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
| Hideo Aoyama | @boukendesho | snap packaging |
| nielash | @nielash | bisync |

**This is a work in progress Draft**
@@ -38,7 +38,6 @@ import (
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/pikpak"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/protondrive"
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/s3"
@@ -5,6 +5,7 @@
|
||||
package azureblob
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
@@ -17,7 +18,6 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -33,6 +33,7 @@ import (
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
|
||||
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/chunksize"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
@@ -45,8 +46,10 @@ import (
|
||||
"github.com/rclone/rclone/lib/bucket"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/env"
|
||||
"github.com/rclone/rclone/lib/multipart"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/pool"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -67,6 +70,8 @@ const (
|
||||
emulatorAccount = "devstoreaccount1"
|
||||
emulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
|
||||
emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
|
||||
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
|
||||
memoryPoolUseMmap = false
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -332,16 +337,17 @@ to start uploading.`,
Advanced: true,
}, {
Name: "memory_pool_flush_time",
Default: fs.Duration(time.Minute),
Default: memoryPoolFlushTime,
Advanced: true,
Hide: fs.OptionHideBoth,
Help: `How often internal memory buffer pools will be flushed. (no longer used)`,
Help: `How often internal memory buffer pools will be flushed.

Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
}, {
Name: "memory_pool_use_mmap",
Default: false,
Default: memoryPoolUseMmap,
Advanced: true,
Hide: fs.OptionHideBoth,
Help: `Whether to use mmap buffers in internal memory pool. (no longer used)`,
Help: `Whether to use mmap buffers in internal memory pool.`,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -426,6 +432,8 @@ type Options struct {
|
||||
ArchiveTierDelete bool `config:"archive_tier_delete"`
|
||||
UseEmulator bool `config:"use_emulator"`
|
||||
DisableCheckSum bool `config:"disable_checksum"`
|
||||
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
|
||||
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
PublicAccess string `config:"public_access"`
|
||||
DirectoryMarkers bool `config:"directory_markers"`
|
||||
@@ -449,6 +457,8 @@ type Fs struct {
|
||||
cache *bucket.Cache // cache for container creation status
|
||||
pacer *fs.Pacer // To pace and retry the API calls
|
||||
uploadToken *pacer.TokenDispenser // control concurrency
|
||||
pool *pool.Pool // memory pool
|
||||
poolSize int64 // size of pages in memory pool
|
||||
publicAccess container.PublicAccessType // Container Public Access Level
|
||||
}
|
||||
|
||||
@@ -661,6 +671,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
cache: bucket.NewCache(),
cntSVCcache: make(map[string]*container.Client, 1),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
ci.Transfers,
opt.MemoryPoolUseMmap,
),
poolSize: int64(opt.ChunkSize),
}
f.publicAccess = container.PublicAccessType(opt.PublicAccess)
f.setRoot(root)
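For context on the pool wiring in this hunk, here is a small sketch of how rclone's lib/pool buffer pool is created and used. The parameter values are illustrative; the pool.New call mirrors the one made in NewFs above with memory_pool_flush_time, chunk_size, --transfers and memory_pool_use_mmap.

```go
package main

import (
	"fmt"
	"time"

	"github.com/rclone/rclone/lib/pool"
)

func main() {
	const chunkSize = 4 * 1024 * 1024 // illustrative 4 MiB buffers

	// pool.New(flushTime, bufferSize, poolSize, useMmap), as in NewFs above.
	bufferPool := pool.New(time.Minute, chunkSize, 4, false)

	buf := bufferPool.Get() // returns a []byte of chunkSize bytes
	fmt.Println("got buffer of", len(buf), "bytes")

	// ... fill buf and upload it as one chunk ...

	bufferPool.Put(buf) // return it so later chunks can reuse the memory
	// Unused buffers are dropped again after the flush interval (time.Minute here).
}
```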
@@ -1486,7 +1503,7 @@ func (f *Fs) deleteContainer(ctx context.Context, containerName string) error {
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
container, directory := f.split(dir)
|
||||
// Remove directory marker file
|
||||
if f.opt.DirectoryMarkers && container != "" && directory != "" {
|
||||
if f.opt.DirectoryMarkers && container != "" && dir != "" {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: dir + "/",
|
||||
@@ -1520,10 +1537,7 @@ func (f *Fs) Hashes() hash.Set {
|
||||
// Purge deletes all the files and directories including the old versions.
|
||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
container, directory := f.split(dir)
|
||||
if container == "" {
|
||||
return errors.New("can't purge from root")
|
||||
}
|
||||
if directory != "" {
|
||||
if container == "" || directory != "" {
|
||||
// Delegate to caller if not root of a container
|
||||
return fs.ErrorCantPurge
|
||||
}
|
||||
@@ -1580,6 +1594,19 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return f.NewObject(ctx, remote)
|
||||
}
|
||||
|
||||
func (f *Fs) getMemoryPool(size int64) *pool.Pool {
|
||||
if size == int64(f.opt.ChunkSize) {
|
||||
return f.pool
|
||||
}
|
||||
|
||||
return pool.New(
|
||||
time.Duration(f.opt.MemoryPoolFlushTime),
|
||||
int(size),
|
||||
f.ci.Transfers,
|
||||
f.opt.MemoryPoolUseMmap,
|
||||
)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Fs returns the parent Fs
|
||||
@@ -1955,8 +1982,8 @@ func (rs *readSeekCloser) Close() error {
return nil
}

// increment the array as LSB binary
func increment(xs *[8]byte) {
// increment the slice passed in as LSB binary
func increment(xs []byte) {
for i, digit := range xs {
newDigit := digit + 1
xs[i] = newDigit
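The increment helper above is a little-endian (LSB-first) counter over a byte slice; each counter value is then base64-encoded to make a block ID for StageBlock. The hunk is cut off here, so below is a hedged reconstruction of the slice version: the carry/break lines are filled in to agree with the TestIncrement cases further down in this diff, and the block ID encoding matches the StageBlock code.

```go
package main

import (
	"encoding/base64"
	"fmt"
)

// increment treats xs as a little-endian (LSB-first) binary counter and adds
// one to it, carrying into the next byte on overflow. The loop body matches
// the hunk above; the carry handling is reconstructed from the test cases
// (e.g. {0xFF, 0, 0, 0} becomes {0, 1, 0, 0}).
func increment(xs []byte) {
	for i, digit := range xs {
		newDigit := digit + 1
		xs[i] = newDigit
		if newDigit >= digit {
			// no carry needed, stop here
			break
		}
	}
}

func main() {
	binaryBlockID := make([]byte, 8) // block counter as LSB first 8 bytes
	for i := 0; i < 3; i++ {
		increment(binaryBlockID)
		// Each counter value becomes a fixed-length base64 block ID,
		// as in the StageBlock calls elsewhere in this diff.
		blockID := base64.StdEncoding.EncodeToString(binaryBlockID)
		fmt.Println(blockID)
	}
}
```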
@@ -1967,43 +1994,22 @@ func increment(xs *[8]byte) {
|
||||
}
|
||||
}
|
||||
|
||||
// record chunk number and id for Close
|
||||
type azBlock struct {
|
||||
chunkNumber int
|
||||
id string
|
||||
}
|
||||
var warnStreamUpload sync.Once
|
||||
|
||||
// Implements the fs.ChunkWriter interface
|
||||
type azChunkWriter struct {
|
||||
chunkSize int64
|
||||
size int64
|
||||
f *Fs
|
||||
ui uploadInfo
|
||||
blocksMu sync.Mutex // protects the below
|
||||
blocks []azBlock // list of blocks for finalize
|
||||
binaryBlockID [8]byte // block counter as LSB first 8 bytes
|
||||
o *Object
|
||||
}
|
||||
|
||||
// OpenChunkWriter returns the chunk size and a ChunkWriter
|
||||
// uploadMultipart uploads a file using multipart upload
|
||||
//
|
||||
// Pass in the remote and the src object
|
||||
// You can also use options to hint at the desired chunk size
|
||||
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
|
||||
// Temporary Object under construction
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
ui, err := o.prepareUpload(ctx, src, options)
|
||||
if err != nil {
|
||||
return info, nil, fmt.Errorf("failed to prepare upload: %w", err)
|
||||
}
|
||||
|
||||
// Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
|
||||
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, blb *blockblob.Client, httpHeaders *blob.HTTPHeaders) (err error) {
|
||||
// Calculate correct partSize
|
||||
partSize := f.opt.ChunkSize
|
||||
partSize := o.fs.opt.ChunkSize
|
||||
totalParts := -1
|
||||
size := src.Size()
|
||||
|
||||
// make concurrency machinery
|
||||
concurrency := o.fs.opt.UploadConcurrency
|
||||
if concurrency < 1 {
|
||||
concurrency = 1
|
||||
}
|
||||
tokens := pacer.NewTokenDispenser(concurrency)
|
||||
|
||||
// Note that the max size of file is 4.75 TB (100 MB X 50,000
|
||||
// blocks) and this is bigger than the max uncommitted block
|
||||
@@ -2017,13 +2023,13 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
// 195GB which seems like a not too unreasonable limit.
if size == -1 {
warnStreamUpload.Do(func() {
fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
f.opt.ChunkSize, partSize*fs.SizeSuffix(blockblob.MaxBlocks))
fs.Logf(o, "Streaming uploads using chunk size %v will have maximum file size of %v",
o.fs.opt.ChunkSize, partSize*fs.SizeSuffix(blockblob.MaxBlocks))
})
} else {
partSize = chunksize.Calculator(remote, size, blockblob.MaxBlocks, f.opt.ChunkSize)
partSize = chunksize.Calculator(o, size, blockblob.MaxBlocks, o.fs.opt.ChunkSize)
if partSize > fs.SizeSuffix(blockblob.MaxStageBlockBytes) {
return info, nil, fmt.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), fs.SizeSuffix(blockblob.MaxBlocks), fs.SizeSuffix(blockblob.MaxStageBlockBytes))
return fmt.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), fs.SizeSuffix(blockblob.MaxBlocks), fs.SizeSuffix(blockblob.MaxStageBlockBytes))
}
totalParts = int(fs.SizeSuffix(size) / partSize)
if fs.SizeSuffix(size)%partSize != 0 {
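A worked example of the limit that log message warns about, assuming azureblob's default 4 MiB chunk size (an assumption; the 50,000-block limit and the resulting "195GB" figure come from the comments above):

```go
package main

import "fmt"

func main() {
	const (
		chunkSize = 4 * 1024 * 1024 // assumed default azureblob chunk size of 4 MiB
		maxBlocks = 50000           // blockblob.MaxBlocks per the comments above
	)
	// Streaming (unknown size) uploads cannot grow the part size later, so the
	// whole upload is capped at chunkSize * maxBlocks.
	maxStreamed := int64(chunkSize) * maxBlocks
	fmt.Printf("max streamed upload: %d bytes (~%.0f GiB)\n",
		maxStreamed, float64(maxStreamed)/(1<<30))
	// Known-size uploads avoid this cap because chunksize.Calculator grows the
	// part size until size/partSize fits within maxBlocks.
}
```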
@@ -2033,262 +2039,173 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
|
||||
|
||||
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, partSize)
|
||||
|
||||
chunkWriter := &azChunkWriter{
|
||||
chunkSize: int64(partSize),
|
||||
size: size,
|
||||
f: f,
|
||||
ui: ui,
|
||||
o: o,
|
||||
}
|
||||
info = fs.ChunkWriterInfo{
|
||||
ChunkSize: int64(partSize),
|
||||
Concurrency: o.fs.opt.UploadConcurrency,
|
||||
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
|
||||
}
|
||||
fs.Debugf(o, "open chunk writer: started multipart upload")
|
||||
return info, chunkWriter, nil
|
||||
}
|
||||
// unwrap the accounting from the input, we use wrap to put it
|
||||
// back on after the buffering
|
||||
in, wrap := accounting.UnWrap(in)
|
||||
|
||||
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
|
||||
func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (int64, error) {
|
||||
if chunkNumber < 0 {
|
||||
err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber)
|
||||
return -1, err
|
||||
}
|
||||
// FIXME it would be nice to delete uncommitted blocks
|
||||
// See: https://github.com/rclone/rclone/issues/5583
|
||||
//
|
||||
// However there doesn't seem to be an easy way of doing this other than
|
||||
// by deleting the target.
|
||||
//
|
||||
// This means that a failed upload deletes the target which isn't ideal.
|
||||
//
|
||||
// Uploading a zero length blob and deleting it will remove the
|
||||
// uncommitted blocks I think.
|
||||
//
|
||||
// Could check to see if a file exists already and if it
|
||||
// doesn't then create a 0 length file and delete it to flush
|
||||
// the uncommitted blocks.
|
||||
//
|
||||
// This is what azcopy does
|
||||
// https://github.com/MicrosoftDocs/azure-docs/issues/36347#issuecomment-541457962
|
||||
// defer atexit.OnError(&err, func() {
|
||||
// fs.Debugf(o, "Cancelling multipart upload")
|
||||
// // Code goes here!
|
||||
// })()
|
||||
|
||||
// Upload the block, with MD5 for check
|
||||
m := md5.New()
|
||||
currentChunkSize, err := io.Copy(m, reader)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
// If no data read, don't write the chunk
|
||||
if currentChunkSize == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
md5sum := m.Sum(nil)
|
||||
transactionalMD5 := md5sum[:]
|
||||
// Upload the chunks
|
||||
var (
|
||||
g, gCtx = errgroup.WithContext(ctx)
|
||||
remaining = fs.SizeSuffix(size) // remaining size in file for logging only, -1 if size < 0
|
||||
position = fs.SizeSuffix(0) // position in file
|
||||
memPool = o.fs.getMemoryPool(int64(partSize)) // pool to get memory from
|
||||
finished = false // set when we have read EOF
|
||||
blocks []string // list of blocks for finalize
|
||||
binaryBlockID = make([]byte, 8) // block counter as LSB first 8 bytes
|
||||
)
|
||||
for part := 0; !finished; part++ {
|
||||
// Get a block of memory from the pool and a token which limits concurrency
|
||||
tokens.Get()
|
||||
buf := memPool.Get()
|
||||
|
||||
// increment the blockID and save the blocks for finalize
|
||||
increment(&w.binaryBlockID)
|
||||
blockID := base64.StdEncoding.EncodeToString(w.binaryBlockID[:])
|
||||
|
||||
// Save the blockID for the commit
|
||||
w.blocksMu.Lock()
|
||||
w.blocks = append(w.blocks, azBlock{
|
||||
chunkNumber: chunkNumber,
|
||||
id: blockID,
|
||||
})
|
||||
w.blocksMu.Unlock()
|
||||
|
||||
err = w.f.pacer.Call(func() (bool, error) {
|
||||
// rewind the reader on retry and after reading md5
|
||||
_, err = reader.Seek(0, io.SeekStart)
|
||||
if err != nil {
|
||||
return false, err
|
||||
free := func() {
|
||||
memPool.Put(buf) // return the buf
|
||||
tokens.Put() // return the token
|
||||
}
|
||||
options := blockblob.StageBlockOptions{
|
||||
// Specify the transactional md5 for the body, to be validated by the service.
|
||||
TransactionalValidation: blob.TransferValidationTypeMD5(transactionalMD5),
|
||||
|
||||
// Fail fast, in case an errgroup managed function returns an error
|
||||
// gCtx is cancelled. There is no point in uploading all the other parts.
|
||||
if gCtx.Err() != nil {
|
||||
free()
|
||||
break
|
||||
}
|
||||
_, err = w.ui.blb.StageBlock(ctx, blockID, &readSeekCloser{Reader: reader, Seeker: reader}, &options)
|
||||
if err != nil {
|
||||
if chunkNumber <= 8 {
|
||||
return w.f.shouldRetry(ctx, err)
|
||||
|
||||
// Read the chunk
|
||||
n, err := readers.ReadFill(in, buf) // this can never return 0, nil
|
||||
if err == io.EOF {
|
||||
if n == 0 { // end if no data
|
||||
free()
|
||||
break
|
||||
}
|
||||
// retry all chunks once have done the first few
|
||||
return true, err
|
||||
finished = true
|
||||
} else if err != nil {
|
||||
free()
|
||||
return fmt.Errorf("multipart upload failed to read source: %w", err)
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("failed to upload chunk %d with %v bytes: %w", chunkNumber+1, currentChunkSize, err)
|
||||
buf = buf[:n]
|
||||
|
||||
// increment the blockID and save the blocks for finalize
|
||||
increment(binaryBlockID)
|
||||
blockID := base64.StdEncoding.EncodeToString(binaryBlockID)
|
||||
blocks = append(blocks, blockID)
|
||||
|
||||
// Transfer the chunk
|
||||
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %d", part+1, totalParts, position, fs.SizeSuffix(size), len(buf))
|
||||
g.Go(func() (err error) {
|
||||
defer free()
|
||||
|
||||
// Upload the block, with MD5 for check
|
||||
md5sum := md5.Sum(buf)
|
||||
transactionalMD5 := md5sum[:]
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
bufferReader := bytes.NewReader(buf)
|
||||
wrappedReader := wrap(bufferReader)
|
||||
rs := readSeekCloser{wrappedReader, bufferReader}
|
||||
options := blockblob.StageBlockOptions{
|
||||
// Specify the transactional md5 for the body, to be validated by the service.
|
||||
TransactionalValidation: blob.TransferValidationTypeMD5(transactionalMD5),
|
||||
}
|
||||
_, err = blb.StageBlock(ctx, blockID, &rs, &options)
|
||||
return o.fs.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("multipart upload failed to upload part: %w", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
// ready for next block
|
||||
if size >= 0 {
|
||||
remaining -= partSize
|
||||
}
|
||||
position += partSize
|
||||
}
|
||||
|
||||
fs.Debugf(w.o, "multipart upload wrote chunk %d with %v bytes", chunkNumber+1, currentChunkSize)
|
||||
return currentChunkSize, err
|
||||
}
|
||||
|
||||
// Abort the multpart upload.
|
||||
//
|
||||
// FIXME it would be nice to delete uncommitted blocks.
|
||||
//
|
||||
// See: https://github.com/rclone/rclone/issues/5583
|
||||
//
|
||||
// However there doesn't seem to be an easy way of doing this other than
|
||||
// by deleting the target.
|
||||
//
|
||||
// This means that a failed upload deletes the target which isn't ideal.
|
||||
//
|
||||
// Uploading a zero length blob and deleting it will remove the
|
||||
// uncommitted blocks I think.
|
||||
//
|
||||
// Could check to see if a file exists already and if it doesn't then
|
||||
// create a 0 length file and delete it to flush the uncommitted
|
||||
// blocks.
|
||||
//
|
||||
// This is what azcopy does
|
||||
// https://github.com/MicrosoftDocs/azure-docs/issues/36347#issuecomment-541457962
|
||||
func (w *azChunkWriter) Abort(ctx context.Context) error {
|
||||
fs.Debugf(w.o, "multipart upload aborted (did nothing - see issue #5583)")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close and finalise the multipart upload
|
||||
func (w *azChunkWriter) Close(ctx context.Context) (err error) {
|
||||
// sort the completed parts by part number
|
||||
sort.Slice(w.blocks, func(i, j int) bool {
|
||||
return w.blocks[i].chunkNumber < w.blocks[j].chunkNumber
|
||||
})
|
||||
|
||||
// Create a list of block IDs
|
||||
blockIDs := make([]string, len(w.blocks))
|
||||
for i := range w.blocks {
|
||||
blockIDs[i] = w.blocks[i].id
|
||||
err = g.Wait()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
options := blockblob.CommitBlockListOptions{
|
||||
Metadata: w.o.getMetadata(),
|
||||
Tier: parseTier(w.f.opt.AccessTier),
|
||||
HTTPHeaders: &w.ui.httpHeaders,
|
||||
Metadata: o.getMetadata(),
|
||||
Tier: parseTier(o.fs.opt.AccessTier),
|
||||
HTTPHeaders: httpHeaders,
|
||||
}
|
||||
|
||||
// Finalise the upload session
|
||||
err = w.f.pacer.Call(func() (bool, error) {
|
||||
_, err := w.ui.blb.CommitBlockList(ctx, blockIDs, &options)
|
||||
return w.f.shouldRetry(ctx, err)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err := blb.CommitBlockList(ctx, blocks, &options)
|
||||
return o.fs.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to complete multipart upload: %w", err)
|
||||
return fmt.Errorf("multipart upload failed to finalize: %w", err)
|
||||
}
|
||||
fs.Debugf(w.o, "multipart upload finished")
|
||||
return err
|
||||
}
|
||||
|
||||
var warnStreamUpload sync.Once
|
||||
|
||||
// uploadMultipart uploads a file using multipart upload
|
||||
//
|
||||
// Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
|
||||
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (ui uploadInfo, err error) {
|
||||
chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
|
||||
Open: o.fs,
|
||||
OpenOptions: options,
|
||||
})
|
||||
if err != nil {
|
||||
return ui, err
|
||||
}
|
||||
return chunkWriter.(*azChunkWriter).ui, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// uploadSinglepart uploads a short blob using a single part upload
|
||||
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, ui uploadInfo) (err error) {
|
||||
chunkSize := int64(o.fs.opt.ChunkSize)
|
||||
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, blb *blockblob.Client, httpHeaders *blob.HTTPHeaders) (err error) {
|
||||
// fs.Debugf(o, "Single part upload starting of object %d bytes", size)
|
||||
if size > chunkSize || size < 0 {
|
||||
return fmt.Errorf("internal error: single part upload size too big %d > %d", size, chunkSize)
|
||||
if size > o.fs.poolSize || size < 0 {
|
||||
return fmt.Errorf("internal error: single part upload size too big %d > %d", size, o.fs.opt.ChunkSize)
|
||||
}
|
||||
|
||||
rw := multipart.NewRW()
|
||||
defer fs.CheckClose(rw, &err)
|
||||
buf := o.fs.pool.Get()
|
||||
defer o.fs.pool.Put(buf)
|
||||
|
||||
n, err := io.CopyN(rw, in, size+1)
|
||||
n, err := readers.ReadFill(in, buf)
|
||||
if err == nil {
|
||||
// Check to see whether in is exactly len(buf) or bigger
|
||||
var buf2 = []byte{0}
|
||||
n2, err2 := readers.ReadFill(in, buf2)
|
||||
if n2 != 0 || err2 != io.EOF {
|
||||
return fmt.Errorf("single part upload read failed: object longer than expected (expecting %d but got > %d)", size, len(buf))
|
||||
}
|
||||
}
|
||||
if err != nil && err != io.EOF {
|
||||
return fmt.Errorf("single part upload read failed: %w", err)
|
||||
}
|
||||
if n != size {
|
||||
if int64(n) != size {
|
||||
return fmt.Errorf("single part upload: expecting to read %d bytes but read %d", size, n)
|
||||
}
|
||||
|
||||
rs := &readSeekCloser{Reader: rw, Seeker: rw}
|
||||
b := bytes.NewReader(buf[:n])
|
||||
rs := &readSeekCloser{Reader: b, Seeker: b}
|
||||
|
||||
options := blockblob.UploadOptions{
|
||||
Metadata: o.getMetadata(),
|
||||
Tier: parseTier(o.fs.opt.AccessTier),
|
||||
HTTPHeaders: &ui.httpHeaders,
|
||||
HTTPHeaders: httpHeaders,
|
||||
}
|
||||
|
||||
return o.fs.pacer.Call(func() (bool, error) {
|
||||
// rewind the reader on retry
|
||||
_, err = rs.Seek(0, io.SeekStart)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
_, err = ui.blb.Upload(ctx, rs, &options)
|
||||
// Don't retry, return a retry error instead
|
||||
return o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
_, err = blb.Upload(ctx, rs, &options)
|
||||
return o.fs.shouldRetry(ctx, err)
|
||||
})
|
||||
}
|
||||
|
||||
// Info needed for an upload
|
||||
type uploadInfo struct {
|
||||
blb *blockblob.Client
|
||||
httpHeaders blob.HTTPHeaders
|
||||
isDirMarker bool
|
||||
}
|
||||
|
||||
// Prepare the object for upload
|
||||
func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {
|
||||
container, containerPath := o.split()
|
||||
if container == "" || containerPath == "" {
|
||||
return ui, fmt.Errorf("can't upload to root - need a container")
|
||||
}
|
||||
// Create parent dir/bucket if not saving directory marker
|
||||
_, ui.isDirMarker = o.meta[dirMetaKey]
|
||||
if !ui.isDirMarker {
|
||||
err = o.fs.mkdirParent(ctx, o.remote)
|
||||
if err != nil {
|
||||
return ui, err
|
||||
}
|
||||
}
|
||||
|
||||
// Update Mod time
|
||||
o.updateMetadataWithModTime(src.ModTime(ctx))
|
||||
if err != nil {
|
||||
return ui, err
|
||||
}
|
||||
|
||||
// Create the HTTP headers for the upload
|
||||
ui.httpHeaders = blob.HTTPHeaders{
|
||||
BlobContentType: pString(fs.MimeType(ctx, src)),
|
||||
}
|
||||
|
||||
// Compute the Content-MD5 of the file. As we stream all uploads it
|
||||
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
|
||||
if !o.fs.opt.DisableCheckSum {
|
||||
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
|
||||
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
|
||||
if err == nil {
|
||||
ui.httpHeaders.BlobContentMD5 = sourceMD5bytes
|
||||
} else {
|
||||
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Apply upload options (also allows one to overwrite content-type)
|
||||
for _, option := range options {
|
||||
key, value := option.Header()
|
||||
lowerKey := strings.ToLower(key)
|
||||
switch lowerKey {
|
||||
case "":
|
||||
// ignore
|
||||
case "cache-control":
|
||||
ui.httpHeaders.BlobCacheControl = pString(value)
|
||||
case "content-disposition":
|
||||
ui.httpHeaders.BlobContentDisposition = pString(value)
|
||||
case "content-encoding":
|
||||
ui.httpHeaders.BlobContentEncoding = pString(value)
|
||||
case "content-language":
|
||||
ui.httpHeaders.BlobContentLanguage = pString(value)
|
||||
case "content-type":
|
||||
ui.httpHeaders.BlobContentType = pString(value)
|
||||
}
|
||||
}
|
||||
|
||||
ui.blb = o.fs.getBlockBlobSVC(container, containerPath)
|
||||
return ui, nil
|
||||
}
|
||||
|
||||
// Update the object with the contents of the io.Reader, modTime and size
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
@@ -2304,26 +2221,80 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return errCantUpdateArchiveTierBlobs
|
||||
}
|
||||
}
|
||||
|
||||
size := src.Size()
|
||||
multipartUpload := size < 0 || size > int64(o.fs.opt.ChunkSize)
|
||||
var ui uploadInfo
|
||||
|
||||
if multipartUpload {
|
||||
ui, err = o.uploadMultipart(ctx, in, src, options...)
|
||||
} else {
|
||||
ui, err = o.prepareUpload(ctx, src, options)
|
||||
container, containerPath := o.split()
|
||||
if container == "" || containerPath == "" {
|
||||
return fmt.Errorf("can't upload to root - need a container")
|
||||
}
|
||||
// Create parent dir/bucket if not saving directory marker
|
||||
_, isDirMarker := o.meta[dirMetaKey]
|
||||
if !isDirMarker {
|
||||
err = o.fs.mkdirParent(ctx, o.remote)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to prepare upload: %w", err)
|
||||
return err
|
||||
}
|
||||
err = o.uploadSinglepart(ctx, in, size, ui)
|
||||
}
|
||||
|
||||
// Update Mod time
|
||||
fs.Debugf(nil, "o.meta = %+v", o.meta)
|
||||
o.updateMetadataWithModTime(src.ModTime(ctx))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create the HTTP headers for the upload
|
||||
httpHeaders := blob.HTTPHeaders{
|
||||
BlobContentType: pString(fs.MimeType(ctx, src)),
|
||||
}
|
||||
|
||||
// Compute the Content-MD5 of the file. As we stream all uploads it
|
||||
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
|
||||
if !o.fs.opt.DisableCheckSum {
|
||||
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
|
||||
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
|
||||
if err == nil {
|
||||
httpHeaders.BlobContentMD5 = sourceMD5bytes
|
||||
} else {
|
||||
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Apply upload options (also allows one to overwrite content-type)
|
||||
for _, option := range options {
|
||||
key, value := option.Header()
|
||||
lowerKey := strings.ToLower(key)
|
||||
switch lowerKey {
|
||||
case "":
|
||||
// ignore
|
||||
case "cache-control":
|
||||
httpHeaders.BlobCacheControl = pString(value)
|
||||
case "content-disposition":
|
||||
httpHeaders.BlobContentDisposition = pString(value)
|
||||
case "content-encoding":
|
||||
httpHeaders.BlobContentEncoding = pString(value)
|
||||
case "content-language":
|
||||
httpHeaders.BlobContentLanguage = pString(value)
|
||||
case "content-type":
|
||||
httpHeaders.BlobContentType = pString(value)
|
||||
}
|
||||
}
|
||||
|
||||
blb := o.fs.getBlockBlobSVC(container, containerPath)
|
||||
size := src.Size()
|
||||
multipartUpload := size < 0 || size > o.fs.poolSize
|
||||
|
||||
fs.Debugf(nil, "o.meta = %+v", o.meta)
|
||||
if multipartUpload {
|
||||
err = o.uploadMultipart(ctx, in, size, blb, &httpHeaders)
|
||||
} else {
|
||||
err = o.uploadSinglepart(ctx, in, size, blb, &httpHeaders)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Refresh metadata on object
|
||||
if !ui.isDirMarker {
|
||||
if !isDirMarker {
|
||||
o.clearMetaData()
|
||||
err = o.readMetaData(ctx)
|
||||
if err != nil {
|
||||
@@ -2412,14 +2383,13 @@ func parseTier(tier string) *blob.AccessTier {
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.Copier = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.Purger = &Fs{}
|
||||
_ fs.ListRer = &Fs{}
|
||||
_ fs.OpenChunkWriter = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.GetTierer = &Object{}
|
||||
_ fs.SetTierer = &Object{}
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.Copier = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.Purger = &Fs{}
|
||||
_ fs.ListRer = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.GetTierer = &Object{}
|
||||
_ fs.SetTierer = &Object{}
|
||||
)
|
||||
|
||||
@@ -20,18 +20,17 @@ func (f *Fs) InternalTest(t *testing.T) {
|
||||
|
||||
func TestIncrement(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in [8]byte
|
||||
want [8]byte
|
||||
in []byte
|
||||
want []byte
|
||||
}{
|
||||
{[8]byte{0, 0, 0, 0}, [8]byte{1, 0, 0, 0}},
|
||||
{[8]byte{0xFE, 0, 0, 0}, [8]byte{0xFF, 0, 0, 0}},
|
||||
{[8]byte{0xFF, 0, 0, 0}, [8]byte{0, 1, 0, 0}},
|
||||
{[8]byte{0, 1, 0, 0}, [8]byte{1, 1, 0, 0}},
|
||||
{[8]byte{0xFF, 0xFF, 0xFF, 0xFE}, [8]byte{0, 0, 0, 0xFF}},
|
||||
{[8]byte{0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 1}},
|
||||
{[8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 0, 0, 0}},
|
||||
{[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
|
||||
{[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
|
||||
{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
|
||||
{[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
|
||||
{[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
|
||||
{[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
|
||||
} {
|
||||
increment(&test.in)
|
||||
increment(test.in)
|
||||
assert.Equal(t, test.want, test.in)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -31,9 +31,9 @@ func TestIntegration2(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("Skipping as -remote set")
|
||||
}
|
||||
name := "TestAzureBlob"
|
||||
name := "TestAzureBlob:"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
RemoteName: name,
|
||||
NilObject: (*Object)(nil),
|
||||
TiersToTest: []string{"Hot", "Cool"},
|
||||
ChunkedUpload: fstests.ChunkedUploadConfig{
|
||||
|
||||
148 backend/b2/b2.go

@@ -32,7 +32,6 @@ import (
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/rest"

@@ -58,7 +57,9 @@ const (
minChunkSize = 5 * fs.Mebi
defaultChunkSize = 96 * fs.Mebi
defaultUploadCutoff = 200 * fs.Mebi
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
)

// Globals
@@ -148,18 +149,6 @@ might a maximum of "--transfers" chunks in progress at once.
5,000,000 Bytes is the minimum size.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.

This is the number of chunks of the same file that are uploaded
concurrently.

Note that chunks are stored in memory and there may be up to
"--transfers" * "--b2-upload-concurrency" chunks stored at once
in memory.`,
Default: 16,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files.
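The memory warning in the upload_concurrency help is easy to quantify. A quick illustration using the defaults that appear in this diff (96 MiB chunk size, concurrency 16) and an assumed --transfers of 4:

```go
package main

import "fmt"

func main() {
	const (
		chunkSize   = 96 * 1024 * 1024 // defaultChunkSize from the const block above
		concurrency = 16               // default b2 upload_concurrency in this diff
		transfers   = 4                // assumed --transfers value
	)
	// Worst case per the help text: every transfer keeps its full set of
	// in-flight chunks buffered at the same time.
	worstCase := int64(chunkSize) * concurrency * transfers
	fmt.Printf("worst-case buffered upload data: %d bytes (%d GiB)\n",
		worstCase, worstCase/(1<<30)) // 6 GiB with these values
}
```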
@@ -199,16 +188,16 @@ The minimum value is 1 second. The maximum value is one week.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "memory_pool_flush_time",
|
||||
Default: fs.Duration(time.Minute),
|
||||
Default: memoryPoolFlushTime,
|
||||
Advanced: true,
|
||||
Hide: fs.OptionHideBoth,
|
||||
Help: `How often internal memory buffer pools will be flushed. (no longer used)`,
|
||||
Help: `How often internal memory buffer pools will be flushed.
|
||||
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
|
||||
This option controls how often unused buffers will be removed from the pool.`,
|
||||
}, {
|
||||
Name: "memory_pool_use_mmap",
|
||||
Default: false,
|
||||
Default: memoryPoolUseMmap,
|
||||
Advanced: true,
|
||||
Hide: fs.OptionHideBoth,
|
||||
Help: `Whether to use mmap buffers in internal memory pool. (no longer used)`,
|
||||
Help: `Whether to use mmap buffers in internal memory pool.`,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -235,10 +224,11 @@ type Options struct {
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
DisableCheckSum bool `config:"disable_checksum"`
|
||||
DownloadURL string `config:"download_url"`
|
||||
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
|
||||
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
|
||||
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
@@ -263,6 +253,7 @@ type Fs struct {
|
||||
authMu sync.Mutex // lock for authorizing the account
|
||||
pacer *fs.Pacer // To pace and retry the API calls
|
||||
uploadToken *pacer.TokenDispenser // control concurrency
|
||||
pool *pool.Pool // memory pool
|
||||
}
|
||||
|
||||
// Object describes a b2 object
|
||||
@@ -467,6 +458,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
uploads: make(map[string][]*api.GetUploadURLResponse),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
|
||||
pool: pool.New(
|
||||
time.Duration(opt.MemoryPoolFlushTime),
|
||||
int(opt.ChunkSize),
|
||||
ci.Transfers,
|
||||
opt.MemoryPoolUseMmap,
|
||||
),
|
||||
}
|
||||
f.setRoot(root)
|
||||
f.features = (&fs.Features{
|
||||
@@ -600,24 +597,23 @@ func (f *Fs) clearUploadURL(bucketID string) {
f.uploadMu.Unlock()
}

// getRW gets a RW buffer and an upload token
// getBuf gets a buffer of f.opt.ChunkSize and an upload token
//
// If noBuf is set then it just gets an upload token
func (f *Fs) getRW(noBuf bool) (rw *pool.RW) {
func (f *Fs) getBuf(noBuf bool) (buf []byte) {
f.uploadToken.Get()
if !noBuf {
rw = multipart.NewRW()
buf = f.pool.Get()
}
return rw
return buf
}

// putRW returns a RW buffer to the memory pool and returns an upload
// token
// putBuf returns a buffer to the memory pool and an upload token
//
// If buf is nil then it just returns the upload token
func (f *Fs) putRW(rw *pool.RW) {
if rw != nil {
_ = rw.Close()
// If noBuf is set then it just returns the upload token
func (f *Fs) putBuf(buf []byte, noBuf bool) {
if !noBuf {
f.pool.Put(buf)
}
f.uploadToken.Put()
}
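The getBuf/putBuf pair above couples a buffer with a concurrency token, so memory use and parallelism are bounded together. A small sketch of that acquire/release pattern using the same rclone packages; the worker loop and sizes are illustrative, while TokenDispenser and pool are the real types used in the diff.

```go
package main

import (
	"fmt"
	"time"

	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/pool"
)

func main() {
	const (
		chunkSize   = 1024 * 1024 // illustrative 1 MiB chunks
		concurrency = 4           // max chunks in flight at once
	)
	tokens := pacer.NewTokenDispenser(concurrency)
	buffers := pool.New(time.Minute, chunkSize, concurrency, false)

	results := make(chan int, 8)
	for part := 0; part < 8; part++ {
		tokens.Get()         // blocks once 'concurrency' chunks are in flight
		buf := buffers.Get() // one chunk-sized buffer per in-flight chunk
		go func(part int, buf []byte) {
			defer func() {
				buffers.Put(buf) // return the buffer...
				tokens.Put()     // ...and the token, exactly like putBuf above
			}()
			// ... read the part into buf and upload it here ...
			results <- part
		}(part, buf)
	}
	for i := 0; i < 8; i++ {
		fmt.Println("uploaded part", <-results)
	}
}
```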
@@ -1297,7 +1293,7 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return up.Copy(ctx)
|
||||
return up.Upload(ctx)
|
||||
}
|
||||
|
||||
dstBucket, dstPath := dstObj.split()
|
||||
@@ -1426,7 +1422,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
absPath := "/" + urlEncode(bucketPath)
|
||||
absPath := "/" + bucketPath
|
||||
link = RootURL + "/file/" + urlEncode(bucket) + absPath
|
||||
bucketType, err := f.getbucketType(ctx, bucket)
|
||||
if err != nil {
|
||||
@@ -1865,11 +1861,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if size < 0 {
|
||||
if size == -1 {
|
||||
// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
|
||||
rw := o.fs.getRW(false)
|
||||
buf := o.fs.getBuf(false)
|
||||
|
||||
n, err := io.CopyN(rw, in, int64(o.fs.opt.ChunkSize))
|
||||
n, err := io.ReadFull(in, buf)
|
||||
if err == nil {
|
||||
bufReader := bufio.NewReader(in)
|
||||
in = bufReader
|
||||
@@ -1880,26 +1876,26 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
fs.Debugf(o, "File is big enough for chunked streaming")
|
||||
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
|
||||
if err != nil {
|
||||
o.fs.putRW(rw)
|
||||
o.fs.putBuf(buf, false)
|
||||
return err
|
||||
}
|
||||
// NB Stream returns the buffer and token
|
||||
return up.Stream(ctx, rw)
|
||||
} else if err == io.EOF {
|
||||
return up.Stream(ctx, buf)
|
||||
} else if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
|
||||
defer o.fs.putRW(rw)
|
||||
size = n
|
||||
in = rw
|
||||
defer o.fs.putBuf(buf, false)
|
||||
size = int64(n)
|
||||
in = bytes.NewReader(buf[:n])
|
||||
} else {
|
||||
o.fs.putRW(rw)
|
||||
o.fs.putBuf(buf, false)
|
||||
return err
|
||||
}
|
||||
} else if size > int64(o.fs.opt.UploadCutoff) {
|
||||
_, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
|
||||
Open: o.fs,
|
||||
OpenOptions: options,
|
||||
})
|
||||
return err
|
||||
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return up.Upload(ctx)
|
||||
}
|
||||
|
||||
modTime := src.ModTime(ctx)
|
||||
@@ -2007,41 +2003,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return o.decodeMetaDataFileInfo(&response)
|
||||
}
|
||||
|
||||
// OpenChunkWriter returns the chunk size and a ChunkWriter
|
||||
//
|
||||
// Pass in the remote and the src object
|
||||
// You can also use options to hint at the desired chunk size
|
||||
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
|
||||
// FIXME what if file is smaller than 1 chunk?
|
||||
if f.opt.Versions {
|
||||
return info, nil, errNotWithVersions
|
||||
}
|
||||
if f.opt.VersionAt.IsSet() {
|
||||
return info, nil, errNotWithVersionAt
|
||||
}
|
||||
//size := src.Size()
|
||||
|
||||
// Temporary Object under construction
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: src.Remote(),
|
||||
}
|
||||
|
||||
bucket, _ := o.split()
|
||||
err = f.makeBucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return info, nil, err
|
||||
}
|
||||
|
||||
info = fs.ChunkWriterInfo{
|
||||
ChunkSize: int64(f.opt.ChunkSize),
|
||||
Concurrency: o.fs.opt.UploadConcurrency,
|
||||
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
|
||||
}
|
||||
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil)
|
||||
return info, up, err
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
bucket, bucketPath := o.split()
|
||||
@@ -2069,15 +2030,14 @@ func (o *Object) ID() string {
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.Purger = &Fs{}
|
||||
_ fs.Copier = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.CleanUpper = &Fs{}
|
||||
_ fs.ListRer = &Fs{}
|
||||
_ fs.PublicLinker = &Fs{}
|
||||
_ fs.OpenChunkWriter = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.IDer = &Object{}
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.Purger = &Fs{}
|
||||
_ fs.Copier = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.CleanUpper = &Fs{}
|
||||
_ fs.ListRer = &Fs{}
|
||||
_ fs.PublicLinker = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.IDer = &Object{}
|
||||
)
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||
package b2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
@@ -13,6 +14,7 @@ import (
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/backend/b2/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
@@ -78,8 +80,7 @@ type largeUpload struct {
|
||||
wrap accounting.WrapFn // account parts being transferred
|
||||
id string // ID of the file being uploaded
|
||||
size int64 // total size
|
||||
parts int // calculated number of parts, if known
|
||||
sha1smu sync.Mutex // mutex to protect sha1s
|
||||
parts int64 // calculated number of parts, if known
|
||||
sha1s []string // slice of SHA1s for each part
|
||||
uploadMu sync.Mutex // lock for upload variable
|
||||
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
|
||||
@@ -92,16 +93,18 @@ type largeUpload struct {
|
||||
// If newInfo is set then metadata from that will be used instead of reading it from src
|
||||
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
|
||||
size := src.Size()
|
||||
parts := 0
|
||||
parts := int64(0)
|
||||
sha1SliceSize := int64(maxParts)
|
||||
chunkSize := defaultChunkSize
|
||||
if size == -1 {
|
||||
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
|
||||
} else {
|
||||
chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
|
||||
parts = int(size / int64(chunkSize))
|
||||
parts = size / int64(chunkSize)
|
||||
if size%int64(chunkSize) != 0 {
|
||||
parts++
|
||||
}
|
||||
sha1SliceSize = parts
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
@@ -149,7 +152,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
|
||||
id: response.ID,
|
||||
size: size,
|
||||
parts: parts,
|
||||
sha1s: make([]string, 0, 16),
|
||||
sha1s: make([]string, sha1SliceSize),
|
||||
chunkSize: int64(chunkSize),
|
||||
}
|
||||
// unwrap the accounting from the input, we use wrap to put it
|
||||
@@ -200,39 +203,10 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
|
||||
up.uploadMu.Unlock()
|
||||
}
|
||||
|
||||
// Add an sha1 to the being built up sha1s
|
||||
func (up *largeUpload) addSha1(chunkNumber int, sha1 string) {
|
||||
up.sha1smu.Lock()
|
||||
defer up.sha1smu.Unlock()
|
||||
if len(up.sha1s) < chunkNumber+1 {
|
||||
up.sha1s = append(up.sha1s, make([]string, chunkNumber+1-len(up.sha1s))...)
|
||||
}
|
||||
up.sha1s[chunkNumber] = sha1
|
||||
}
|
||||
|
||||
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
|
||||
func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (size int64, err error) {
|
||||
// Only account after the checksum reads have been done
|
||||
if do, ok := reader.(pool.DelayAccountinger); ok {
|
||||
// To figure out this number, do a transfer and if the accounted size is 0 or a
|
||||
// multiple of what it should be, increase or decrease this number.
|
||||
do.DelayAccounting(1)
|
||||
}
|
||||
|
||||
err = up.f.pacer.Call(func() (bool, error) {
|
||||
// Discover the size by seeking to the end
|
||||
size, err = reader.Seek(0, io.SeekEnd)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// rewind the reader on retry and after reading size
|
||||
_, err = reader.Seek(0, io.SeekStart)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
fs.Debugf(up.o, "Sending chunk %d length %d", chunkNumber, size)
|
||||
// Transfer a chunk
|
||||
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
|
||||
err := up.f.pacer.Call(func() (bool, error) {
|
||||
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
|
||||
|
||||
// Get upload URL
|
||||
upload, err := up.getUploadURL(ctx)
|
||||
@@ -240,8 +214,8 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
|
||||
return false, err
|
||||
}
|
||||
|
||||
in := newHashAppendingReader(reader, sha1.New())
|
||||
sizeWithHash := size + int64(in.AdditionalLength())
|
||||
in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
|
||||
size := int64(len(body)) + int64(in.AdditionalLength())
|
||||
|
||||
// Authorization
|
||||
//
|
||||
@@ -271,10 +245,10 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
|
||||
Body: up.wrap(in),
|
||||
ExtraHeaders: map[string]string{
|
||||
"Authorization": upload.AuthorizationToken,
|
||||
"X-Bz-Part-Number": fmt.Sprintf("%d", chunkNumber+1),
|
||||
"X-Bz-Part-Number": fmt.Sprintf("%d", part),
|
||||
sha1Header: "hex_digits_at_end",
|
||||
},
|
||||
ContentLength: &sizeWithHash,
|
||||
ContentLength: &size,
|
||||
}
|
||||
|
||||
var response api.UploadPartResponse
|
||||
@@ -282,7 +256,7 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
|
||||
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
|
||||
retry, err := up.f.shouldRetry(ctx, resp, err)
|
||||
if err != nil {
|
||||
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", chunkNumber, retry, err, err)
|
||||
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
|
||||
}
|
||||
// On retryable error clear PartUploadURL
|
||||
if retry {
|
||||
@@ -290,30 +264,30 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
|
||||
upload = nil
|
||||
}
|
||||
up.returnUploadURL(upload)
|
||||
up.addSha1(chunkNumber, in.HexSum())
|
||||
up.sha1s[part-1] = in.HexSum()
|
||||
return retry, err
|
||||
})
|
||||
if err != nil {
|
||||
fs.Debugf(up.o, "Error sending chunk %d: %v", chunkNumber, err)
|
||||
fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
|
||||
} else {
|
||||
fs.Debugf(up.o, "Done sending chunk %d", chunkNumber)
|
||||
fs.Debugf(up.o, "Done sending chunk %d", part)
|
||||
}
|
||||
return size, err
|
||||
return err
|
||||
}
|
||||
|
||||
// Copy a chunk
|
||||
func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64) error {
|
||||
func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
|
||||
err := up.f.pacer.Call(func() (bool, error) {
|
||||
fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/b2_copy_part",
|
||||
}
|
||||
offset := int64(part) * up.chunkSize // where we are in the source file
|
||||
offset := (part - 1) * up.chunkSize // where we are in the source file
|
||||
var request = api.CopyPartRequest{
|
||||
SourceID: up.src.id,
|
||||
LargeFileID: up.id,
|
||||
PartNumber: int64(part + 1),
|
||||
PartNumber: part,
|
||||
Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
|
||||
}
|
||||
var response api.UploadPartResponse
|
||||
@@ -322,7 +296,7 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
|
||||
if err != nil {
|
||||
fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
|
||||
}
|
||||
up.addSha1(part, response.SHA1)
|
||||
up.sha1s[part-1] = response.SHA1
|
||||
return retry, err
|
||||
})
|
||||
if err != nil {
|
||||
@@ -333,8 +307,8 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
|
||||
return err
|
||||
}
|

// Close closes off the large upload
func (up *largeUpload) Close(ctx context.Context) error {
// finish closes off the large upload
func (up *largeUpload) finish(ctx context.Context) error {
fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
opts := rest.Opts{
Method: "POST",
@@ -355,8 +329,8 @@ func (up *largeUpload) Close(ctx context.Context) error {
return up.o.decodeMetaDataFileInfo(&response)
}

// Abort aborts the large upload
func (up *largeUpload) Abort(ctx context.Context) error {
// cancel aborts the large upload
func (up *largeUpload) cancel(ctx context.Context) error {
fs.Debugf(up.o, "Cancelling large file %s", up.what)
opts := rest.Opts{
Method: "POST",
||||
@@ -381,98 +355,157 @@ func (up *largeUpload) Abort(ctx context.Context) error {
|
||||
// reaches EOF.
|
||||
//
|
||||
// Note that initialUploadBlock must be returned to f.putBuf()
|
||||
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW) (err error) {
|
||||
defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
|
||||
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
|
||||
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
|
||||
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
|
||||
var (
|
||||
g, gCtx = errgroup.WithContext(ctx)
|
||||
hasMoreParts = true
|
||||
)
|
||||
up.size = initialUploadBlock.Size()
|
||||
for part := 0; hasMoreParts; part++ {
|
||||
// Get a block of memory from the pool and token which limits concurrency.
|
||||
var rw *pool.RW
|
||||
if part == 1 {
|
||||
rw = initialUploadBlock
|
||||
} else {
|
||||
rw = up.f.getRW(false)
|
||||
}
|
||||
|
||||
// Fail fast, in case an errgroup managed function returns an error
|
||||
// gCtx is cancelled. There is no point in uploading all the other parts.
|
||||
if gCtx.Err() != nil {
|
||||
up.f.putRW(rw)
|
||||
break
|
||||
}
|
||||
|
||||
// Read the chunk
|
||||
var n int64
|
||||
if part == 1 {
|
||||
n = rw.Size()
|
||||
} else {
|
||||
n, err = io.CopyN(rw, up.in, up.chunkSize)
|
||||
if err == io.EOF {
|
||||
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
|
||||
hasMoreParts = false
|
||||
} else if err != nil {
|
||||
// other kinds of errors indicate failure
|
||||
up.f.putRW(rw)
|
||||
return err
|
||||
up.size = int64(len(initialUploadBlock))
|
||||
g.Go(func() error {
|
||||
for part := int64(1); hasMoreParts; part++ {
|
||||
// Get a block of memory from the pool and token which limits concurrency.
|
||||
var buf []byte
|
||||
if part == 1 {
|
||||
buf = initialUploadBlock
|
||||
} else {
|
||||
buf = up.f.getBuf(false)
|
||||
}
|
||||
}
|
||||
|
||||
// Keep stats up to date
|
||||
up.parts = part
|
||||
up.size += n
|
||||
if part > maxParts {
|
||||
up.f.putRW(rw)
|
||||
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
|
||||
}
|
||||
// Fail fast, in case an errgroup managed function returns an error
|
||||
// gCtx is cancelled. There is no point in uploading all the other parts.
|
||||
if gCtx.Err() != nil {
|
||||
up.f.putBuf(buf, false)
|
||||
return nil
|
||||
}
|
||||
|
||||
part := part // for the closure
|
||||
g.Go(func() (err error) {
|
||||
defer up.f.putRW(rw)
|
||||
_, err = up.WriteChunk(gCtx, part, rw)
|
||||
return err
|
||||
})
|
||||
}
|
||||
// Read the chunk
|
||||
var n int
|
||||
if part == 1 {
|
||||
n = len(buf)
|
||||
} else {
|
||||
n, err = io.ReadFull(up.in, buf)
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
|
||||
buf = buf[:n]
|
||||
hasMoreParts = false
|
||||
} else if err == io.EOF {
|
||||
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
|
||||
up.f.putBuf(buf, false)
|
||||
return nil
|
||||
} else if err != nil {
|
||||
// other kinds of errors indicate failure
|
||||
up.f.putBuf(buf, false)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Keep stats up to date
|
||||
up.parts = part
|
||||
up.size += int64(n)
|
||||
if part > maxParts {
|
||||
up.f.putBuf(buf, false)
|
||||
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
|
||||
}
|
||||
|
||||
part := part // for the closure
|
||||
g.Go(func() (err error) {
|
||||
defer up.f.putBuf(buf, false)
|
||||
return up.transferChunk(gCtx, part, buf)
|
||||
})
|
||||
}
|
||||
return nil
|
||||
})
|
||||
err = g.Wait()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return up.Close(ctx)
|
||||
up.sha1s = up.sha1s[:up.parts]
|
||||
return up.finish(ctx)
|
||||
}
|
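The rewritten Stream above reads fixed-size chunks with io.ReadFull and interprets io.ErrUnexpectedEOF as "short final chunk" and io.EOF as "nothing left, the previous chunk was the last". A self-contained sketch of that loop with a bounded errgroup; uploadPart is a stand-in, not the real transferChunk:

package main

import (
    "context"
    "fmt"
    "io"
    "strings"

    "golang.org/x/sync/errgroup"
)

func uploadPart(ctx context.Context, part int64, buf []byte) error {
    fmt.Printf("part %d: %d bytes\n", part, len(buf))
    return nil
}

func streamUpload(ctx context.Context, in io.Reader, chunkSize int64, concurrency int) error {
    g, gCtx := errgroup.WithContext(ctx)
    g.SetLimit(concurrency) // bounds the number of in-flight parts
    for part := int64(1); ; part++ {
        buf := make([]byte, chunkSize)
        n, err := io.ReadFull(in, buf)
        last := false
        switch err {
        case nil:
        case io.ErrUnexpectedEOF: // short read - this is the final part
            buf = buf[:n]
            last = true
        case io.EOF: // nothing left - the previous part was the final one
            return g.Wait()
        default:
            _ = g.Wait()
            return err
        }
        part := part // for the closure
        g.Go(func() error { return uploadPart(gCtx, part, buf) })
        if last {
            break
        }
    }
    return g.Wait()
}

func main() {
    err := streamUpload(context.Background(), strings.NewReader(strings.Repeat("x", 250)), 100, 4)
    fmt.Println("err:", err)
}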
||||
|
||||
// Copy the chunks from the source to the destination
|
||||
func (up *largeUpload) Copy(ctx context.Context) (err error) {
|
||||
defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
|
||||
// Upload uploads the chunks from the input
|
||||
func (up *largeUpload) Upload(ctx context.Context) (err error) {
|
||||
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
|
||||
fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
|
||||
var (
|
||||
g, gCtx = errgroup.WithContext(ctx)
|
||||
remaining = up.size
|
||||
g, gCtx = errgroup.WithContext(ctx)
|
||||
remaining = up.size
|
||||
uploadPool *pool.Pool
|
||||
ci = fs.GetConfig(ctx)
|
||||
)
|
||||
g.SetLimit(up.f.opt.UploadConcurrency)
|
||||
for part := 0; part <= up.parts; part++ {
|
||||
// Fail fast, in case an errgroup managed function returns an error
|
||||
// gCtx is cancelled. There is no point in copying all the other parts.
|
||||
if gCtx.Err() != nil {
|
||||
break
|
||||
}
|
||||
|
||||
reqSize := remaining
|
||||
if reqSize >= up.chunkSize {
|
||||
reqSize = up.chunkSize
|
||||
}
|
||||
|
||||
part := part // for the closure
|
||||
g.Go(func() (err error) {
|
||||
return up.copyChunk(gCtx, part, reqSize)
|
||||
})
|
||||
remaining -= reqSize
|
||||
// If using large chunk size then make a temporary pool
|
||||
if up.chunkSize <= int64(up.f.opt.ChunkSize) {
|
||||
uploadPool = up.f.pool
|
||||
} else {
|
||||
uploadPool = pool.New(
|
||||
time.Duration(up.f.opt.MemoryPoolFlushTime),
|
||||
int(up.chunkSize),
|
||||
ci.Transfers,
|
||||
up.f.opt.MemoryPoolUseMmap,
|
||||
)
|
||||
defer uploadPool.Flush()
|
||||
}
|
||||
// Get an upload token and a buffer
|
||||
getBuf := func() (buf []byte) {
|
||||
up.f.getBuf(true)
|
||||
if !up.doCopy {
|
||||
buf = uploadPool.Get()
|
||||
}
|
||||
return buf
|
||||
}
|
||||
// Put an upload token and a buffer
|
||||
putBuf := func(buf []byte) {
|
||||
if !up.doCopy {
|
||||
uploadPool.Put(buf)
|
||||
}
|
||||
up.f.putBuf(nil, true)
|
||||
}
|
||||
g.Go(func() error {
|
||||
for part := int64(1); part <= up.parts; part++ {
|
||||
// Get a block of memory from the pool and token which limits concurrency.
|
||||
buf := getBuf()
|
||||
|
||||
// Fail fast, in case an errgroup managed function returns an error
|
||||
// gCtx is cancelled. There is no point in uploading all the other parts.
|
||||
if gCtx.Err() != nil {
|
||||
putBuf(buf)
|
||||
return nil
|
||||
}
|
||||
|
||||
reqSize := remaining
|
||||
if reqSize >= up.chunkSize {
|
||||
reqSize = up.chunkSize
|
||||
}
|
||||
|
||||
if !up.doCopy {
|
||||
// Read the chunk
|
||||
buf = buf[:reqSize]
|
||||
_, err = io.ReadFull(up.in, buf)
|
||||
if err != nil {
|
||||
putBuf(buf)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
part := part // for the closure
|
||||
g.Go(func() (err error) {
|
||||
defer putBuf(buf)
|
||||
if !up.doCopy {
|
||||
err = up.transferChunk(gCtx, part, buf)
|
||||
} else {
|
||||
err = up.copyChunk(gCtx, part, reqSize)
|
||||
}
|
||||
return err
|
||||
})
|
||||
remaining -= reqSize
|
||||
}
|
||||
return nil
|
||||
})
|
||||
err = g.Wait()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return up.Close(ctx)
|
||||
return up.finish(ctx)
|
||||
}
|
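The Upload rewrite pairs a concurrency token with an optional buffer: server-side copies take a token but no buffer, uploads take both. A hedged sketch of that getBuf/putBuf pairing using a buffered channel as the token dispenser and a sync.Pool for buffers (rclone's pool package works differently; this only shows the shape of the idea):

package main

import (
    "fmt"
    "sync"
)

type uploader struct {
    tokens chan struct{} // limits in-flight parts
    bufs   *sync.Pool    // reuses chunk buffers
    doCopy bool          // server-side copy needs no buffer
}

func newUploader(concurrency, chunkSize int, doCopy bool) *uploader {
    return &uploader{
        tokens: make(chan struct{}, concurrency),
        bufs:   &sync.Pool{New: func() any { return make([]byte, chunkSize) }},
        doCopy: doCopy,
    }
}

// getBuf takes a token and, for uploads only, a buffer.
func (u *uploader) getBuf() []byte {
    u.tokens <- struct{}{}
    if u.doCopy {
        return nil
    }
    return u.bufs.Get().([]byte)
}

// putBuf returns the buffer (if any) and releases the token.
func (u *uploader) putBuf(buf []byte) {
    if buf != nil {
        u.bufs.Put(buf[:cap(buf)])
    }
    <-u.tokens
}

func main() {
    u := newUploader(4, 1<<20, false)
    var wg sync.WaitGroup
    for part := 1; part <= 8; part++ {
        buf := u.getBuf() // blocks while 4 parts are already in flight
        wg.Add(1)
        go func(part int, buf []byte) {
            defer wg.Done()
            defer u.putBuf(buf)
            fmt.Println("uploading part", part, "with", len(buf), "bytes")
        }(part, buf)
    }
    wg.Wait()
}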
||||
|
@@ -52,7 +52,7 @@ func (e *Error) Error() string {
out += ": " + e.Message
}
if e.ContextInfo != nil {
out += fmt.Sprintf(" (%s)", string(e.ContextInfo))
out += fmt.Sprintf(" (%+v)", e.ContextInfo)
}
return out
}
@@ -63,7 +63,7 @@ var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"

// Types of things in Item/ItemMini
// Types of things in Item
const (
ItemTypeFolder = "folder"
ItemTypeFile = "file"
@@ -72,31 +72,20 @@ const (
ItemStatusDeleted = "deleted"
)

// ItemMini is a subset of the elements in a full Item returned by some API calls
type ItemMini struct {
Type string `json:"type"`
ID string `json:"id"`
SequenceID int64 `json:"sequence_id,string"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
}
||||
|
||||
// Item describes a folder or a file as returned by Get Folder Items and others
|
||||
type Item struct {
|
||||
Type string `json:"type"`
|
||||
ID string `json:"id"`
|
||||
SequenceID int64 `json:"sequence_id,string"`
|
||||
Etag string `json:"etag"`
|
||||
SHA1 string `json:"sha1"`
|
||||
Name string `json:"name"`
|
||||
Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
|
||||
CreatedAt Time `json:"created_at"`
|
||||
ModifiedAt Time `json:"modified_at"`
|
||||
ContentCreatedAt Time `json:"content_created_at"`
|
||||
ContentModifiedAt Time `json:"content_modified_at"`
|
||||
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
|
||||
Parent ItemMini `json:"parent"`
|
||||
Type string `json:"type"`
|
||||
ID string `json:"id"`
|
||||
SequenceID string `json:"sequence_id"`
|
||||
Etag string `json:"etag"`
|
||||
SHA1 string `json:"sha1"`
|
||||
Name string `json:"name"`
|
||||
Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
|
||||
CreatedAt Time `json:"created_at"`
|
||||
ModifiedAt Time `json:"modified_at"`
|
||||
ContentCreatedAt Time `json:"content_created_at"`
|
||||
ContentModifiedAt Time `json:"content_modified_at"`
|
||||
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
|
||||
SharedLink struct {
|
||||
URL string `json:"url,omitempty"`
|
||||
Access string `json:"access,omitempty"`
|
||||
@@ -292,30 +281,3 @@ type User struct {
|
||||
Address string `json:"address"`
|
||||
AvatarURL string `json:"avatar_url"`
|
||||
}
|
||||
|
||||
// FileTreeChangeEventTypes are the events that can require cache invalidation
|
||||
var FileTreeChangeEventTypes = map[string]struct{}{
|
||||
"ITEM_COPY": {},
|
||||
"ITEM_CREATE": {},
|
||||
"ITEM_MAKE_CURRENT_VERSION": {},
|
||||
"ITEM_MODIFY": {},
|
||||
"ITEM_MOVE": {},
|
||||
"ITEM_RENAME": {},
|
||||
"ITEM_TRASH": {},
|
||||
"ITEM_UNDELETE_VIA_TRASH": {},
|
||||
"ITEM_UPLOAD": {},
|
||||
}
|
||||
|
||||
// Event is an array element in the response returned from /events
|
||||
type Event struct {
|
||||
EventType string `json:"event_type"`
|
||||
EventID string `json:"event_id"`
|
||||
Source Item `json:"source"`
|
||||
}
|
||||
|
||||
// Events is returned from /events
|
||||
type Events struct {
|
||||
ChunkSize int64 `json:"chunk_size"`
|
||||
Entries []Event `json:"entries"`
|
||||
NextStreamPosition int64 `json:"next_stream_position"`
|
||||
}
|
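Two details in the types above are easy to miss: SequenceID is an int64 decoded from a JSON string via the ",string" tag, and Size is a float64 because Box can return very large sizes in xEyy scientific notation (see #2261). A small standalone decoding sketch covering just those fields:

package main

import (
    "encoding/json"
    "fmt"
)

type item struct {
    ID         string  `json:"id"`
    SequenceID int64   `json:"sequence_id,string"` // JSON string -> int64
    Size       float64 `json:"size"`               // may arrive as 1.2E+10
}

func main() {
    data := []byte(`{"id":"123","sequence_id":"7","size":1.2E+10}`)
    var it item
    if err := json.Unmarshal(data, &it); err != nil {
        panic(err)
    }
    fmt.Println(it.ID, it.SequenceID, int64(it.Size)) // 123 7 12000000000
}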
||||
|
||||
@@ -149,23 +149,6 @@ func init() {
|
||||
Default: "",
|
||||
Help: "Only show items owned by the login (email address) passed in.",
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "impersonate",
|
||||
Default: "",
|
||||
Help: `Impersonate this user ID when using a service account.
|
||||
|
||||
Setting this flag allows rclone, when using a JWT service account, to
|
||||
act on behalf of another user by setting the as-user header.
|
||||
|
||||
The user ID is the Box identifier for a user. User IDs can be found for
|
||||
any user via the GET /users endpoint, which is only available to
|
||||
admins, or by calling the GET /users/me endpoint with an authenticated
|
||||
user session.
|
||||
|
||||
See: https://developer.box.com/guides/authentication/jwt/as-user/
|
||||
`,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -279,29 +262,19 @@ type Options struct {
|
||||
AccessToken string `config:"access_token"`
|
||||
ListChunk int `config:"list_chunk"`
|
||||
OwnedBy string `config:"owned_by"`
|
||||
Impersonate string `config:"impersonate"`
|
||||
}
|
||||
|
||||
// ItemMeta defines metadata we cache for each Item ID
|
||||
type ItemMeta struct {
|
||||
SequenceID int64 // the most recent event processed for this item
|
||||
ParentID string // ID of the parent directory of this item
|
||||
Name string // leaf name of this item
|
||||
}
|
||||
|
||||
// Fs represents a remote box
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
srv *rest.Client // the connection to the server
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
pacer *fs.Pacer // pacer for API calls
|
||||
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
||||
uploadToken *pacer.TokenDispenser // control concurrency
|
||||
itemMetaCacheMu *sync.Mutex // protects itemMetaCache
|
||||
itemMetaCache map[string]ItemMeta // map of Item ID to selected metadata
|
||||
name string // name of this remote
|
||||
root string // the path we are working on
|
||||
opt Options // parsed options
|
||||
features *fs.Features // optional features
|
||||
srv *rest.Client // the connection to the server
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
pacer *fs.Pacer // pacer for API calls
|
||||
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
||||
uploadToken *pacer.TokenDispenser // control concurrency
|
||||
}
|
||||
|
||||
// Object describes a box object
|
||||
@@ -449,14 +422,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
|
||||
ci := fs.GetConfig(ctx)
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
srv: rest.NewClient(client).SetRoot(rootURL),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
|
||||
itemMetaCacheMu: new(sync.Mutex),
|
||||
itemMetaCache: make(map[string]ItemMeta),
|
||||
name: name,
|
||||
root: root,
|
||||
opt: *opt,
|
||||
srv: rest.NewClient(client).SetRoot(rootURL),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
@@ -469,11 +440,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
|
||||
}
|
||||
|
||||
// If using impersonate set an as-user header
|
||||
if f.opt.Impersonate != "" {
|
||||
f.srv.SetHeader("as-user", f.opt.Impersonate)
|
||||
}
|
||||
|
||||
jsonFile, ok := m.Get("box_config_file")
|
||||
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
|
||||
|
||||
@@ -716,17 +682,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
}
|
||||
entries = append(entries, o)
|
||||
}
|
||||
|
||||
// Cache some metadata for this Item to help us process events later
|
||||
// on. In particular, the box event API does not provide the old path
|
||||
// of the Item when it is renamed/deleted/moved/etc.
|
||||
f.itemMetaCacheMu.Lock()
|
||||
cachedItemMeta, found := f.itemMetaCache[info.ID]
|
||||
if !found || cachedItemMeta.SequenceID < info.SequenceID {
|
||||
f.itemMetaCache[info.ID] = ItemMeta{SequenceID: info.SequenceID, ParentID: directoryID, Name: info.Name}
|
||||
}
|
||||
f.itemMetaCacheMu.Unlock()
|
||||
|
||||
return false
|
||||
})
|
||||
if err != nil {
|
@@ -1166,7 +1121,7 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) (err error) {
var (
deleteErrors atomic.Uint64
deleteErrors = int64(0)
concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
wg sync.WaitGroup
)
@@ -1182,7 +1137,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
err := f.deletePermanently(ctx, item.Type, item.ID)
if err != nil {
fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
deleteErrors.Add(1)
atomic.AddInt64(&deleteErrors, 1)
}
}()
} else {
@@ -1191,250 +1146,12 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
return false
})
wg.Wait()
if deleteErrors.Load() != 0 {
return fmt.Errorf("failed to delete %d trash items", deleteErrors.Load())
if deleteErrors != 0 {
return fmt.Errorf("failed to delete %d trash items", deleteErrors)
}
return err
}
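The CleanUp hunk swaps between a plain int64 driven with atomic.AddInt64 and the typed atomic.Uint64 wrapper. Both count failed deletions safely across goroutines; a minimal standalone comparison:

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

func main() {
    var wrapped atomic.Uint64 // typed wrapper, Go 1.19+
    var raw int64             // plain int64 used with the atomic functions

    var wg sync.WaitGroup
    for i := 0; i < 100; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            wrapped.Add(1)
            atomic.AddInt64(&raw, 1)
        }()
    }
    wg.Wait()
    fmt.Println(wrapped.Load(), atomic.LoadInt64(&raw)) // 100 100
}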
||||
|
||||
// ChangeNotify calls the passed function with a path that has had changes.
|
||||
// If the implementation uses polling, it should adhere to the given interval.
|
||||
//
|
||||
// Automatically restarts itself in case of unexpected behavior of the remote.
|
||||
//
|
||||
// Close the returned channel to stop being notified.
|
||||
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
|
||||
go func() {
|
||||
// get the `stream_position` early so all changes from now on get processed
|
||||
streamPosition, err := f.changeNotifyStreamPosition(ctx)
|
||||
if err != nil {
|
||||
fs.Infof(f, "Failed to get StreamPosition: %s", err)
|
||||
}
|
||||
|
||||
var ticker *time.Ticker
|
||||
var tickerC <-chan time.Time
|
||||
for {
|
||||
select {
|
||||
case pollInterval, ok := <-pollIntervalChan:
|
||||
if !ok {
|
||||
if ticker != nil {
|
||||
ticker.Stop()
|
||||
}
|
||||
return
|
||||
}
|
||||
if ticker != nil {
|
||||
ticker.Stop()
|
||||
ticker, tickerC = nil, nil
|
||||
}
|
||||
if pollInterval != 0 {
|
||||
ticker = time.NewTicker(pollInterval)
|
||||
tickerC = ticker.C
|
||||
}
|
||||
case <-tickerC:
|
||||
if streamPosition == "" {
|
||||
streamPosition, err = f.changeNotifyStreamPosition(ctx)
|
||||
if err != nil {
|
||||
fs.Infof(f, "Failed to get StreamPosition: %s", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
streamPosition, err = f.changeNotifyRunner(ctx, notifyFunc, streamPosition)
|
||||
if err != nil {
|
||||
fs.Infof(f, "Change notify listener failure: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (f *Fs) changeNotifyStreamPosition(ctx context.Context) (streamPosition string, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/events",
|
||||
Parameters: fieldsValue(),
|
||||
}
|
||||
opts.Parameters.Set("stream_position", "now")
|
||||
opts.Parameters.Set("stream_type", "changes")
|
||||
|
||||
var result api.Events
|
||||
var resp *http.Response
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return strconv.FormatInt(result.NextStreamPosition, 10), nil
|
||||
}
|
||||
|
||||
// Attempts to construct the full path for an object, given the ID of its
|
||||
// parent directory and the name of the object.
|
||||
//
|
||||
// Can return "" if the parentID is not currently in the directory cache.
|
||||
func (f *Fs) getFullPath(parentID string, childName string) (fullPath string) {
|
||||
fullPath = ""
|
||||
name := f.opt.Enc.ToStandardName(childName)
|
||||
if parentID != "" {
|
||||
if parentDir, ok := f.dirCache.GetInv(parentID); ok {
|
||||
if len(parentDir) > 0 {
|
||||
fullPath = parentDir + "/" + name
|
||||
} else {
|
||||
fullPath = name
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// No parent, this object is at the root
|
||||
fullPath = name
|
||||
}
|
||||
return fullPath
|
||||
}
|
||||
|
||||
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), streamPosition string) (nextStreamPosition string, err error) {
|
||||
nextStreamPosition = streamPosition
|
||||
|
||||
// box can send duplicate Event IDs; filter any in a single notify run
|
||||
processedEventIDs := make(map[string]bool)
|
||||
|
||||
for {
|
||||
limit := f.opt.ListChunk
|
||||
|
||||
// box only allows a max of 500 events
|
||||
if limit > 500 {
|
||||
limit = 500
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/events",
|
||||
Parameters: fieldsValue(),
|
||||
}
|
||||
opts.Parameters.Set("stream_position", nextStreamPosition)
|
||||
opts.Parameters.Set("stream_type", "changes")
|
||||
opts.Parameters.Set("limit", strconv.Itoa(limit))
|
||||
|
||||
var result api.Events
|
||||
var resp *http.Response
|
||||
fs.Debugf(f, "Checking for changes on remote (next_stream_position: %q)", nextStreamPosition)
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if result.ChunkSize != int64(len(result.Entries)) {
|
||||
return "", fmt.Errorf("invalid response to event request, chunk_size (%v) not equal to number of entries (%v)", result.ChunkSize, len(result.Entries))
|
||||
}
|
||||
|
||||
nextStreamPosition = strconv.FormatInt(result.NextStreamPosition, 10)
|
||||
if result.ChunkSize == 0 {
|
||||
return nextStreamPosition, nil
|
||||
}
|
||||
|
||||
type pathToClear struct {
|
||||
path string
|
||||
entryType fs.EntryType
|
||||
}
|
||||
var pathsToClear []pathToClear
|
||||
newEventIDs := 0
|
||||
for _, entry := range result.Entries {
|
||||
if entry.EventID == "" || processedEventIDs[entry.EventID] { // missing Event ID, or already saw this one
|
||||
continue
|
||||
}
|
||||
processedEventIDs[entry.EventID] = true
|
||||
newEventIDs++
|
||||
|
||||
if entry.Source.ID == "" { // missing File or Folder ID
|
||||
continue
|
||||
}
|
||||
if entry.Source.Type != api.ItemTypeFile && entry.Source.Type != api.ItemTypeFolder { // event is not for a file or folder
|
||||
continue
|
||||
}
|
||||
|
||||
// Only interested in event types that result in a file tree change
|
||||
if _, found := api.FileTreeChangeEventTypes[entry.EventType]; !found {
|
||||
continue
|
||||
}
|
||||
|
||||
f.itemMetaCacheMu.Lock()
|
||||
itemMeta, cachedItemMetaFound := f.itemMetaCache[entry.Source.ID]
|
||||
if cachedItemMetaFound {
|
||||
if itemMeta.SequenceID >= entry.Source.SequenceID {
|
||||
// Item in the cache has the same or newer SequenceID than
|
||||
// this event. Ignore this event, it must be old.
|
||||
f.itemMetaCacheMu.Unlock()
|
||||
continue
|
||||
}
|
||||
|
||||
// This event is newer. Delete its entry from the cache,
|
||||
// we'll notify about its change below, then it's up to a
|
||||
// future list operation to repopulate the cache.
|
||||
delete(f.itemMetaCache, entry.Source.ID)
|
||||
}
|
||||
f.itemMetaCacheMu.Unlock()
|
||||
|
||||
entryType := fs.EntryDirectory
|
||||
if entry.Source.Type == api.ItemTypeFile {
|
||||
entryType = fs.EntryObject
|
||||
}
|
||||
|
||||
// The box event only includes the new path for the object (e.g.
|
||||
// the path after the object was moved). If there was an old path
|
||||
// saved in our cache, it must be cleared.
|
||||
if cachedItemMetaFound {
|
||||
path := f.getFullPath(itemMeta.ParentID, itemMeta.Name)
|
||||
if path != "" {
|
||||
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
|
||||
}
|
||||
|
||||
// If this is a directory, also delete it from the dir cache.
|
||||
// This will effectively invalidate the item metadata cache
|
||||
// entries for all descendents of this directory, since we
|
||||
// will no longer be able to construct a full path for them.
|
||||
// This is exactly what we want, since we don't want to notify
|
||||
// on the paths of these descendents if one of their ancestors
|
||||
// has been renamed/deleted.
|
||||
if entry.Source.Type == api.ItemTypeFolder {
|
||||
f.dirCache.FlushDir(path)
|
||||
}
|
||||
}
|
||||
|
||||
// If the item is "active", then it is not trashed or deleted, so
|
||||
// it potentially has a valid parent.
|
||||
//
|
||||
// Construct the new path of the object, based on the Parent ID
|
||||
// and its name. If we get an empty result, it means we don't
|
||||
// currently know about this object so notification is unnecessary.
|
||||
if entry.Source.ItemStatus == api.ItemStatusActive {
|
||||
path := f.getFullPath(entry.Source.Parent.ID, entry.Source.Name)
|
||||
if path != "" {
|
||||
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// box can sometimes repeatedly return the same Event IDs within a
|
||||
// short period of time. If it stops giving us new ones, treat it
|
||||
// the same as if it returned us none at all.
|
||||
if newEventIDs == 0 {
|
||||
return nextStreamPosition, nil
|
||||
}
|
||||
|
||||
notifiedPaths := make(map[string]bool)
|
||||
for _, p := range pathsToClear {
|
||||
if _, ok := notifiedPaths[p.path]; ok {
|
||||
continue
|
||||
}
|
||||
notifiedPaths[p.path] = true
|
||||
notifyFunc(p.path, p.entryType)
|
||||
}
|
||||
fs.Debugf(f, "Received %v events, resulting in %v paths and %v notifications", len(result.Entries), len(pathsToClear), len(notifiedPaths))
|
||||
}
|
||||
}
|
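changeNotifyRunner keeps a map of processed Event IDs for the lifetime of one notify run and stops polling once a batch yields no new IDs, since Box can resend the same events for a while. The core of that dedup, as a standalone sketch:

package main

import "fmt"

type event struct{ ID, Path string }

// dedupe returns the events whose IDs have not been seen before,
// recording them in seen as a side effect.
func dedupe(seen map[string]bool, batch []event) []event {
    var fresh []event
    for _, e := range batch {
        if e.ID == "" || seen[e.ID] {
            continue
        }
        seen[e.ID] = true
        fresh = append(fresh, e)
    }
    return fresh
}

func main() {
    seen := make(map[string]bool)
    fmt.Println(len(dedupe(seen, []event{{"1", "a"}, {"2", "b"}}))) // 2
    fmt.Println(len(dedupe(seen, []event{{"2", "b"}, {"3", "c"}}))) // 1
    fmt.Println(len(dedupe(seen, []event{{"3", "c"}})))             // 0 -> stop polling this run
}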
||||
|
||||
// DirCacheFlush resets the directory cache - used in testing as an
|
||||
// optional interface
|
||||
func (f *Fs) DirCacheFlush() {
|
||||
|
||||
2
backend/cache/cache_test.go
vendored
2
backend/cache/cache_test.go
vendored
@@ -18,7 +18,7 @@ func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestCache:",
|
||||
NilObject: (*cache.Object)(nil),
|
||||
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter"},
|
||||
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
|
||||
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
|
||||
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
|
||||
})
|
||||
|
||||
@@ -40,7 +40,6 @@ func TestIntegration(t *testing.T) {
|
||||
UnimplementableFsMethods: []string{
|
||||
"PublicLink",
|
||||
"OpenWriterAt",
|
||||
"OpenChunkWriter",
|
||||
"MergeDirs",
|
||||
"DirCacheFlush",
|
||||
"UserInfo",
|
||||
|
||||
@@ -914,7 +914,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
return do(ctx, uRemote, expire, unlink)
|
||||
}
|
||||
|
||||
// PutUnchecked in to the remote path with the modTime given of the given size
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "OpenChunkWriter"}
|
||||
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect"}
|
||||
unimplementableObjectMethods = []string{}
|
||||
)
|
||||
|
||||
|
||||
@@ -257,16 +257,6 @@ func isMetadataFile(filename string) bool {
|
||||
return strings.HasSuffix(filename, metaFileExt)
|
||||
}
|
||||
|
||||
// Checks whether a file is a metadata file and returns the original
|
||||
// file name and a flag indicating whether it was a metadata file or
|
||||
// not.
|
||||
func unwrapMetadataFile(filename string) (string, bool) {
|
||||
if !isMetadataFile(filename) {
|
||||
return "", false
|
||||
}
|
||||
return filename[:len(filename)-len(metaFileExt)], true
|
||||
}
|
||||
|
||||
// makeDataName generates the file name for a data file with specified compression mode
|
||||
func makeDataName(remote string, size int64, mode int) (newRemote string) {
|
||||
if mode != Uncompressed {
|
||||
@@ -989,8 +979,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
|
||||
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
|
||||
fs.Logf(f, "path %q entryType %d", path, entryType)
|
||||
var (
|
||||
wrappedPath string
|
||||
isMetadataFile bool
|
||||
wrappedPath string
|
||||
)
|
||||
switch entryType {
|
||||
case fs.EntryDirectory:
|
||||
@@ -998,10 +987,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
|
||||
case fs.EntryObject:
|
||||
// Note: All we really need to do to monitor the object is to check whether the metadata changed,
|
||||
// as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same.
|
||||
wrappedPath, isMetadataFile = unwrapMetadataFile(path)
|
||||
if !isMetadataFile {
|
||||
return
|
||||
}
|
||||
wrappedPath = makeMetadataName(path)
|
||||
default:
|
||||
fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType)
|
||||
return
|
||||
|
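The compress ChangeNotify change hinges on mapping between an object name and its metadata sidecar: one side derives the wrapped path with unwrapMetadataFile, the other re-derives the sidecar with makeMetadataName. A minimal sketch of such suffix helpers; the ".meta" extension is illustrative only, not necessarily the backend's real metaFileExt:

package main

import (
    "fmt"
    "strings"
)

const metaFileExt = ".meta" // illustrative suffix

func makeMetadataName(remote string) string { return remote + metaFileExt }

func isMetadataFile(name string) bool { return strings.HasSuffix(name, metaFileExt) }

// unwrapMetadataFile returns the original name and whether the input
// actually was a metadata file.
func unwrapMetadataFile(name string) (string, bool) {
    if !isMetadataFile(name) {
        return "", false
    }
    return strings.TrimSuffix(name, metaFileExt), true
}

func main() {
    meta := makeMetadataName("dir/file.txt.gz")
    fmt.Println(meta)                                  // dir/file.txt.gz.meta
    fmt.Println(unwrapMetadataFile(meta))              // dir/file.txt.gz true
    fmt.Println(unwrapMetadataFile("dir/file.txt.gz")) // "" false
}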
||||
@@ -14,26 +14,23 @@ import (
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
var defaultOpt = fstests.Opt{
|
||||
RemoteName: "TestCompress:",
|
||||
NilObject: (*Object)(nil),
|
||||
UnimplementableFsMethods: []string{
|
||||
"OpenWriterAt",
|
||||
"OpenChunkWriter",
|
||||
"MergeDirs",
|
||||
"DirCacheFlush",
|
||||
"PutUnchecked",
|
||||
"PutStream",
|
||||
"UserInfo",
|
||||
"Disconnect",
|
||||
},
|
||||
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
|
||||
UnimplementableObjectMethods: []string{},
|
||||
}
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &defaultOpt)
|
||||
opt := fstests.Opt{
|
||||
RemoteName: *fstest.RemoteName,
|
||||
NilObject: (*Object)(nil),
|
||||
UnimplementableFsMethods: []string{
|
||||
"OpenWriterAt",
|
||||
"MergeDirs",
|
||||
"DirCacheFlush",
|
||||
"PutUnchecked",
|
||||
"PutStream",
|
||||
"UserInfo",
|
||||
"Disconnect",
|
||||
},
|
||||
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
|
||||
UnimplementableObjectMethods: []string{}}
|
||||
fstests.Run(t, &opt)
|
||||
}
|
||||
|
||||
// TestRemoteGzip tests GZIP compression
|
||||
@@ -43,13 +40,27 @@ func TestRemoteGzip(t *testing.T) {
|
||||
}
|
||||
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
|
||||
name := "TestCompressGzip"
|
||||
opt := defaultOpt
|
||||
opt.RemoteName = name + ":"
|
||||
opt.ExtraConfig = []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "compress"},
|
||||
{Name: name, Key: "remote", Value: tempdir},
|
||||
{Name: name, Key: "compression_mode", Value: "gzip"},
|
||||
}
|
||||
opt.QuickTestOK = true
|
||||
fstests.Run(t, &opt)
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
NilObject: (*Object)(nil),
|
||||
UnimplementableFsMethods: []string{
|
||||
"OpenWriterAt",
|
||||
"MergeDirs",
|
||||
"DirCacheFlush",
|
||||
"PutUnchecked",
|
||||
"PutStream",
|
||||
"UserInfo",
|
||||
"Disconnect",
|
||||
},
|
||||
UnimplementableObjectMethods: []string{
|
||||
"GetTier",
|
||||
"SetTier",
|
||||
},
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "compress"},
|
||||
{Name: name, Key: "remote", Value: tempdir},
|
||||
{Name: name, Key: "compression_mode", Value: "gzip"},
|
||||
},
|
||||
QuickTestOK: true,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -24,7 +24,7 @@ func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: *fstest.RemoteName,
|
||||
NilObject: (*crypt.Object)(nil),
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
})
|
||||
}
|
||||
@@ -45,7 +45,7 @@ func TestStandardBase32(t *testing.T) {
|
||||
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
|
||||
{Name: name, Key: "filename_encryption", Value: "standard"},
|
||||
},
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
QuickTestOK: true,
|
||||
})
|
||||
@@ -67,7 +67,7 @@ func TestStandardBase64(t *testing.T) {
|
||||
{Name: name, Key: "filename_encryption", Value: "standard"},
|
||||
{Name: name, Key: "filename_encoding", Value: "base64"},
|
||||
},
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
QuickTestOK: true,
|
||||
})
|
||||
@@ -89,7 +89,7 @@ func TestStandardBase32768(t *testing.T) {
|
||||
{Name: name, Key: "filename_encryption", Value: "standard"},
|
||||
{Name: name, Key: "filename_encoding", Value: "base32768"},
|
||||
},
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
QuickTestOK: true,
|
||||
})
|
||||
@@ -111,7 +111,7 @@ func TestOff(t *testing.T) {
|
||||
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
|
||||
{Name: name, Key: "filename_encryption", Value: "off"},
|
||||
},
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
QuickTestOK: true,
|
||||
})
|
||||
@@ -137,7 +137,7 @@ func TestObfuscate(t *testing.T) {
|
||||
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
|
||||
},
|
||||
SkipBadWindowsCharacters: true,
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
QuickTestOK: true,
|
||||
})
|
||||
@@ -164,7 +164,7 @@ func TestNoDataObfuscate(t *testing.T) {
|
||||
{Name: name, Key: "no_data_encryption", Value: "true"},
|
||||
},
|
||||
SkipBadWindowsCharacters: true,
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
|
||||
UnimplementableFsMethods: []string{"OpenWriterAt"},
|
||||
UnimplementableObjectMethods: []string{"MimeType"},
|
||||
QuickTestOK: true,
|
||||
})
|
||||
|
||||
@@ -594,7 +594,7 @@ This resource key requirement only applies to a subset of old files.
|
||||
|
||||
Note also that opening the folder once in the web interface (with the
|
||||
user you've authenticated rclone with) seems to be enough so that the
|
||||
resource key is not needed.
|
||||
resource key is no needed.
|
||||
`,
|
||||
Advanced: true,
|
||||
Sensitive: true,
|
||||
|
@@ -28,14 +28,14 @@ var retryErrorCodes = []int{
509, // Bandwidth Limit Exceeded
}

var errorRegex = regexp.MustCompile(`#(\d{1,3})`)
var errorRegex = regexp.MustCompile(`#\d{1,3}`)

func parseFichierError(err error) int {
matches := errorRegex.FindStringSubmatch(err.Error())
if len(matches) == 0 {
return 0
}
code, err := strconv.Atoi(matches[1])
code, err := strconv.Atoi(matches[0])
if err != nil {
fs.Debugf(nil, "failed parsing fichier error: %v", err)
return 0
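The 1Fichier change above is about which submatch holds the numeric code: with `#(\d{1,3})` the digits land in matches[1], while the unparenthesised `#\d{1,3}` only gives matches[0] as "#NNN", which strconv.Atoi cannot parse. A quick standalone demonstration (the error message is made up):

package main

import (
    "fmt"
    "regexp"
    "strconv"
)

func main() {
    msg := "1Fichier error #375: something went wrong"

    withGroup := regexp.MustCompile(`#(\d{1,3})`)
    m := withGroup.FindStringSubmatch(msg)
    code, err := strconv.Atoi(m[1]) // "375"
    fmt.Println(code, err)          // 375 <nil>

    withoutGroup := regexp.MustCompile(`#\d{1,3}`)
    m = withoutGroup.FindStringSubmatch(msg)
    _, err = strconv.Atoi(m[0]) // "#375" - not a number
    fmt.Println(err != nil)     // true
}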
||||
@@ -408,32 +408,6 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (f *Fs) moveDir(ctx context.Context, folderID int, newLeaf string, destinationFolderID int) (response *MoveDirResponse, err error) {
|
||||
request := &MoveDirRequest{
|
||||
FolderID: folderID,
|
||||
DestinationFolderID: destinationFolderID,
|
||||
Rename: newLeaf,
|
||||
// DestinationUser: destinationUser,
|
||||
}
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/folder/mv.cgi",
|
||||
}
|
||||
|
||||
response = &MoveDirResponse{}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.rest.CallJSON(ctx, &opts, request, response)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't move dir: %w", err)
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
|
||||
func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
|
||||
request := &CopyFileRequest{
|
||||
URLs: []string{url},
|
||||
|
||||
@@ -488,51 +488,6 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return dstObj, nil
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server-side move operations.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantDirMove.
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists.
|
||||
//
|
||||
// This is complicated by the fact that we can't use moveDir to move
|
||||
// to a different directory AND rename at the same time as it can
|
||||
// overwrite files in the source directory.
|
||||
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
|
||||
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srcIDnumeric, err := strconv.Atoi(srcID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dstDirectoryIDnumeric, err := strconv.Atoi(dstDirectoryID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var resp *MoveDirResponse
|
||||
resp, err = f.moveDir(ctx, srcIDnumeric, dstLeaf, dstDirectoryIDnumeric)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't rename leaf: %w", err)
|
||||
}
|
||||
if resp.Status != "OK" {
|
||||
return fmt.Errorf("couldn't rename leaf: %s", resp.Message)
|
||||
}
|
||||
|
||||
srcFs.dirCache.FlushDir(srcRemote)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Copy src to this remote using server side move operations.
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
@@ -606,7 +561,6 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.PutUncheckeder = (*Fs)(nil)
|
||||
|
||||
@@ -70,22 +70,6 @@ type MoveFileResponse struct {
|
||||
URLs []string `json:"urls"`
|
||||
}
|
||||
|
||||
// MoveDirRequest is the request structure of the corresponding request
|
||||
type MoveDirRequest struct {
|
||||
FolderID int `json:"folder_id"`
|
||||
DestinationFolderID int `json:"destination_folder_id,omitempty"`
|
||||
DestinationUser string `json:"destination_user"`
|
||||
Rename string `json:"rename,omitempty"`
|
||||
}
|
||||
|
||||
// MoveDirResponse is the response structure of the corresponding request
|
||||
type MoveDirResponse struct {
|
||||
Status string `json:"status"`
|
||||
Message string `json:"message"`
|
||||
OldName string `json:"old_name"`
|
||||
NewName string `json:"new_name"`
|
||||
}
|
||||
|
||||
// CopyFileRequest is the request structure of the corresponding request
|
||||
type CopyFileRequest struct {
|
||||
URLs []string `json:"urls"`
|
||||
|
@@ -158,9 +158,9 @@ type Fs struct {
tokenMu sync.Mutex // hold when reading the token
token string // current access token
tokenExpiry time.Time // time the current token expires
tokenExpired atomic.Int32
canCopyWithName bool // set if detected that can use fi_name in copy
precision time.Duration // precision reported
tokenExpired int32 // read and written with atomic
canCopyWithName bool // set if detected that can use fi_name in copy
precision time.Duration // precision reported
}

// Object describes a filefabric object
@@ -243,7 +243,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, st
err = status // return the error from the RPC
code := status.GetCode()
if code == "login_token_expired" {
f.tokenExpired.Add(1)
atomic.AddInt32(&f.tokenExpired, 1)
} else {
for _, retryCode := range retryStatusCodes {
if code == retryCode.code {
@@ -323,12 +323,12 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
var refreshed = false
defer func() {
if refreshed {
f.tokenExpired.Store(0)
atomic.StoreInt32(&f.tokenExpired, 0)
}
f.tokenMu.Unlock()
}()

expired := f.tokenExpired.Load() != 0
expired := atomic.LoadInt32(&f.tokenExpired) != 0
if expired {
fs.Debugf(f, "Token invalid - refreshing")
}
||||
|
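The filefabric hunk converts the expired-token marker between atomic.AddInt32/LoadInt32/StoreInt32 on a plain int32 and the atomic.Int32 wrapper. The pattern is the same either way: any request goroutine can flag expiry, and the next getToken call (under the mutex) refreshes and clears the flag. A reduced sketch with illustrative names:

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

type tokenSource struct {
    mu      sync.Mutex
    token   string
    expired atomic.Int32 // set non-zero by any goroutine that sees an auth failure
}

func (t *tokenSource) markExpired() { t.expired.Add(1) }

func (t *tokenSource) get(refresh func() string) string {
    t.mu.Lock()
    defer t.mu.Unlock()
    if t.token == "" || t.expired.Load() != 0 {
        t.token = refresh()
        t.expired.Store(0)
    }
    return t.token
}

func main() {
    ts := &tokenSource{}
    refreshes := 0
    refresh := func() string { refreshes++; return fmt.Sprintf("token-%d", refreshes) }

    fmt.Println(ts.get(refresh)) // token-1
    fmt.Println(ts.get(refresh)) // token-1 (cached)
    ts.markExpired()             // e.g. server replied login_token_expired
    fmt.Println(ts.get(refresh)) // token-2
}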
||||
@@ -15,7 +15,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/jlaffaye/ftp"
|
||||
"github.com/rclone/ftp"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
@@ -28,7 +28,6 @@ import (
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/env"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/proxy"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
)
|
||||
|
||||
@@ -175,18 +174,6 @@ Enabled by default. Use 0 to disable.`,
|
||||
If this is set and no password is supplied then rclone will ask for a password
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "socks_proxy",
|
||||
Default: "",
|
||||
Help: `Socks 5 proxy host.
|
||||
|
||||
Supports the format user:pass@host:port, user@host:port, host:port.
|
||||
|
||||
Example:
|
||||
|
||||
myUser:myPass@localhost:9005
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -231,7 +218,6 @@ type Options struct {
|
||||
ShutTimeout fs.Duration `config:"shut_timeout"`
|
||||
AskPassword bool `config:"ask_password"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
SocksProxy string `config:"socks_proxy"`
|
||||
}
|
||||
|
||||
// Fs represents a remote FTP server
|
||||
@@ -249,6 +235,7 @@ type Fs struct {
|
||||
pool []*ftp.ServerConn
|
||||
drain *time.Timer // used to drain the pool when we stop using the connections
|
||||
tokens *pacer.TokenDispenser
|
||||
tlsConf *tls.Config
|
||||
pacer *fs.Pacer // pacer for FTP connections
|
||||
fGetTime bool // true if the ftp library accepts GetTime
|
||||
fSetTime bool // true if the ftp library accepts SetTime
|
||||
@@ -361,36 +348,10 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
|
||||
return fserrors.ShouldRetry(err), err
|
||||
}
|
||||
|
||||
// Get a TLS config with a unique session cache.
|
||||
//
|
||||
// We can't share session caches between connections.
|
||||
//
|
||||
// See: https://github.com/rclone/rclone/issues/7234
|
||||
func (f *Fs) tlsConfig() *tls.Config {
|
||||
var tlsConfig *tls.Config
|
||||
if f.opt.TLS || f.opt.ExplicitTLS {
|
||||
tlsConfig = &tls.Config{
|
||||
ServerName: f.opt.Host,
|
||||
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
|
||||
}
|
||||
if f.opt.TLSCacheSize > 0 {
|
||||
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize)
|
||||
}
|
||||
if f.opt.DisableTLS13 {
|
||||
tlsConfig.MaxVersion = tls.VersionTLS12
|
||||
}
|
||||
}
|
||||
return tlsConfig
|
||||
}
|
||||
|
||||
// Open a new connection to the FTP server.
|
||||
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
||||
fs.Debugf(f, "Connecting to FTP server")
|
||||
|
||||
// tls.Config for this connection only. Will be used for data
|
||||
// and control connections.
|
||||
tlsConfig := f.tlsConfig()
|
||||
|
||||
// Make ftp library dial with fshttp dialer optionally using TLS
|
||||
initialConnection := true
|
||||
dial := func(network, address string) (conn net.Conn, err error) {
|
||||
@@ -398,17 +359,12 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
||||
defer func() {
|
||||
fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
|
||||
}()
|
||||
baseDialer := fshttp.NewDialer(ctx)
|
||||
if f.opt.SocksProxy != "" {
|
||||
conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
|
||||
} else {
|
||||
conn, err = baseDialer.Dial(network, address)
|
||||
}
|
||||
conn, err = fshttp.NewDialer(ctx).Dial(network, address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Connect using cleartext only for non TLS
|
||||
if tlsConfig == nil {
|
||||
if f.tlsConf == nil {
|
||||
return conn, nil
|
||||
}
|
||||
// Initial connection only needs to be cleartext for explicit TLS
|
||||
@@ -417,7 +373,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
||||
return conn, nil
|
||||
}
|
||||
// Upgrade connection to TLS
|
||||
tlsConn := tls.Client(conn, tlsConfig)
|
||||
tlsConn := tls.Client(conn, f.tlsConf)
|
||||
// Do the initial handshake - tls.Client doesn't do it for us
|
||||
// If we do this then connections to proftpd/pureftpd lock up
|
||||
// See: https://github.com/rclone/rclone/issues/6426
|
||||
@@ -439,9 +395,9 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
|
||||
if f.opt.TLS {
|
||||
// Our dialer takes care of TLS but ftp library also needs tlsConf
|
||||
// as a trigger for sending PSBZ and PROT options to server.
|
||||
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
|
||||
ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
|
||||
} else if f.opt.ExplicitTLS {
|
||||
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(tlsConfig))
|
||||
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
|
||||
}
|
||||
if f.opt.DisableEPSV {
|
||||
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
|
||||
@@ -596,6 +552,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
||||
if opt.TLS && opt.ExplicitTLS {
|
||||
return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config")
|
||||
}
|
||||
var tlsConfig *tls.Config
|
||||
if opt.TLS || opt.ExplicitTLS {
|
||||
tlsConfig = &tls.Config{
|
||||
ServerName: opt.Host,
|
||||
InsecureSkipVerify: opt.SkipVerifyTLSCert,
|
||||
}
|
||||
if opt.TLSCacheSize > 0 {
|
||||
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(opt.TLSCacheSize)
|
||||
}
|
||||
if opt.DisableTLS13 {
|
||||
tlsConfig.MaxVersion = tls.VersionTLS12
|
||||
}
|
||||
}
|
||||
u := protocol + path.Join(dialAddr+"/", root)
|
||||
ci := fs.GetConfig(ctx)
|
||||
f := &Fs{
|
||||
@@ -608,6 +577,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
|
||||
pass: pass,
|
||||
dialAddr: dialAddr,
|
||||
tokens: pacer.NewTokenDispenser(opt.Concurrency),
|
||||
tlsConf: tlsConfig,
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
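The FTP hunk wires an optional SOCKS5 hop into the dial function (rclone's own lib/proxy helper on one side, a plain dialer on the other). For reference, the same idea expressed with the stock golang.org/x/net/proxy package; this is an assumed substitute for illustration, not the code above:

package main

import (
    "fmt"
    "net"
    "strings"
    "time"

    "golang.org/x/net/proxy"
)

// dialMaybeViaSocks dials address directly, or through a SOCKS5 proxy given
// as "user:pass@host:port", "user@host:port" or "host:port".
func dialMaybeViaSocks(network, address, socksProxy string) (net.Conn, error) {
    base := &net.Dialer{Timeout: 5 * time.Second}
    if socksProxy == "" {
        return base.Dial(network, address)
    }
    var auth *proxy.Auth
    host := socksProxy
    if i := strings.LastIndex(socksProxy, "@"); i >= 0 {
        cred := socksProxy[:i]
        host = socksProxy[i+1:]
        user, pass, _ := strings.Cut(cred, ":")
        auth = &proxy.Auth{User: user, Password: pass}
    }
    dialer, err := proxy.SOCKS5(network, host, auth, base)
    if err != nil {
        return nil, err
    }
    return dialer.Dial(network, address)
}

func main() {
    _, err := dialMaybeViaSocks("tcp", "127.0.0.1:2121", "")
    fmt.Println("direct dial result:", err)
}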
|
||||
@@ -23,7 +23,6 @@ func TestIntegration(t *testing.T) {
|
||||
NilObject: (*hasher.Object)(nil),
|
||||
UnimplementableFsMethods: []string{
|
||||
"OpenWriterAt",
|
||||
"OpenChunkWriter",
|
||||
},
|
||||
UnimplementableObjectMethods: []string{},
|
||||
}
|
||||
|
||||
@@ -67,13 +67,9 @@ const (
|
||||
legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
|
||||
legacyConfigVersion = 0
|
||||
|
||||
teliaseCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
|
||||
teliaseCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
|
||||
teliaseCloudClientID = "desktop"
|
||||
|
||||
telianoCloudTokenURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/token"
|
||||
telianoCloudAuthURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/auth"
|
||||
telianoCloudClientID = "desktop"
|
||||
teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
|
||||
teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
|
||||
teliaCloudClientID = "desktop"
|
||||
|
||||
tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
|
||||
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
|
||||
@@ -142,11 +138,8 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
|
||||
Value: "legacy",
|
||||
Help: "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
|
||||
}, {
|
||||
Value: "telia_se",
|
||||
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud (Sweden).",
|
||||
}, {
|
||||
Value: "telia_no",
|
||||
Help: "Telia Sky authentication.\nUse this if you are using Telia Sky (Norway).",
|
||||
Value: "telia",
|
||||
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud.",
|
||||
}, {
|
||||
Value: "tele2",
|
||||
Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
|
||||
@@ -245,32 +238,17 @@ machines.`)
|
||||
return nil, fmt.Errorf("error while saving token: %w", err)
|
||||
}
|
||||
return fs.ConfigGoto("choose_device")
|
||||
case "telia_se": // telia_se cloud config
|
||||
case "telia": // telia cloud config
|
||||
m.Set("configVersion", fmt.Sprint(configVersion))
|
||||
m.Set(configClientID, teliaseCloudClientID)
|
||||
m.Set(configTokenURL, teliaseCloudTokenURL)
|
||||
m.Set(configClientID, teliaCloudClientID)
|
||||
m.Set(configTokenURL, teliaCloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: teliaseCloudAuthURL,
|
||||
TokenURL: teliaseCloudTokenURL,
|
||||
AuthURL: teliaCloudAuthURL,
|
||||
TokenURL: teliaCloudTokenURL,
|
||||
},
|
||||
ClientID: teliaseCloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
},
|
||||
})
|
||||
case "telia_no": // telia_no cloud config
|
||||
m.Set("configVersion", fmt.Sprint(configVersion))
|
||||
m.Set(configClientID, telianoCloudClientID)
|
||||
m.Set(configTokenURL, telianoCloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: telianoCloudAuthURL,
|
||||
TokenURL: telianoCloudTokenURL,
|
||||
},
|
||||
ClientID: telianoCloudClientID,
|
||||
ClientID: teliaCloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
},
|
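The jottacloud hunk only changes which whitelabel endpoints get wired into the OAuth2 config (telia versus telia_se/telia_no). The underlying shape is a stock golang.org/x/oauth2 config with a custom endpoint; a reduced sketch with placeholder URLs and an assumed localhost redirect:

package main

import (
    "fmt"

    "golang.org/x/oauth2"
)

func main() {
    conf := &oauth2.Config{
        ClientID: "desktop",
        Endpoint: oauth2.Endpoint{
            AuthURL:  "https://cloud-auth.example.com/auth/realms/example/protocol/openid-connect/auth",
            TokenURL: "https://cloud-auth.example.com/auth/realms/example/protocol/openid-connect/token",
        },
        Scopes:      []string{"openid", "jotta-default", "offline_access"},
        RedirectURL: "http://127.0.0.1:53682/",
    }
    // Send the user to this URL, then exchange the returned code with conf.Exchange.
    fmt.Println(conf.AuthCodeURL("state", oauth2.AccessTypeOffline))
}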
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
@@ -244,7 +243,7 @@ type Fs struct {
|
||||
precision time.Duration // precision of local filesystem
|
||||
warnedMu sync.Mutex // used for locking access to 'warned'.
|
||||
warned map[string]struct{} // whether we have warned about this string
|
||||
xattrSupported atomic.Int32 // whether xattrs are supported
|
||||
xattrSupported int32 // whether xattrs are supported (atomic access)
|
||||
|
||||
// do os.Lstat or os.Stat
|
||||
lstat func(name string) (os.FileInfo, error)
|
||||
@@ -292,7 +291,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
lstat: os.Lstat,
|
||||
}
|
||||
if xattrSupported {
|
||||
f.xattrSupported.Store(1)
|
||||
f.xattrSupported = 1
|
||||
}
|
||||
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
|
||||
f.features = (&fs.Features{
|
||||
@@ -642,13 +641,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
//
|
||||
// If it isn't empty it will return an error
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
localPath := f.localPath(dir)
|
||||
if fi, err := os.Stat(localPath); err != nil {
|
||||
return err
|
||||
} else if !fi.IsDir() {
|
||||
return fs.ErrorIsFile
|
||||
}
|
||||
return os.Remove(localPath)
|
||||
return os.Remove(f.localPath(dir))
|
||||
}
|
||||
|
||||
// Precision of the file system
|
||||
|
||||
@@ -6,6 +6,7 @@ package local
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
|
||||
"github.com/pkg/xattr"
|
||||
@@ -27,7 +28,7 @@ func (f *Fs) xattrIsNotSupported(err error) bool {
|
||||
// Xattrs not supported can be ENOTSUP or ENOATTR or EINVAL (on Solaris)
|
||||
if xattrErr.Err == syscall.EINVAL || xattrErr.Err == syscall.ENOTSUP || xattrErr.Err == xattr.ENOATTR {
|
||||
// Show xattrs not supported
|
||||
if f.xattrSupported.CompareAndSwap(1, 0) {
|
||||
if atomic.CompareAndSwapInt32(&f.xattrSupported, 1, 0) {
|
||||
fs.Errorf(f, "xattrs not supported - disabling: %v", err)
|
||||
}
|
||||
return true
|
||||
@@ -40,7 +41,7 @@ func (f *Fs) xattrIsNotSupported(err error) bool {
|
||||
// It doesn't return any attributes owned by this backend in
|
||||
// metadataKeys
|
||||
func (o *Object) getXattr() (metadata fs.Metadata, err error) {
|
||||
if !xattrSupported || o.fs.xattrSupported.Load() == 0 {
|
||||
if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
var list []string
|
||||
@@ -89,7 +90,7 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
|
||||
//
|
||||
// It doesn't set any attributes owned by this backend in metadataKeys
|
||||
func (o *Object) setXattr(metadata fs.Metadata) (err error) {
|
||||
if !xattrSupported || o.fs.xattrSupported.Load() == 0 {
|
||||
if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
|
||||
return nil
|
||||
}
|
||||
for k, value := range metadata {
|
||||
|
||||
@@ -767,17 +767,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if apiError, ok := err.(*Error); ok {
|
||||
// Work around a bug maybe in opendrive or maybe in rclone.
|
||||
//
|
||||
// We should know whether the folder exists or not by the call to
|
||||
// FindDir above so exactly why it is not found here is a mystery.
|
||||
//
|
||||
// This manifests as a failure in fs/sync TestSyncOverlapWithFilter
|
||||
if apiError.Info.Message == "Folder is already deleted" {
|
||||
return fs.DirEntries{}, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("failed to get folder list: %w", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
|
||||
"github.com/oracle/oci-go-sdk/v65/common"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -127,3 +128,18 @@ func useBYOKCopyObject(fs *Fs, request *objectstorage.CopyObjectRequest) {
|
||||
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
|
||||
}
|
||||
}
|
||||
|
||||
func useBYOKUpload(fs *Fs, request *transfer.UploadRequest) {
|
||||
if fs.opt.SSEKMSKeyID != "" {
|
||||
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
|
||||
}
|
||||
if fs.opt.SSECustomerAlgorithm != "" {
|
||||
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
|
||||
}
|
||||
if fs.opt.SSECustomerKey != "" {
|
||||
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
|
||||
}
|
||||
if fs.opt.SSECustomerKeySha256 != "" {
|
||||
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,7 +6,6 @@ package oracleobjectstorage
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -197,32 +196,6 @@ func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string
|
||||
// for "dir" and it returns "dirKey"
|
||||
func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory string) (
|
||||
uploads []*objectstorage.MultipartUpload, err error) {
|
||||
return f.listMultipartUploadsObject(ctx, bucketName, directory, false)
|
||||
}
|
||||
|
||||
// findLatestMultipartUpload finds the most recent outstanding multipart upload for (bucket, key)
|
||||
//
|
||||
// Note that rather lazily we treat key as a prefix, so it matches
|
||||
// directories and objects. This could surprise the user if they ask
|
||||
// for "dir" and it returns "dirKey"
|
||||
func (f *Fs) findLatestMultipartUpload(ctx context.Context, bucketName, directory string) (
|
||||
uploads []*objectstorage.MultipartUpload, err error) {
|
||||
pastUploads, err := f.listMultipartUploadsObject(ctx, bucketName, directory, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(pastUploads) > 0 {
|
||||
sort.Slice(pastUploads, func(i, j int) bool {
|
||||
return pastUploads[i].TimeCreated.After(pastUploads[j].TimeCreated.Time)
|
||||
})
|
||||
return pastUploads[:1], nil
|
||||
}
|
||||
return nil, err
|
||||
}
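findLatestMultipartUpload above keeps only the newest outstanding upload by sorting on TimeCreated. A small self-contained sketch of that selection, with a placeholder struct instead of the OCI SDK's MultipartUpload:

package main

import (
	"fmt"
	"sort"
	"time"
)

// upload is a placeholder for objectstorage.MultipartUpload.
type upload struct {
	ID          string
	TimeCreated time.Time
}

// latest returns the single newest upload, or nil if there are none.
func latest(uploads []upload) *upload {
	if len(uploads) == 0 {
		return nil
	}
	sort.Slice(uploads, func(i, j int) bool {
		return uploads[i].TimeCreated.After(uploads[j].TimeCreated)
	})
	return &uploads[0]
}

func main() {
	now := time.Now()
	ups := []upload{
		{ID: "older", TimeCreated: now.Add(-time.Hour)},
		{ID: "newest", TimeCreated: now},
	}
	fmt.Println(latest(ups).ID) // newest
}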
|
||||
|
||||
func (f *Fs) listMultipartUploadsObject(ctx context.Context, bucketName, directory string, exact bool) (
|
||||
uploads []*objectstorage.MultipartUpload, err error) {
|
||||
|
||||
uploads = []*objectstorage.MultipartUpload{}
|
||||
req := objectstorage.ListMultipartUploadsRequest{
|
||||
@@ -244,13 +217,7 @@ func (f *Fs) listMultipartUploadsObject(ctx context.Context, bucketName, directo
|
||||
if directory != "" && item.Object != nil && !strings.HasPrefix(*item.Object, directory) {
|
||||
continue
|
||||
}
|
||||
if exact {
|
||||
if *item.Object == directory {
|
||||
uploads = append(uploads, &response.Items[index])
|
||||
}
|
||||
} else {
|
||||
uploads = append(uploads, &response.Items[index])
|
||||
}
|
||||
uploads = append(uploads, &response.Items[index])
|
||||
}
|
||||
if response.OpcNextPage == nil {
|
||||
break
|
||||
@@ -259,34 +226,3 @@ func (f *Fs) listMultipartUploadsObject(ctx context.Context, bucketName, directo
|
||||
}
|
||||
return uploads, nil
|
||||
}
|
||||
|
||||
func (f *Fs) listMultipartUploadParts(ctx context.Context, bucketName, bucketPath string, uploadID string) (
|
||||
uploadedParts map[int]objectstorage.MultipartUploadPartSummary, err error) {
|
||||
uploadedParts = make(map[int]objectstorage.MultipartUploadPartSummary)
|
||||
req := objectstorage.ListMultipartUploadPartsRequest{
|
||||
NamespaceName: common.String(f.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(bucketPath),
|
||||
UploadId: common.String(uploadID),
|
||||
Limit: common.Int(1000),
|
||||
}
|
||||
|
||||
var response objectstorage.ListMultipartUploadPartsResponse
|
||||
for {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
response, err = f.srv.ListMultipartUploadParts(ctx, req)
|
||||
return shouldRetry(ctx, response.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
return uploadedParts, err
|
||||
}
|
||||
for _, item := range response.Items {
|
||||
uploadedParts[*item.PartNumber] = item
|
||||
}
|
||||
if response.OpcNextPage == nil {
|
||||
break
|
||||
}
|
||||
req.Page = response.OpcNextPage
|
||||
}
|
||||
return uploadedParts, nil
|
||||
}
|
||||
|
||||
@@ -1,441 +0,0 @@
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
package oracleobjectstorage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/swift/v2"
|
||||
"github.com/rclone/rclone/lib/multipart"
|
||||
"github.com/rclone/rclone/lib/pool"
|
||||
"golang.org/x/net/http/httpguts"
|
||||
|
||||
"github.com/oracle/oci-go-sdk/v65/common"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/chunksize"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
)
|
||||
|
||||
var warnStreamUpload sync.Once
|
||||
|
||||
// Info needed for an upload
|
||||
type uploadInfo struct {
|
||||
req *objectstorage.PutObjectRequest
|
||||
md5sumHex string
|
||||
}
|
||||
|
||||
type objectChunkWriter struct {
|
||||
chunkSize int64
|
||||
size int64
|
||||
f *Fs
|
||||
bucket *string
|
||||
key *string
|
||||
uploadID *string
|
||||
partsToCommit []objectstorage.CommitMultipartUploadPartDetails
|
||||
partsToCommitMu sync.Mutex
|
||||
existingParts map[int]objectstorage.MultipartUploadPartSummary
|
||||
eTag string
|
||||
md5sMu sync.Mutex
|
||||
md5s []byte
|
||||
ui uploadInfo
|
||||
o *Object
|
||||
}
|
||||
|
||||
func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.Reader, options ...fs.OpenOption) error {
|
||||
_, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
|
||||
Open: o.fs,
|
||||
OpenOptions: options,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// OpenChunkWriter returns the chunk size and a ChunkWriter
|
||||
//
|
||||
// Pass in the remote and the src object
|
||||
// You can also use options to hint at the desired chunk size
|
||||
func (f *Fs) OpenChunkWriter(
|
||||
ctx context.Context,
|
||||
remote string,
|
||||
src fs.ObjectInfo,
|
||||
options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
|
||||
// Temporary Object under construction
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
ui, err := o.prepareUpload(ctx, src, options)
|
||||
if err != nil {
|
||||
return info, nil, fmt.Errorf("failed to prepare upload: %w", err)
|
||||
}
|
||||
|
||||
uploadParts := f.opt.MaxUploadParts
|
||||
if uploadParts < 1 {
|
||||
uploadParts = 1
|
||||
} else if uploadParts > maxUploadParts {
|
||||
uploadParts = maxUploadParts
|
||||
}
|
||||
size := src.Size()
|
||||
|
||||
// calculate size of parts
|
||||
chunkSize := f.opt.ChunkSize
|
||||
|
||||
// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
|
||||
// buffers here (default 5 MiB). With a maximum number of parts (10,000) this will be a file of
|
||||
// 48 GiB which seems like a not too unreasonable limit.
|
||||
if size == -1 {
|
||||
warnStreamUpload.Do(func() {
|
||||
fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
|
||||
f.opt.ChunkSize, fs.SizeSuffix(int64(chunkSize)*int64(uploadParts)))
|
||||
})
|
||||
} else {
|
||||
chunkSize = chunksize.Calculator(src, size, uploadParts, chunkSize)
|
||||
}
|
||||
|
||||
uploadID, existingParts, err := o.createMultipartUpload(ctx, ui.req)
|
||||
if err != nil {
|
||||
return info, nil, fmt.Errorf("create multipart upload request failed: %w", err)
|
||||
}
|
||||
bucketName, bucketPath := o.split()
|
||||
chunkWriter := &objectChunkWriter{
|
||||
chunkSize: int64(chunkSize),
|
||||
size: size,
|
||||
f: f,
|
||||
bucket: &bucketName,
|
||||
key: &bucketPath,
|
||||
uploadID: &uploadID,
|
||||
existingParts: existingParts,
|
||||
ui: ui,
|
||||
o: o,
|
||||
}
|
||||
info = fs.ChunkWriterInfo{
|
||||
ChunkSize: int64(chunkSize),
|
||||
Concurrency: o.fs.opt.UploadConcurrency,
|
||||
LeavePartsOnError: o.fs.opt.LeavePartsOnError,
|
||||
}
|
||||
fs.Debugf(o, "open chunk writer: started multipart upload: %v", uploadID)
|
||||
return info, chunkWriter, err
|
||||
}
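The comment in OpenChunkWriter notes that a stream of unknown size is capped at chunk size times the maximum part count. A quick arithmetic check of that limit, assuming the default 5 MiB chunk and the 10,000 part cap:

package main

import "fmt"

func main() {
	const (
		chunkSize = 5 * 1024 * 1024 // assumed default chunk size of 5 MiB
		maxParts  = 10000           // multipart part limit
	)
	maxBytes := int64(chunkSize) * int64(maxParts)
	fmt.Printf("max streaming upload ~ %.1f GiB\n", float64(maxBytes)/(1<<30))
	// prints: max streaming upload ~ 48.8 GiB
}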
|
||||
|
||||
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
|
||||
func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error) {
|
||||
if chunkNumber < 0 {
|
||||
err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber)
|
||||
return -1, err
|
||||
}
|
||||
// Only account after the checksum reads have been done
|
||||
if do, ok := reader.(pool.DelayAccountinger); ok {
|
||||
// To figure out this number, do a transfer and if the accounted size is 0 or a
|
||||
// multiple of what it should be, increase or decrease this number.
|
||||
do.DelayAccounting(2)
|
||||
}
|
||||
m := md5.New()
|
||||
currentChunkSize, err := io.Copy(m, reader)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
// If no data read, don't write the chunk
|
||||
if currentChunkSize == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
md5sumBinary := m.Sum([]byte{})
|
||||
w.addMd5(&md5sumBinary, int64(chunkNumber))
|
||||
md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])
|
||||
|
||||
// Object storage requires 1 <= PartNumber <= 10000
|
||||
ossPartNumber := chunkNumber + 1
|
||||
if existing, ok := w.existingParts[ossPartNumber]; ok {
|
||||
if md5sum == *existing.Md5 {
|
||||
fs.Debugf(w.o, "matched uploaded part found, part num %d, skipping part, md5=%v", *existing.PartNumber, md5sum)
|
||||
w.addCompletedPart(existing.PartNumber, existing.Etag)
|
||||
return currentChunkSize, nil
|
||||
}
|
||||
}
|
||||
req := objectstorage.UploadPartRequest{
|
||||
NamespaceName: common.String(w.f.opt.Namespace),
|
||||
BucketName: w.bucket,
|
||||
ObjectName: w.key,
|
||||
UploadId: w.uploadID,
|
||||
UploadPartNum: common.Int(ossPartNumber),
|
||||
ContentLength: common.Int64(currentChunkSize),
|
||||
ContentMD5: common.String(md5sum),
|
||||
}
|
||||
w.o.applyPartUploadOptions(w.ui.req, &req)
|
||||
var resp objectstorage.UploadPartResponse
|
||||
err = w.f.pacer.Call(func() (bool, error) {
|
||||
// req.UploadPartBody = io.NopCloser(bytes.NewReader(buf))
|
||||
// rewind the reader on retry and after reading md5
|
||||
_, err = reader.Seek(0, io.SeekStart)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
req.UploadPartBody = io.NopCloser(reader)
|
||||
resp, err = w.f.srv.UploadPart(ctx, req)
|
||||
if err != nil {
|
||||
if ossPartNumber <= 8 {
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
}
|
||||
// retry all chunks once we have done the first few
|
||||
return true, err
|
||||
}
|
||||
return false, err
|
||||
})
|
||||
if err != nil {
|
||||
fs.Errorf(w.o, "multipart upload failed to upload part:%d err: %v", ossPartNumber, err)
|
||||
return -1, fmt.Errorf("multipart upload failed to upload part: %w", err)
|
||||
}
|
||||
w.addCompletedPart(&ossPartNumber, resp.ETag)
|
||||
return currentChunkSize, err
|
||||
|
||||
}
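WriteChunk above reuses a previously uploaded part when its recorded MD5 matches the part about to be sent. The following sketch isolates that resume check; the map of existing parts is a simplified stand-in for the SDK's MultipartUploadPartSummary values.

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

// canSkip reports whether a previously uploaded part with this number
// already matches the data we are about to send (compared by base64 MD5).
func canSkip(existing map[int]string, partNum int, data []byte) bool {
	want, ok := existing[partNum]
	if !ok {
		return false
	}
	sum := md5.Sum(data)
	return want == base64.StdEncoding.EncodeToString(sum[:])
}

func main() {
	data := []byte("chunk payload")
	sum := md5.Sum(data)
	existing := map[int]string{1: base64.StdEncoding.EncodeToString(sum[:])}
	fmt.Println(canSkip(existing, 1, data))            // true: reuse part 1
	fmt.Println(canSkip(existing, 2, data))            // false: part 2 was never uploaded
	fmt.Println(canSkip(existing, 1, []byte("other"))) // false: content changed
}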
|
||||
|
||||
// add a part number and etag to the completed parts
|
||||
func (w *objectChunkWriter) addCompletedPart(partNum *int, eTag *string) {
|
||||
w.partsToCommitMu.Lock()
|
||||
defer w.partsToCommitMu.Unlock()
|
||||
w.partsToCommit = append(w.partsToCommit, objectstorage.CommitMultipartUploadPartDetails{
|
||||
PartNum: partNum,
|
||||
Etag: eTag,
|
||||
})
|
||||
}
|
||||
|
||||
func (w *objectChunkWriter) Close(ctx context.Context) (err error) {
|
||||
req := objectstorage.CommitMultipartUploadRequest{
|
||||
NamespaceName: common.String(w.f.opt.Namespace),
|
||||
BucketName: w.bucket,
|
||||
ObjectName: w.key,
|
||||
UploadId: w.uploadID,
|
||||
}
|
||||
req.PartsToCommit = w.partsToCommit
|
||||
var resp objectstorage.CommitMultipartUploadResponse
|
||||
err = w.f.pacer.Call(func() (bool, error) {
|
||||
resp, err = w.f.srv.CommitMultipartUpload(ctx, req)
|
||||
// if multipart is corrupted, we will abort the uploadId
|
||||
if isMultiPartUploadCorrupted(err) {
|
||||
fs.Debugf(w.o, "multipart uploadId %v is corrupted, aborting...", *w.uploadID)
|
||||
_ = w.Abort(ctx)
|
||||
return false, err
|
||||
}
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.eTag = *resp.ETag
|
||||
hashOfHashes := md5.Sum(w.md5s)
|
||||
wantMultipartMd5 := fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hashOfHashes[:]), len(w.partsToCommit))
|
||||
gotMultipartMd5 := *resp.OpcMultipartMd5
|
||||
if wantMultipartMd5 != gotMultipartMd5 {
|
||||
fs.Errorf(w.o, "multipart upload corrupted: multipart md5 differ: expecting %s but got %s", wantMultipartMd5, gotMultipartMd5)
|
||||
return fmt.Errorf("multipart upload corrupted: md5 differ: expecting %s but got %s", wantMultipartMd5, gotMultipartMd5)
|
||||
}
|
||||
fs.Debugf(w.o, "multipart upload %v md5 matched: expecting %s and got %s", *w.uploadID, wantMultipartMd5, gotMultipartMd5)
|
||||
return nil
|
||||
}
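Close verifies the commit by recomputing the multipart checksum locally: MD5 each part, concatenate the binary digests in part order, MD5 that buffer, base64-encode the result and append "-<number of parts>". A self-contained sketch of that calculation, with illustrative inputs only:

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

// compositeMD5 mirrors the check above: MD5 of the concatenated per-part
// binary MD5s, base64-encoded, with "-<parts>" appended.
func compositeMD5(parts [][]byte) string {
	var all []byte
	for _, part := range parts {
		sum := md5.Sum(part) // binary MD5 of one uploaded part
		all = append(all, sum[:]...)
	}
	hashOfHashes := md5.Sum(all)
	return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hashOfHashes[:]), len(parts))
}

func main() {
	fmt.Println(compositeMD5([][]byte{[]byte("part one"), []byte("part two")}))
}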
|
||||
|
||||
func isMultiPartUploadCorrupted(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
// Check if this is an oci-err object, and if it is a multipart commit error
|
||||
if ociError, ok := err.(common.ServiceError); ok {
|
||||
// InvalidUploadPart means a part was corrupted or invalid, so the commit cannot succeed
|
||||
if ociError.GetCode() == "InvalidUploadPart" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (w *objectChunkWriter) Abort(ctx context.Context) error {
|
||||
fs.Debugf(w.o, "Cancelling multipart upload")
|
||||
err := w.o.fs.abortMultiPartUpload(
|
||||
ctx,
|
||||
w.bucket,
|
||||
w.key,
|
||||
w.uploadID)
|
||||
if err != nil {
|
||||
fs.Debugf(w.o, "Failed to cancel multipart upload: %v", err)
|
||||
} else {
|
||||
fs.Debugf(w.o, "canceled and aborted multipart upload: %v", *w.uploadID)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// addMd5 adds a binary md5 to the md5 calculated so far
|
||||
func (w *objectChunkWriter) addMd5(md5binary *[]byte, chunkNumber int64) {
|
||||
w.md5sMu.Lock()
|
||||
defer w.md5sMu.Unlock()
|
||||
start := chunkNumber * md5.Size
|
||||
end := start + md5.Size
|
||||
if extend := end - int64(len(w.md5s)); extend > 0 {
|
||||
w.md5s = append(w.md5s, make([]byte, extend)...)
|
||||
}
|
||||
copy(w.md5s[start:end], (*md5binary)[:])
|
||||
}
|
||||
|
||||
func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {
|
||||
bucket, bucketPath := o.split()
|
||||
|
||||
ui.req = &objectstorage.PutObjectRequest{
|
||||
NamespaceName: common.String(o.fs.opt.Namespace),
|
||||
BucketName: common.String(bucket),
|
||||
ObjectName: common.String(bucketPath),
|
||||
}
|
||||
|
||||
// Set the mtime in the metadata
|
||||
modTime := src.ModTime(ctx)
|
||||
// Fetch metadata if --metadata is in use
|
||||
meta, err := fs.GetMetadataOptions(ctx, src, options)
|
||||
if err != nil {
|
||||
return ui, fmt.Errorf("failed to read metadata from source object: %w", err)
|
||||
}
|
||||
ui.req.OpcMeta = make(map[string]string, len(meta)+2)
|
||||
// merge metadata into request and user metadata
|
||||
for k, v := range meta {
|
||||
pv := common.String(v)
|
||||
k = strings.ToLower(k)
|
||||
switch k {
|
||||
case "cache-control":
|
||||
ui.req.CacheControl = pv
|
||||
case "content-disposition":
|
||||
ui.req.ContentDisposition = pv
|
||||
case "content-encoding":
|
||||
ui.req.ContentEncoding = pv
|
||||
case "content-language":
|
||||
ui.req.ContentLanguage = pv
|
||||
case "content-type":
|
||||
ui.req.ContentType = pv
|
||||
case "tier":
|
||||
// ignore
|
||||
case "mtime":
|
||||
// mtime in meta overrides source ModTime
|
||||
metaModTime, err := time.Parse(time.RFC3339Nano, v)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err)
|
||||
} else {
|
||||
modTime = metaModTime
|
||||
}
|
||||
case "btime":
|
||||
// write as metadata since we can't set it
|
||||
ui.req.OpcMeta[k] = v
|
||||
default:
|
||||
ui.req.OpcMeta[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
// Set the mtime in the metadata
|
||||
ui.req.OpcMeta[metaMtime] = swift.TimeToFloatString(modTime)
|
||||
|
||||
// read the md5sum if available
|
||||
// - for non-multipart
|
||||
// - so we can add a ContentMD5
|
||||
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
|
||||
// - for multipart provided checksums aren't disabled
|
||||
// - so we can add the md5sum in the metadata as metaMD5Hash
|
||||
size := src.Size()
|
||||
isMultipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
|
||||
var md5sumBase64 string
|
||||
if !isMultipart || !o.fs.opt.DisableChecksum {
|
||||
ui.md5sumHex, err = src.Hash(ctx, hash.MD5)
|
||||
if err == nil && matchMd5.MatchString(ui.md5sumHex) {
|
||||
hashBytes, err := hex.DecodeString(ui.md5sumHex)
|
||||
if err == nil {
|
||||
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
|
||||
if isMultipart && !o.fs.opt.DisableChecksum {
|
||||
// Set the md5sum as metadata on the object if
|
||||
// - a multipart upload
|
||||
// - the ETag is not an MD5, e.g. when using SSE/SSE-C
|
||||
// provided checksums aren't disabled
|
||||
ui.req.OpcMeta[metaMD5Hash] = md5sumBase64
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Set the content type if it isn't set already
|
||||
if ui.req.ContentType == nil {
|
||||
ui.req.ContentType = common.String(fs.MimeType(ctx, src))
|
||||
}
|
||||
if size >= 0 {
|
||||
ui.req.ContentLength = common.Int64(size)
|
||||
}
|
||||
if md5sumBase64 != "" {
|
||||
ui.req.ContentMD5 = &md5sumBase64
|
||||
}
|
||||
o.applyPutOptions(ui.req, options...)
|
||||
useBYOKPutObject(o.fs, ui.req)
|
||||
if o.fs.opt.StorageTier != "" {
|
||||
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
|
||||
if !ok {
|
||||
return ui, fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
|
||||
}
|
||||
ui.req.StorageTier = storageTier
|
||||
}
|
||||
// Check metadata keys and values are valid
|
||||
for key, value := range ui.req.OpcMeta {
|
||||
if !httpguts.ValidHeaderFieldName(key) {
|
||||
fs.Errorf(o, "Dropping invalid metadata key %q", key)
|
||||
delete(ui.req.OpcMeta, key)
|
||||
} else if value == "" {
|
||||
fs.Errorf(o, "Dropping nil metadata value for key %q", key)
|
||||
delete(ui.req.OpcMeta, key)
|
||||
} else if !httpguts.ValidHeaderFieldValue(value) {
|
||||
fs.Errorf(o, "Dropping invalid metadata value %q for key %q", value, key)
|
||||
delete(ui.req.OpcMeta, key)
|
||||
}
|
||||
}
|
||||
return ui, nil
|
||||
}
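prepareUpload finishes by dropping any metadata key or value that would not survive as an HTTP header. A minimal sketch of that validation step, using the same golang.org/x/net/http/httpguts helpers (the x/net module must be available):

package main

import (
	"fmt"

	"golang.org/x/net/http/httpguts"
)

// cleanMeta drops metadata entries that could not be sent as HTTP headers.
func cleanMeta(meta map[string]string) {
	for key, value := range meta {
		switch {
		case !httpguts.ValidHeaderFieldName(key):
			fmt.Printf("dropping invalid metadata key %q\n", key)
			delete(meta, key)
		case value == "":
			fmt.Printf("dropping empty metadata value for key %q\n", key)
			delete(meta, key)
		case !httpguts.ValidHeaderFieldValue(value):
			fmt.Printf("dropping invalid metadata value %q for key %q\n", value, key)
			delete(meta, key)
		}
	}
}

func main() {
	m := map[string]string{"mtime": "1700000000.0", "bad key\n": "x", "empty": ""}
	cleanMeta(m)
	fmt.Println(m) // only the valid "mtime" entry survives
}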
|
||||
|
||||
func (o *Object) createMultipartUpload(ctx context.Context, putReq *objectstorage.PutObjectRequest) (
|
||||
uploadID string, existingParts map[int]objectstorage.MultipartUploadPartSummary, err error) {
|
||||
bucketName, bucketPath := o.split()
|
||||
f := o.fs
|
||||
if f.opt.AttemptResumeUpload {
|
||||
fs.Debugf(o, "attempting to resume upload for %v (if any)", o.remote)
|
||||
resumeUploads, err := o.fs.findLatestMultipartUpload(ctx, bucketName, bucketPath)
|
||||
if err == nil && len(resumeUploads) > 0 {
|
||||
uploadID = *resumeUploads[0].UploadId
|
||||
existingParts, err = f.listMultipartUploadParts(ctx, bucketName, bucketPath, uploadID)
|
||||
if err == nil {
|
||||
fs.Debugf(o, "resuming with existing upload id: %v", uploadID)
|
||||
return uploadID, existingParts, err
|
||||
}
|
||||
}
|
||||
}
|
||||
req := objectstorage.CreateMultipartUploadRequest{
|
||||
NamespaceName: common.String(o.fs.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
}
|
||||
req.Object = common.String(bucketPath)
|
||||
if o.fs.opt.StorageTier != "" {
|
||||
storageTier, ok := objectstorage.GetMappingStorageTierEnum(o.fs.opt.StorageTier)
|
||||
if !ok {
|
||||
return "", nil, fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
|
||||
}
|
||||
req.StorageTier = storageTier
|
||||
}
|
||||
o.applyMultipartUploadOptions(putReq, &req)
|
||||
|
||||
var resp objectstorage.CreateMultipartUploadResponse
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.CreateMultipartUpload(ctx, req)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
return "", existingParts, err
|
||||
}
|
||||
existingParts = make(map[int]objectstorage.MultipartUploadPartSummary)
|
||||
uploadID = *resp.UploadId
|
||||
fs.Debugf(o, "created new upload id: %v", uploadID)
|
||||
return uploadID, existingParts, err
|
||||
}
|
||||
@@ -4,14 +4,12 @@
|
||||
package oracleobjectstorage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -20,8 +18,10 @@ import (
|
||||
"github.com/ncw/swift/v2"
|
||||
"github.com/oracle/oci-go-sdk/v65/common"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
)
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -367,28 +367,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
|
||||
return resp.HTTPResponse().Body, nil
|
||||
}
|
||||
|
||||
func isZeroLength(streamReader io.Reader) bool {
|
||||
switch v := streamReader.(type) {
|
||||
case *bytes.Buffer:
|
||||
return v.Len() == 0
|
||||
case *bytes.Reader:
|
||||
return v.Len() == 0
|
||||
case *strings.Reader:
|
||||
return v.Len() == 0
|
||||
case *os.File:
|
||||
fi, err := v.Stat()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return fi.Size() == 0
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Update an object if it has changed
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
bucketName, _ := o.split()
|
||||
bucketName, bucketPath := o.split()
|
||||
err = o.fs.makeBucket(ctx, bucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -396,24 +377,142 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
|
||||
// determine whether to use a single part or multipart upload.
|
||||
size := src.Size()
|
||||
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
|
||||
if isZeroLength(in) {
|
||||
multipart = false
|
||||
multipart := size >= int64(o.fs.opt.UploadCutoff)
|
||||
|
||||
// Set the mtime in the metadata
|
||||
modTime := src.ModTime(ctx)
|
||||
metadata := map[string]string{
|
||||
metaMtime: swift.TimeToFloatString(modTime),
|
||||
}
|
||||
|
||||
// read the md5sum if available
|
||||
// - for non-multipart
|
||||
// - so we can add a ContentMD5
|
||||
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
|
||||
// - for multipart provided checksums aren't disabled
|
||||
// - so we can add the md5sum in the metadata as metaMD5Hash
|
||||
var md5sumBase64 string
|
||||
var md5sumHex string
|
||||
if !multipart || !o.fs.opt.DisableChecksum {
|
||||
md5sumHex, err = src.Hash(ctx, hash.MD5)
|
||||
if err == nil && matchMd5.MatchString(md5sumHex) {
|
||||
hashBytes, err := hex.DecodeString(md5sumHex)
|
||||
if err == nil {
|
||||
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
|
||||
if multipart && !o.fs.opt.DisableChecksum {
|
||||
// Set the md5sum as metadata on the object if
|
||||
// - a multipart upload
|
||||
// - the ETag is not an MD5, e.g. when using SSE/SSE-C
|
||||
// provided checksums aren't disabled
|
||||
metadata[metaMD5Hash] = md5sumBase64
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Guess the content type
|
||||
mimeType := fs.MimeType(ctx, src)
|
||||
|
||||
if multipart {
|
||||
err = o.uploadMultipart(ctx, src, in)
|
||||
chunkSize := int64(o.fs.opt.ChunkSize)
|
||||
uploadRequest := transfer.UploadRequest{
|
||||
NamespaceName: common.String(o.fs.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(bucketPath),
|
||||
ContentType: common.String(mimeType),
|
||||
PartSize: common.Int64(chunkSize),
|
||||
AllowMultipartUploads: common.Bool(true),
|
||||
AllowParrallelUploads: common.Bool(true),
|
||||
ObjectStorageClient: o.fs.srv,
|
||||
EnableMultipartChecksumVerification: common.Bool(!o.fs.opt.DisableChecksum),
|
||||
NumberOfGoroutines: common.Int(o.fs.opt.UploadConcurrency),
|
||||
Metadata: metadataWithOpcPrefix(metadata),
|
||||
}
|
||||
if o.fs.opt.StorageTier != "" {
|
||||
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
|
||||
if !ok {
|
||||
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
|
||||
}
|
||||
uploadRequest.StorageTier = storageTier
|
||||
}
|
||||
o.applyMultiPutOptions(&uploadRequest, options...)
|
||||
useBYOKUpload(o.fs, &uploadRequest)
|
||||
uploadStreamRequest := transfer.UploadStreamRequest{
|
||||
UploadRequest: uploadRequest,
|
||||
StreamReader: in,
|
||||
}
|
||||
uploadMgr := transfer.NewUploadManager()
|
||||
var uploadID = ""
|
||||
|
||||
defer atexit.OnError(&err, func() {
|
||||
if uploadID == "" {
|
||||
return
|
||||
}
|
||||
if o.fs.opt.LeavePartsOnError {
|
||||
return
|
||||
}
|
||||
fs.Debugf(o, "Cancelling multipart upload")
|
||||
errCancel := o.fs.abortMultiPartUpload(
|
||||
context.Background(),
|
||||
bucketName,
|
||||
bucketPath,
|
||||
uploadID)
|
||||
if errCancel != nil {
|
||||
fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
|
||||
}
|
||||
})()
|
||||
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
uploadResponse, err := uploadMgr.UploadStream(ctx, uploadStreamRequest)
|
||||
var httpResponse *http.Response
|
||||
if err == nil {
|
||||
if uploadResponse.Type == transfer.MultipartUpload {
|
||||
if uploadResponse.MultipartUploadResponse != nil {
|
||||
httpResponse = uploadResponse.MultipartUploadResponse.HTTPResponse()
|
||||
}
|
||||
} else {
|
||||
if uploadResponse.SinglepartUploadResponse != nil {
|
||||
httpResponse = uploadResponse.SinglepartUploadResponse.HTTPResponse()
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
uploadID := ""
|
||||
if uploadResponse.MultipartUploadResponse != nil && uploadResponse.MultipartUploadResponse.UploadID != nil {
|
||||
uploadID = *uploadResponse.MultipartUploadResponse.UploadID
|
||||
fs.Debugf(o, "multipart streaming upload failed, aborting uploadID: %v, may retry", uploadID)
|
||||
_ = o.fs.abortMultiPartUpload(ctx, bucketName, bucketPath, uploadID)
|
||||
}
|
||||
}
|
||||
return shouldRetry(ctx, httpResponse, err)
|
||||
})
|
||||
if err != nil {
|
||||
fs.Errorf(o, "multipart streaming upload failed %v", err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
ui, err := o.prepareUpload(ctx, src, options)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to prepare upload: %w", err)
|
||||
req := objectstorage.PutObjectRequest{
|
||||
NamespaceName: common.String(o.fs.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(bucketPath),
|
||||
ContentType: common.String(mimeType),
|
||||
PutObjectBody: io.NopCloser(in),
|
||||
OpcMeta: metadata,
|
||||
}
|
||||
if size >= 0 {
|
||||
req.ContentLength = common.Int64(size)
|
||||
}
|
||||
if o.fs.opt.StorageTier != "" {
|
||||
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
|
||||
if !ok {
|
||||
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
|
||||
}
|
||||
req.StorageTier = storageTier
|
||||
}
|
||||
o.applyPutOptions(&req, options...)
|
||||
useBYOKPutObject(o.fs, &req)
|
||||
var resp objectstorage.PutObjectResponse
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
ui.req.PutObjectBody = io.NopCloser(in)
|
||||
resp, err = o.fs.srv.PutObject(ctx, *ui.req)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.PutObject(ctx, req)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -492,24 +591,28 @@ func (o *Object) applyGetObjectOptions(req *objectstorage.GetObjectRequest, opti
|
||||
}
|
||||
}
|
||||
|
||||
func (o *Object) applyMultipartUploadOptions(putReq *objectstorage.PutObjectRequest, req *objectstorage.CreateMultipartUploadRequest) {
|
||||
req.ContentType = putReq.ContentType
|
||||
req.ContentLanguage = putReq.ContentLanguage
|
||||
req.ContentEncoding = putReq.ContentEncoding
|
||||
req.ContentDisposition = putReq.ContentDisposition
|
||||
req.CacheControl = putReq.CacheControl
|
||||
req.Metadata = metadataWithOpcPrefix(putReq.OpcMeta)
|
||||
req.OpcSseCustomerAlgorithm = putReq.OpcSseCustomerAlgorithm
|
||||
req.OpcSseCustomerKey = putReq.OpcSseCustomerKey
|
||||
req.OpcSseCustomerKeySha256 = putReq.OpcSseCustomerKeySha256
|
||||
req.OpcSseKmsKeyId = putReq.OpcSseKmsKeyId
|
||||
}
|
||||
|
||||
func (o *Object) applyPartUploadOptions(putReq *objectstorage.PutObjectRequest, req *objectstorage.UploadPartRequest) {
|
||||
req.OpcSseCustomerAlgorithm = putReq.OpcSseCustomerAlgorithm
|
||||
req.OpcSseCustomerKey = putReq.OpcSseCustomerKey
|
||||
req.OpcSseCustomerKeySha256 = putReq.OpcSseCustomerKeySha256
|
||||
req.OpcSseKmsKeyId = putReq.OpcSseKmsKeyId
|
||||
func (o *Object) applyMultiPutOptions(req *transfer.UploadRequest, options ...fs.OpenOption) {
|
||||
// Apply upload options
|
||||
for _, option := range options {
|
||||
key, value := option.Header()
|
||||
lowerKey := strings.ToLower(key)
|
||||
switch lowerKey {
|
||||
case "":
|
||||
// ignore
|
||||
case "content-encoding":
|
||||
req.ContentEncoding = common.String(value)
|
||||
case "content-language":
|
||||
req.ContentLanguage = common.String(value)
|
||||
case "content-type":
|
||||
req.ContentType = common.String(value)
|
||||
default:
|
||||
if strings.HasPrefix(lowerKey, ociMetaPrefix) {
|
||||
req.Metadata[lowerKey] = value
|
||||
} else {
|
||||
fs.Errorf(o, "Don't know how to set key %q on upload", key)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
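applyMultiPutOptions only accepts extra headers that already carry the OCI user-metadata prefix, and the upload request builds its metadata via metadataWithOpcPrefix, whose body is not shown in this hunk. The helper below is therefore an assumption about what such a prefixing function could look like, not the actual implementation; the prefix value is likewise assumed.

package main

import (
	"fmt"
	"strings"
)

// ociMetaPrefix is an assumed value for the user-metadata prefix constant.
const ociMetaPrefix = "opc-meta-"

// withOpcPrefix is a hypothetical helper: ensure every user metadata key
// carries the OCI "opc-meta-" prefix exactly once.
func withOpcPrefix(src map[string]string) map[string]string {
	dst := make(map[string]string, len(src))
	for k, v := range src {
		if !strings.HasPrefix(strings.ToLower(k), ociMetaPrefix) {
			k = ociMetaPrefix + k
		}
		dst[k] = v
	}
	return dst
}

func main() {
	fmt.Println(withOpcPrefix(map[string]string{"mtime": "1700000000.0"}))
	// map[opc-meta-mtime:1700000000.0]
}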
|
||||
|
||||
func metadataWithOpcPrefix(src map[string]string) map[string]string {
|
||||
|
||||
@@ -13,10 +13,9 @@ import (
|
||||
|
||||
const (
|
||||
maxSizeForCopy = 4768 * 1024 * 1024
|
||||
maxUploadParts = 10000
|
||||
defaultUploadConcurrency = 10
|
||||
minChunkSize = fs.SizeSuffix(5 * 1024 * 1024)
|
||||
minChunkSize = fs.SizeSuffix(1024 * 1024 * 5)
|
||||
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
|
||||
defaultUploadConcurrency = 10
|
||||
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
|
||||
minSleep = 10 * time.Millisecond
|
||||
defaultCopyTimeoutDuration = fs.Duration(time.Minute)
|
||||
@@ -56,14 +55,12 @@ type Options struct {
|
||||
ConfigProfile string `config:"config_profile"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
MaxUploadParts int `config:"max_upload_parts"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
DisableChecksum bool `config:"disable_checksum"`
|
||||
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
|
||||
CopyTimeout fs.Duration `config:"copy_timeout"`
|
||||
StorageTier string `config:"storage_tier"`
|
||||
LeavePartsOnError bool `config:"leave_parts_on_error"`
|
||||
AttemptResumeUpload bool `config:"attempt_resume_upload"`
|
||||
NoCheckBucket bool `config:"no_check_bucket"`
|
||||
SSEKMSKeyID string `config:"sse_kms_key_id"`
|
||||
SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
|
||||
@@ -160,8 +157,9 @@ The minimum is 0 and the maximum is 5 GiB.`,
|
||||
Help: `Chunk size to use for uploading.
|
||||
|
||||
When uploading files larger than upload_cutoff or files with unknown
|
||||
size (e.g. from "rclone rcat" or uploaded with "rclone mount" they will be uploaded
|
||||
as multipart uploads using this chunk size.
|
||||
size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google
|
||||
photos or google docs) they will be uploaded as multipart uploads
|
||||
using this chunk size.
|
||||
|
||||
Note that "upload_concurrency" chunks of this size are buffered
|
||||
in memory per transfer.
|
||||
@@ -183,20 +181,6 @@ statistics displayed with "-P" flag.
|
||||
`,
|
||||
Default: minChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "max_upload_parts",
|
||||
Help: `Maximum number of parts in a multipart upload.
|
||||
|
||||
This option defines the maximum number of multipart chunks to use
|
||||
when doing a multipart upload.
|
||||
|
||||
OCI has max parts limit of 10,000 chunks.
|
||||
|
||||
Rclone will automatically increase the chunk size when uploading a
|
||||
large file of a known size to stay below this number of chunks limit.
|
||||
`,
|
||||
Default: maxUploadParts,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_concurrency",
|
||||
Help: `Concurrency for multipart uploads.
|
||||
@@ -254,24 +238,12 @@ to start uploading.`,
|
||||
encoder.EncodeDot,
|
||||
}, {
|
||||
Name: "leave_parts_on_error",
|
||||
Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.
|
||||
Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.
|
||||
|
||||
It should be set to true for resuming uploads across different sessions.
|
||||
|
||||
WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add
|
||||
additional costs if not cleaned up.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "attempt_resume_upload",
|
||||
Help: `If true attempt to resume previously started multipart upload for the object.
|
||||
This can help speed up multipart transfers by resuming uploads from a past session.
|
||||
|
||||
WARNING: If chunk size differs in resumed session from past incomplete session, then the resumed multipart upload is
|
||||
aborted and a new multipart upload is started with the new chunk size.
|
||||
|
||||
The flag leave_parts_on_error must be true for resuming to work, so that parts which were already uploaded successfully can be skipped.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
|
||||
@@ -318,6 +318,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
|
||||
remote := *object.Name
|
||||
remote = f.opt.Enc.ToStandardPath(remote)
|
||||
if !strings.HasPrefix(remote, prefix) {
|
||||
// fs.Debugf(f, "Odd name received %v", object.Name)
|
||||
continue
|
||||
}
|
||||
remote = remote[len(prefix):]
|
||||
@@ -557,15 +558,15 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID *string) (err error) {
|
||||
if uploadID == nil || *uploadID == "" {
|
||||
func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID string) (err error) {
|
||||
if uploadID == "" {
|
||||
return nil
|
||||
}
|
||||
request := objectstorage.AbortMultipartUploadRequest{
|
||||
NamespaceName: common.String(f.opt.Namespace),
|
||||
BucketName: bucketName,
|
||||
ObjectName: bucketPath,
|
||||
UploadId: uploadID,
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(bucketPath),
|
||||
UploadId: common.String(uploadID),
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.AbortMultipartUpload(ctx, request)
|
||||
@@ -588,7 +589,7 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Durat
|
||||
if operations.SkipDestructive(ctx, what, "remove pending upload") {
|
||||
continue
|
||||
}
|
||||
_ = f.abortMultiPartUpload(ctx, upload.Bucket, upload.Object, upload.UploadId)
|
||||
_ = f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
|
||||
}
|
||||
} else {
|
||||
fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.")
|
||||
@@ -683,13 +684,12 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.Copier = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.ListRer = &Fs{}
|
||||
_ fs.Commander = &Fs{}
|
||||
_ fs.CleanUpper = &Fs{}
|
||||
_ fs.OpenChunkWriter = &Fs{}
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.Copier = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.ListRer = &Fs{}
|
||||
_ fs.Commander = &Fs{}
|
||||
_ fs.CleanUpper = &Fs{}
|
||||
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.MimeTyper = &Object{}
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,16 +0,0 @@
|
||||
package protondrive_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/protondrive"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestProtonDrive:",
|
||||
NilObject: (*protondrive.Object)(nil),
|
||||
})
|
||||
}
|
||||
610 backend/s3/s3.go
@@ -53,13 +53,13 @@ import (
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
"github.com/rclone/rclone/lib/bucket"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/multipart"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/pool"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"github.com/rclone/rclone/lib/version"
|
||||
"golang.org/x/net/http/httpguts"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@@ -2279,16 +2279,17 @@ very small even with this flag.
|
||||
encoder.EncodeDot,
|
||||
}, {
|
||||
Name: "memory_pool_flush_time",
|
||||
Default: fs.Duration(time.Minute),
|
||||
Default: memoryPoolFlushTime,
|
||||
Advanced: true,
|
||||
Hide: fs.OptionHideBoth,
|
||||
Help: `How often internal memory buffer pools will be flushed. (no longer used)`,
|
||||
Help: `How often internal memory buffer pools will be flushed.
|
||||
|
||||
Uploads which require additional buffers (e.g. multipart) will use the memory pool for allocations.
|
||||
This option controls how often unused buffers will be removed from the pool.`,
|
||||
}, {
|
||||
Name: "memory_pool_use_mmap",
|
||||
Default: false,
|
||||
Default: memoryPoolUseMmap,
|
||||
Advanced: true,
|
||||
Hide: fs.OptionHideBoth,
|
||||
Help: `Whether to use mmap buffers in internal memory pool. (no longer used)`,
|
||||
Help: `Whether to use mmap buffers in internal memory pool.`,
|
||||
}, {
|
||||
Name: "disable_http2",
|
||||
Default: false,
|
||||
@@ -2439,7 +2440,10 @@ const (
|
||||
minChunkSize = fs.SizeSuffix(1024 * 1024 * 5)
|
||||
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
|
||||
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
|
||||
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
|
||||
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
|
||||
|
||||
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
|
||||
memoryPoolUseMmap = false
|
||||
maxExpireDuration = fs.Duration(7 * 24 * time.Hour) // max expiry is 1 week
|
||||
)
|
||||
|
||||
@@ -2539,6 +2543,8 @@ type Options struct {
|
||||
NoHead bool `config:"no_head"`
|
||||
NoHeadObject bool `config:"no_head_object"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
|
||||
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
|
||||
DisableHTTP2 bool `config:"disable_http2"`
|
||||
DownloadURL string `config:"download_url"`
|
||||
DirectoryMarkers bool `config:"directory_markers"`
|
||||
@@ -2568,6 +2574,7 @@ type Fs struct {
|
||||
pacer *fs.Pacer // To pace the API calls
|
||||
srv *http.Client // a plain http client
|
||||
srvRest *rest.Client // the rest connection to the server
|
||||
pool *pool.Pool // memory pool
|
||||
etagIsNotMD5 bool // if set ETags are not MD5s
|
||||
versioningMu sync.Mutex
|
||||
versioning fs.Tristate // if set bucket is using versions
|
||||
@@ -3115,6 +3122,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fs.Debugf(nil, "name = %q, root = %q, opt = %#v", name, root, opt)
|
||||
err = checkUploadChunkSize(opt.ChunkSize)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("s3: chunk size: %w", err)
|
||||
@@ -3168,6 +3176,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
cache: bucket.NewCache(),
|
||||
srv: srv,
|
||||
srvRest: rest.NewClient(fshttp.NewClient(ctx)),
|
||||
pool: pool.New(
|
||||
time.Duration(opt.MemoryPoolFlushTime),
|
||||
int(opt.ChunkSize),
|
||||
opt.UploadConcurrency*ci.Transfers,
|
||||
opt.MemoryPoolUseMmap,
|
||||
),
|
||||
}
|
||||
if opt.ServerSideEncryption == "aws:kms" || opt.SSECustomerAlgorithm != "" {
|
||||
// From: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html
|
||||
@@ -4362,6 +4376,19 @@ func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.MD5)
|
||||
}
|
||||
|
||||
func (f *Fs) getMemoryPool(size int64) *pool.Pool {
|
||||
if size == int64(f.opt.ChunkSize) {
|
||||
return f.pool
|
||||
}
|
||||
|
||||
return pool.New(
|
||||
time.Duration(f.opt.MemoryPoolFlushTime),
|
||||
int(size),
|
||||
f.opt.UploadConcurrency*f.ci.Transfers,
|
||||
f.opt.MemoryPoolUseMmap,
|
||||
)
|
||||
}
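getMemoryPool hands out chunk-sized buffers from a shared pool instead of allocating a fresh buffer per part. A rough stand-in using the standard library's sync.Pool; rclone's lib/pool has its own flush and mmap semantics, so this only illustrates the general idea.

package main

import (
	"fmt"
	"sync"
)

const chunkSize = 5 * 1024 * 1024 // assumed default chunk size

// bufPool reuses chunk-sized buffers across parts instead of allocating each time.
var bufPool = sync.Pool{
	New: func() any { return make([]byte, chunkSize) },
}

func uploadOnePart(fill func([]byte) int) {
	buf := bufPool.Get().([]byte)
	defer bufPool.Put(buf) // return the buffer for the next part
	n := fill(buf)
	fmt.Println("would upload", n, "bytes")
}

func main() {
	uploadOnePart(func(b []byte) int { return copy(b, "part data") })
}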
|
||||
|
||||
// PublicLink generates a public link to the remote path (usually readable by anyone)
|
||||
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
|
||||
if strings.HasSuffix(remote, "/") {
|
||||
@@ -4394,17 +4421,17 @@ to normal storage.
|
||||
|
||||
Usage Examples:
|
||||
|
||||
rclone backend restore s3:bucket/path/to/object -o priority=PRIORITY -o lifetime=DAYS
|
||||
rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS
|
||||
rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS
|
||||
rclone backend restore s3:bucket/path/to/object [-o priority=PRIORITY] [-o lifetime=DAYS]
|
||||
rclone backend restore s3:bucket/path/to/directory [-o priority=PRIORITY] [-o lifetime=DAYS]
|
||||
rclone backend restore s3:bucket [-o priority=PRIORITY] [-o lifetime=DAYS]
|
||||
|
||||
This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags
|
||||
|
||||
rclone --interactive backend restore --include "*.txt" s3:bucket/path -o priority=Standard -o lifetime=1
|
||||
rclone --interactive backend restore --include "*.txt" s3:bucket/path -o priority=Standard
|
||||
|
||||
All the objects shown will be marked for restore, then
|
||||
|
||||
rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard -o lifetime=1
|
||||
rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard
|
||||
|
||||
It returns a list of status dictionaries with Remote and Status
|
||||
keys. The Status will be OK if it was successful or an error message
|
||||
@@ -4504,26 +4531,6 @@ supplied.
|
||||
It may return "Enabled", "Suspended" or "Unversioned". Note that once versioning
|
||||
has been enabled the status can't be set back to "Unversioned".
|
||||
`,
|
||||
}, {
|
||||
Name: "set",
|
||||
Short: "Set command for updating the config parameters.",
|
||||
Long: `This set command can be used to update the config parameters
|
||||
for a running s3 backend.
|
||||
|
||||
Usage Examples:
|
||||
|
||||
rclone backend set s3: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
|
||||
rclone rc backend/command command=set fs=s3: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
|
||||
rclone rc backend/command command=set fs=s3: -o session_token=X -o access_key_id=X -o secret_access_key=X
|
||||
|
||||
The option keys are named as they are in the config file.
|
||||
|
||||
This rebuilds the connection to the s3 backend when it is called with
|
||||
the new parameters. Only new parameters need be passed as the values
|
||||
will default to those currently in use.
|
||||
|
||||
It doesn't return anything.
|
||||
`,
|
||||
}}
|
||||
|
||||
// Command the backend to run a named command
|
||||
@@ -4618,25 +4625,6 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
||||
return nil, f.CleanUpHidden(ctx)
|
||||
case "versioning":
|
||||
return f.setGetVersioning(ctx, arg...)
|
||||
case "set":
|
||||
newOpt := f.opt
|
||||
err := configstruct.Set(configmap.Simple(opt), &newOpt)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading config: %w", err)
|
||||
}
|
||||
c, ses, err := s3Connection(f.ctx, &newOpt, f.srv)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("updating session: %w", err)
|
||||
}
|
||||
f.c = c
|
||||
f.ses = ses
|
||||
f.opt = newOpt
|
||||
keys := []string{}
|
||||
for k := range opt {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
fs.Logf(f, "Updated config values: %s", strings.Join(keys, ", "))
|
||||
return nil, nil
|
||||
default:
|
||||
return nil, fs.ErrorCommandNotFound
|
||||
}
|
||||
@@ -4871,10 +4859,6 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
|
||||
if isDirectory {
|
||||
return nil
|
||||
}
|
||||
// If the root is a dirmarker it will have lost its trailing /
|
||||
if remote == "" {
|
||||
remote = "/"
|
||||
}
|
||||
oi, err := f.newObjectWithInfo(ctx, remote, object, versionID)
|
||||
if err != nil {
|
||||
fs.Errorf(object, "Can't create object %+v", err)
|
||||
@@ -5332,43 +5316,15 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
|
||||
var warnStreamUpload sync.Once
|
||||
|
||||
// state of ChunkWriter
|
||||
type s3ChunkWriter struct {
|
||||
chunkSize int64
|
||||
size int64
|
||||
f *Fs
|
||||
bucket *string
|
||||
key *string
|
||||
uploadID *string
|
||||
multiPartUploadInput *s3.CreateMultipartUploadInput
|
||||
completedPartsMu sync.Mutex
|
||||
completedParts []*s3.CompletedPart
|
||||
eTag string
|
||||
versionID string
|
||||
md5sMu sync.Mutex
|
||||
md5s []byte
|
||||
ui uploadInfo
|
||||
o *Object
|
||||
}
|
||||
func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (wantETag, gotETag string, versionID *string, err error) {
|
||||
f := o.fs
|
||||
|
||||
// OpenChunkWriter returns the chunk size and a ChunkWriter
|
||||
//
|
||||
// Pass in the remote and the src object
|
||||
// You can also use options to hint at the desired chunk size
|
||||
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
|
||||
// Temporary Object under construction
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
// make concurrency machinery
|
||||
concurrency := f.opt.UploadConcurrency
|
||||
if concurrency < 1 {
|
||||
concurrency = 1
|
||||
}
|
||||
ui, err := o.prepareUpload(ctx, src, options)
|
||||
if err != nil {
|
||||
return info, nil, fmt.Errorf("failed to prepare upload: %w", err)
|
||||
}
|
||||
|
||||
//structs.SetFrom(&mReq, req)
|
||||
var mReq s3.CreateMultipartUploadInput
|
||||
setFrom_s3CreateMultipartUploadInput_s3PutObjectInput(&mReq, ui.req)
|
||||
tokens := pacer.NewTokenDispenser(concurrency)
|
||||
|
||||
uploadParts := f.opt.MaxUploadParts
|
||||
if uploadParts < 1 {
|
||||
@@ -5376,10 +5332,9 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
|
||||
} else if uploadParts > maxUploadParts {
|
||||
uploadParts = maxUploadParts
|
||||
}
|
||||
size := src.Size()
|
||||
|
||||
// calculate size of parts
|
||||
chunkSize := f.opt.ChunkSize
|
||||
partSize := f.opt.ChunkSize
|
||||
|
||||
// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
|
||||
// buffers here (default 5 MiB). With a maximum number of parts (10,000) this will be a file of
|
||||
@@ -5387,204 +5342,187 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
|
||||
if size == -1 {
|
||||
warnStreamUpload.Do(func() {
|
||||
fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
|
||||
f.opt.ChunkSize, fs.SizeSuffix(int64(chunkSize)*int64(uploadParts)))
|
||||
f.opt.ChunkSize, fs.SizeSuffix(int64(partSize)*int64(uploadParts)))
|
||||
})
|
||||
} else {
|
||||
chunkSize = chunksize.Calculator(src, size, uploadParts, chunkSize)
|
||||
partSize = chunksize.Calculator(o, size, uploadParts, f.opt.ChunkSize)
|
||||
}
|
||||
|
||||
var mOut *s3.CreateMultipartUploadOutput
|
||||
memPool := f.getMemoryPool(int64(partSize))
|
||||
|
||||
var mReq s3.CreateMultipartUploadInput
|
||||
//structs.SetFrom(&mReq, req)
|
||||
setFrom_s3CreateMultipartUploadInput_s3PutObjectInput(&mReq, req)
|
||||
var cout *s3.CreateMultipartUploadOutput
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
mOut, err = f.c.CreateMultipartUploadWithContext(ctx, &mReq)
|
||||
var err error
|
||||
cout, err = f.c.CreateMultipartUploadWithContext(ctx, &mReq)
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return info, nil, fmt.Errorf("create multipart upload failed: %w", err)
|
||||
return wantETag, gotETag, nil, fmt.Errorf("multipart upload failed to initialise: %w", err)
|
||||
}
|
||||
uid := cout.UploadId
|
||||
|
||||
chunkWriter := &s3ChunkWriter{
|
||||
chunkSize: int64(chunkSize),
|
||||
size: size,
|
||||
f: f,
|
||||
bucket: mOut.Bucket,
|
||||
key: mOut.Key,
|
||||
uploadID: mOut.UploadId,
|
||||
multiPartUploadInput: &mReq,
|
||||
completedParts: make([]*s3.CompletedPart, 0),
|
||||
ui: ui,
|
||||
o: o,
|
||||
}
|
||||
info = fs.ChunkWriterInfo{
|
||||
ChunkSize: int64(chunkSize),
|
||||
Concurrency: o.fs.opt.UploadConcurrency,
|
||||
LeavePartsOnError: o.fs.opt.LeavePartsOnError,
|
||||
}
|
||||
fs.Debugf(o, "open chunk writer: started multipart upload: %v", *mOut.UploadId)
|
||||
return info, chunkWriter, err
|
||||
}
|
||||
|
||||
// add a part number and etag to the completed parts
|
||||
func (w *s3ChunkWriter) addCompletedPart(partNum *int64, eTag *string) {
|
||||
w.completedPartsMu.Lock()
|
||||
defer w.completedPartsMu.Unlock()
|
||||
w.completedParts = append(w.completedParts, &s3.CompletedPart{
|
||||
PartNumber: partNum,
|
||||
ETag: eTag,
|
||||
})
|
||||
}
|
||||
|
||||
// addMd5 adds a binary md5 to the md5 calculated so far
|
||||
func (w *s3ChunkWriter) addMd5(md5binary *[]byte, chunkNumber int64) {
|
||||
w.md5sMu.Lock()
|
||||
defer w.md5sMu.Unlock()
|
||||
start := chunkNumber * md5.Size
|
||||
end := start + md5.Size
|
||||
if extend := end - int64(len(w.md5s)); extend > 0 {
|
||||
w.md5s = append(w.md5s, make([]byte, extend)...)
|
||||
}
|
||||
copy(w.md5s[start:end], (*md5binary)[:])
|
||||
}
|
||||
|
||||
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
|
||||
func (w *s3ChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (int64, error) {
|
||||
if chunkNumber < 0 {
|
||||
err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber)
|
||||
return -1, err
|
||||
}
|
||||
// Only account after the checksum reads have been done
|
||||
if do, ok := reader.(pool.DelayAccountinger); ok {
|
||||
// To figure out this number, do a transfer and if the accounted size is 0 or a
|
||||
// multiple of what it should be, increase or decrease this number.
|
||||
do.DelayAccounting(3)
|
||||
}
|
||||
|
||||
// create checksum of buffer for integrity checking
|
||||
// currently there is no way to calculate the md5 without reading the chunk a 2nd time (1st read is in uploadMultipart)
|
||||
// possible in AWS SDK v2 with trailers?
|
||||
m := md5.New()
|
||||
currentChunkSize, err := io.Copy(m, reader)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
// If no data read and not the first chunk, don't write the chunk
|
||||
if currentChunkSize == 0 && chunkNumber != 0 {
|
||||
return 0, nil
|
||||
}
|
||||
md5sumBinary := m.Sum([]byte{})
|
||||
w.addMd5(&md5sumBinary, int64(chunkNumber))
|
||||
md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])
|
||||
|
||||
// S3 requires 1 <= PartNumber <= 10000
|
||||
s3PartNumber := aws.Int64(int64(chunkNumber + 1))
|
||||
uploadPartReq := &s3.UploadPartInput{
|
||||
Body: reader,
|
||||
Bucket: w.bucket,
|
||||
Key: w.key,
|
||||
PartNumber: s3PartNumber,
|
||||
UploadId: w.uploadID,
|
||||
ContentMD5: &md5sum,
|
||||
ContentLength: aws.Int64(currentChunkSize),
|
||||
RequestPayer: w.multiPartUploadInput.RequestPayer,
|
||||
SSECustomerAlgorithm: w.multiPartUploadInput.SSECustomerAlgorithm,
|
||||
SSECustomerKey: w.multiPartUploadInput.SSECustomerKey,
|
||||
SSECustomerKeyMD5: w.multiPartUploadInput.SSECustomerKeyMD5,
|
||||
}
|
||||
var uout *s3.UploadPartOutput
|
||||
err = w.f.pacer.Call(func() (bool, error) {
|
||||
// rewind the reader on retry and after reading md5
|
||||
_, err = reader.Seek(0, io.SeekStart)
|
||||
if err != nil {
|
||||
return false, err
|
||||
uploadCtx, cancel := context.WithCancel(ctx)
|
||||
defer atexit.OnError(&err, func() {
|
||||
cancel()
|
||||
if o.fs.opt.LeavePartsOnError {
|
||||
return
|
||||
}
|
||||
uout, err = w.f.c.UploadPartWithContext(ctx, uploadPartReq)
|
||||
if err != nil {
|
||||
if chunkNumber <= 8 {
|
||||
return w.f.shouldRetry(ctx, err)
|
||||
fs.Debugf(o, "Cancelling multipart upload")
|
||||
errCancel := f.pacer.Call(func() (bool, error) {
|
||||
_, err := f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{
|
||||
Bucket: req.Bucket,
|
||||
Key: req.Key,
|
||||
UploadId: uid,
|
||||
RequestPayer: req.RequestPayer,
|
||||
})
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if errCancel != nil {
|
||||
fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
|
||||
}
|
||||
})()
|
||||
|
||||
var (
|
||||
g, gCtx = errgroup.WithContext(uploadCtx)
|
||||
finished = false
|
||||
partsMu sync.Mutex // to protect parts
|
||||
parts []*s3.CompletedPart
|
||||
off int64
|
||||
md5sMu sync.Mutex
|
||||
md5s []byte
|
||||
)
|
||||
|
||||
addMd5 := func(md5binary *[md5.Size]byte, partNum int64) {
|
||||
md5sMu.Lock()
|
||||
defer md5sMu.Unlock()
|
||||
start := partNum * md5.Size
|
||||
end := start + md5.Size
|
||||
if extend := end - int64(len(md5s)); extend > 0 {
|
||||
md5s = append(md5s, make([]byte, extend)...)
|
||||
}
|
||||
copy(md5s[start:end], (*md5binary)[:])
|
||||
}
|
||||
|
||||
for partNum := int64(1); !finished; partNum++ {
|
||||
// Get a block of memory from the pool and token which limits concurrency.
|
||||
tokens.Get()
|
||||
buf := memPool.Get()
|
||||
|
||||
free := func() {
|
||||
// return the memory and token
|
||||
memPool.Put(buf)
|
||||
tokens.Put()
|
||||
}
|
||||
|
||||
// Fail fast, in case an errgroup managed function returns an error
|
||||
// gCtx is cancelled. There is no point in uploading all the other parts.
|
||||
if gCtx.Err() != nil {
|
||||
free()
|
||||
break
|
||||
}
|
||||
|
||||
// Read the chunk
|
||||
var n int
|
||||
n, err = readers.ReadFill(in, buf) // this can never return 0, nil
|
||||
if err == io.EOF {
|
||||
if n == 0 && partNum != 1 { // end if no data and if not first chunk
|
||||
free()
|
||||
break
|
||||
}
|
||||
// retry all chunks once we have done the first few
|
||||
return true, err
|
||||
finished = true
|
||||
} else if err != nil {
|
||||
free()
|
||||
return wantETag, gotETag, nil, fmt.Errorf("multipart upload failed to read source: %w", err)
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
return -1, fmt.Errorf("failed to upload chunk %d with %v bytes: %w", chunkNumber+1, currentChunkSize, err)
|
||||
}
|
||||
buf = buf[:n]
|
||||
|
||||
w.addCompletedPart(s3PartNumber, uout.ETag)
|
||||
partNum := partNum
|
||||
fs.Debugf(o, "multipart upload starting chunk %d size %v offset %v/%v", partNum, fs.SizeSuffix(n), fs.SizeSuffix(off), fs.SizeSuffix(size))
|
||||
off += int64(n)
|
||||
g.Go(func() (err error) {
|
||||
defer free()
|
||||
partLength := int64(len(buf))
|
||||
|
||||
fs.Debugf(w.o, "multipart upload wrote chunk %d with %v bytes and etag %v", chunkNumber+1, currentChunkSize, *uout.ETag)
|
||||
return currentChunkSize, err
|
||||
}
|
||||
// create checksum of buffer for integrity checking
|
||||
md5sumBinary := md5.Sum(buf)
|
||||
addMd5(&md5sumBinary, partNum-1)
|
||||
md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])
|
||||
|
||||
// Abort the multipart upload
|
||||
func (w *s3ChunkWriter) Abort(ctx context.Context) error {
|
||||
err := w.f.pacer.Call(func() (bool, error) {
|
||||
_, err := w.f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{
|
||||
Bucket: w.bucket,
|
||||
Key: w.key,
|
||||
UploadId: w.uploadID,
|
||||
RequestPayer: w.multiPartUploadInput.RequestPayer,
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
uploadPartReq := &s3.UploadPartInput{
|
||||
Body: bytes.NewReader(buf),
|
||||
Bucket: req.Bucket,
|
||||
Key: req.Key,
|
||||
PartNumber: &partNum,
|
||||
UploadId: uid,
|
||||
ContentMD5: &md5sum,
|
||||
ContentLength: &partLength,
|
||||
RequestPayer: req.RequestPayer,
|
||||
SSECustomerAlgorithm: req.SSECustomerAlgorithm,
|
||||
SSECustomerKey: req.SSECustomerKey,
|
||||
SSECustomerKeyMD5: req.SSECustomerKeyMD5,
|
||||
}
|
||||
uout, err := f.c.UploadPartWithContext(gCtx, uploadPartReq)
|
||||
if err != nil {
|
||||
if partNum <= int64(concurrency) {
|
||||
return f.shouldRetry(gCtx, err)
|
||||
}
|
||||
// retry all chunks once have done the first batch
|
||||
return true, err
|
||||
}
|
||||
partsMu.Lock()
|
||||
parts = append(parts, &s3.CompletedPart{
|
||||
PartNumber: &partNum,
|
||||
ETag: uout.ETag,
|
||||
})
|
||||
partsMu.Unlock()
|
||||
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("multipart upload failed to upload part: %w", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return w.f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to abort multipart upload %q: %w", *w.uploadID, err)
|
||||
}
|
||||
fs.Debugf(w.o, "multipart upload %q aborted", *w.uploadID)
|
||||
return err
|
||||
}
|
||||
err = g.Wait()
|
||||
if err != nil {
|
||||
return wantETag, gotETag, nil, err
|
||||
}
|
||||
|
||||
// Close and finalise the multipart upload
|
||||
func (w *s3ChunkWriter) Close(ctx context.Context) (err error) {
|
||||
// sort the completed parts by part number
|
||||
sort.Slice(w.completedParts, func(i, j int) bool {
|
||||
return *w.completedParts[i].PartNumber < *w.completedParts[j].PartNumber
|
||||
sort.Slice(parts, func(i, j int) bool {
|
||||
return *parts[i].PartNumber < *parts[j].PartNumber
|
||||
})
|
||||
|
||||
var resp *s3.CompleteMultipartUploadOutput
|
||||
err = w.f.pacer.Call(func() (bool, error) {
|
||||
resp, err = w.f.c.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
|
||||
Bucket: w.bucket,
|
||||
Key: w.key,
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.c.CompleteMultipartUploadWithContext(uploadCtx, &s3.CompleteMultipartUploadInput{
|
||||
Bucket: req.Bucket,
|
||||
Key: req.Key,
|
||||
MultipartUpload: &s3.CompletedMultipartUpload{
|
||||
Parts: w.completedParts,
|
||||
Parts: parts,
|
||||
},
|
||||
RequestPayer: w.multiPartUploadInput.RequestPayer,
|
||||
UploadId: w.uploadID,
|
||||
RequestPayer: req.RequestPayer,
|
||||
UploadId: uid,
|
||||
})
|
||||
return w.f.shouldRetry(ctx, err)
|
||||
return f.shouldRetry(uploadCtx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to complete multipart upload %q: %w", *w.uploadID, err)
|
||||
return wantETag, gotETag, nil, fmt.Errorf("multipart upload failed to finalise: %w", err)
|
||||
}
|
||||
hashOfHashes := md5.Sum(md5s)
|
||||
wantETag = fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(parts))
|
||||
if resp != nil {
|
||||
if resp.ETag != nil {
|
||||
w.eTag = *resp.ETag
|
||||
}
|
||||
if resp.VersionId != nil {
|
||||
w.versionID = *resp.VersionId
|
||||
gotETag = *resp.ETag
|
||||
}
|
||||
versionID = resp.VersionId
|
||||
}
|
||||
fs.Debugf(w.o, "multipart upload %q finished", *w.uploadID)
|
||||
return err
|
||||
}
|
||||
|
||||
func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.Reader, options ...fs.OpenOption) (wantETag, gotETag string, versionID *string, ui uploadInfo, err error) {
chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
Open: o.fs,
OpenOptions: options,
})
if err != nil {
return wantETag, gotETag, versionID, ui, err
}

var s3cw *s3ChunkWriter = chunkWriter.(*s3ChunkWriter)
gotETag = s3cw.eTag
versionID = aws.String(s3cw.versionID)

hashOfHashes := md5.Sum(s3cw.md5s)
wantETag = fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(s3cw.completedParts))

return wantETag, gotETag, versionID, s3cw.ui, nil
return wantETag, gotETag, versionID, nil
}
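The upload loop earlier in this diff combines errgroup.WithContext with a token pool and a memory pool to bound how many parts are in flight at once. A stripped-down sketch of the same pattern, using a plain buffered channel as the token pool and omitting the memory pool; the names are illustrative, no rclone helpers are used:

// concurrency_sketch.go - an errgroup bounds the whole job while a buffered
// channel acts as the token pool limiting in-flight parts.
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func uploadParts(ctx context.Context, parts [][]byte, concurrency int) error {
	g, gCtx := errgroup.WithContext(ctx)
	tokens := make(chan struct{}, concurrency) // at most `concurrency` parts in flight

	for i, part := range parts {
		if gCtx.Err() != nil { // fail fast once any part has errored
			break
		}
		tokens <- struct{}{} // acquire a token before launching the worker
		i, part := i, part
		g.Go(func() error {
			defer func() { <-tokens }() // release the token when the part is done
			// A real implementation would call UploadPart here.
			fmt.Printf("uploaded part %d (%d bytes)\n", i+1, len(part))
			return nil
		})
	}
	return g.Wait()
}

func main() {
	_ = uploadParts(context.Background(), [][]byte{[]byte("a"), []byte("b"), []byte("c")}, 2)
}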
|
||||
// unWrapAwsError unwraps AWS errors, looking for a non AWS error
|
||||
@@ -5714,25 +5652,25 @@ func (o *Object) uploadSinglepartPresignedRequest(ctx context.Context, req *s3.P
|
||||
return etag, lastModified, versionID, nil
|
||||
}
|
||||
|
||||
// Info needed for an upload
|
||||
type uploadInfo struct {
|
||||
req *s3.PutObjectInput
|
||||
md5sumHex string
|
||||
}
|
||||
|
||||
// Prepare object for being uploaded
|
||||
func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {
|
||||
// Update the Object from in with modTime and size
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
if o.fs.opt.VersionAt.IsSet() {
|
||||
return errNotWithVersionAt
|
||||
}
|
||||
bucket, bucketPath := o.split()
|
||||
// Create parent dir/bucket if not saving directory marker
|
||||
if !strings.HasSuffix(o.remote, "/") {
|
||||
err := o.fs.mkdirParent(ctx, o.remote)
|
||||
if err != nil {
|
||||
return ui, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
modTime := src.ModTime(ctx)
|
||||
size := src.Size()
|
||||
|
||||
ui.req = &s3.PutObjectInput{
|
||||
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
|
||||
|
||||
req := s3.PutObjectInput{
|
||||
Bucket: &bucket,
|
||||
ACL: stringPointerOrNil(o.fs.opt.ACL),
|
||||
Key: &bucketPath,
|
||||
@@ -5741,30 +5679,30 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
|
||||
// Fetch metadata if --metadata is in use
|
||||
meta, err := fs.GetMetadataOptions(ctx, src, options)
|
||||
if err != nil {
|
||||
return ui, fmt.Errorf("failed to read metadata from source object: %w", err)
|
||||
return fmt.Errorf("failed to read metadata from source object: %w", err)
|
||||
}
|
||||
ui.req.Metadata = make(map[string]*string, len(meta)+2)
|
||||
req.Metadata = make(map[string]*string, len(meta)+2)
|
||||
// merge metadata into request and user metadata
|
||||
for k, v := range meta {
|
||||
pv := aws.String(v)
|
||||
k = strings.ToLower(k)
|
||||
if o.fs.opt.NoSystemMetadata {
|
||||
ui.req.Metadata[k] = pv
|
||||
req.Metadata[k] = pv
|
||||
continue
|
||||
}
|
||||
switch k {
|
||||
case "cache-control":
|
||||
ui.req.CacheControl = pv
|
||||
req.CacheControl = pv
|
||||
case "content-disposition":
|
||||
ui.req.ContentDisposition = pv
|
||||
req.ContentDisposition = pv
|
||||
case "content-encoding":
|
||||
ui.req.ContentEncoding = pv
|
||||
req.ContentEncoding = pv
|
||||
case "content-language":
|
||||
ui.req.ContentLanguage = pv
|
||||
req.ContentLanguage = pv
|
||||
case "content-type":
|
||||
ui.req.ContentType = pv
|
||||
req.ContentType = pv
|
||||
case "x-amz-tagging":
|
||||
ui.req.Tagging = pv
|
||||
req.Tagging = pv
|
||||
case "tier":
|
||||
// ignore
|
||||
case "mtime":
|
||||
@@ -5777,14 +5715,14 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
|
||||
}
|
||||
case "btime":
|
||||
// write as metadata since we can't set it
|
||||
ui.req.Metadata[k] = pv
|
||||
req.Metadata[k] = pv
|
||||
default:
|
||||
ui.req.Metadata[k] = pv
|
||||
req.Metadata[k] = pv
|
||||
}
|
||||
}
|
||||
|
||||
// Set the mtime in the meta data
|
||||
ui.req.Metadata[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
|
||||
req.Metadata[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
|
||||
|
||||
// read the md5sum if available
|
||||
// - for non multipart
|
||||
@@ -5793,12 +5731,11 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
|
||||
// - for multipart provided checksums aren't disabled
|
||||
// - so we can add the md5sum in the metadata as metaMD5Hash
|
||||
var md5sumBase64 string
|
||||
size := src.Size()
|
||||
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
|
||||
var md5sumHex string
|
||||
if !multipart || !o.fs.opt.DisableChecksum {
|
||||
ui.md5sumHex, err = src.Hash(ctx, hash.MD5)
|
||||
if err == nil && matchMd5.MatchString(ui.md5sumHex) {
|
||||
hashBytes, err := hex.DecodeString(ui.md5sumHex)
|
||||
md5sumHex, err = src.Hash(ctx, hash.MD5)
|
||||
if err == nil && matchMd5.MatchString(md5sumHex) {
|
||||
hashBytes, err := hex.DecodeString(md5sumHex)
|
||||
if err == nil {
|
||||
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
|
||||
if (multipart || o.fs.etagIsNotMD5) && !o.fs.opt.DisableChecksum {
|
||||
@@ -5806,42 +5743,42 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
|
||||
// - a multipart upload
|
||||
// - the Etag is not an MD5, eg when using SSE/SSE-C
|
||||
// provided checksums aren't disabled
|
||||
ui.req.Metadata[metaMD5Hash] = &md5sumBase64
|
||||
req.Metadata[metaMD5Hash] = &md5sumBase64
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Set the content type if it isn't set already
|
||||
if ui.req.ContentType == nil {
|
||||
ui.req.ContentType = aws.String(fs.MimeType(ctx, src))
|
||||
// Set the content type it it isn't set already
|
||||
if req.ContentType == nil {
|
||||
req.ContentType = aws.String(fs.MimeType(ctx, src))
|
||||
}
|
||||
if size >= 0 {
|
||||
ui.req.ContentLength = &size
|
||||
req.ContentLength = &size
|
||||
}
|
||||
if md5sumBase64 != "" {
|
||||
ui.req.ContentMD5 = &md5sumBase64
|
||||
req.ContentMD5 = &md5sumBase64
|
||||
}
|
||||
if o.fs.opt.RequesterPays {
|
||||
ui.req.RequestPayer = aws.String(s3.RequestPayerRequester)
|
||||
req.RequestPayer = aws.String(s3.RequestPayerRequester)
|
||||
}
|
||||
if o.fs.opt.ServerSideEncryption != "" {
|
||||
ui.req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
|
||||
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
|
||||
}
|
||||
if o.fs.opt.SSECustomerAlgorithm != "" {
|
||||
ui.req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
|
||||
req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
|
||||
}
|
||||
if o.fs.opt.SSECustomerKey != "" {
|
||||
ui.req.SSECustomerKey = &o.fs.opt.SSECustomerKey
|
||||
req.SSECustomerKey = &o.fs.opt.SSECustomerKey
|
||||
}
|
||||
if o.fs.opt.SSECustomerKeyMD5 != "" {
|
||||
ui.req.SSECustomerKeyMD5 = &o.fs.opt.SSECustomerKeyMD5
|
||||
req.SSECustomerKeyMD5 = &o.fs.opt.SSECustomerKeyMD5
|
||||
}
|
||||
if o.fs.opt.SSEKMSKeyID != "" {
|
||||
ui.req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
|
||||
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
|
||||
}
|
||||
if o.fs.opt.StorageClass != "" {
|
||||
ui.req.StorageClass = &o.fs.opt.StorageClass
|
||||
req.StorageClass = &o.fs.opt.StorageClass
|
||||
}
|
||||
// Apply upload options
|
||||
for _, option := range options {
|
||||
@@ -5851,22 +5788,22 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
|
||||
case "":
|
||||
// ignore
|
||||
case "cache-control":
|
||||
ui.req.CacheControl = aws.String(value)
|
||||
req.CacheControl = aws.String(value)
|
||||
case "content-disposition":
|
||||
ui.req.ContentDisposition = aws.String(value)
|
||||
req.ContentDisposition = aws.String(value)
|
||||
case "content-encoding":
|
||||
ui.req.ContentEncoding = aws.String(value)
|
||||
req.ContentEncoding = aws.String(value)
|
||||
case "content-language":
|
||||
ui.req.ContentLanguage = aws.String(value)
|
||||
req.ContentLanguage = aws.String(value)
|
||||
case "content-type":
|
||||
ui.req.ContentType = aws.String(value)
|
||||
req.ContentType = aws.String(value)
|
||||
case "x-amz-tagging":
|
||||
ui.req.Tagging = aws.String(value)
|
||||
req.Tagging = aws.String(value)
|
||||
default:
|
||||
const amzMetaPrefix = "x-amz-meta-"
|
||||
if strings.HasPrefix(lowerKey, amzMetaPrefix) {
|
||||
metaKey := lowerKey[len(amzMetaPrefix):]
|
||||
ui.req.Metadata[metaKey] = aws.String(value)
|
||||
req.Metadata[metaKey] = aws.String(value)
|
||||
} else {
|
||||
fs.Errorf(o, "Don't know how to set key %q on upload", key)
|
||||
}
|
||||
@@ -5874,48 +5811,30 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
|
||||
}
|
||||
|
||||
// Check metadata keys and values are valid
for key, value := range ui.req.Metadata {
for key, value := range req.Metadata {
if !httpguts.ValidHeaderFieldName(key) {
fs.Errorf(o, "Dropping invalid metadata key %q", key)
delete(ui.req.Metadata, key)
delete(req.Metadata, key)
} else if value == nil {
fs.Errorf(o, "Dropping nil metadata value for key %q", key)
delete(ui.req.Metadata, key)
delete(req.Metadata, key)
} else if !httpguts.ValidHeaderFieldValue(*value) {
fs.Errorf(o, "Dropping invalid metadata value %q for key %q", *value, key)
delete(ui.req.Metadata, key)
delete(req.Metadata, key)
}
}
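The validation loop above relies on golang.org/x/net/http/httpguts to decide which user metadata is safe to send as HTTP headers. A self-contained sketch of the same check on a plain string map, for illustration only:

// metadata_check_sketch.go - drop map entries whose key or value would not be
// a valid HTTP header field, mirroring the loop above.
package main

import (
	"fmt"

	"golang.org/x/net/http/httpguts"
)

func dropInvalid(meta map[string]string) {
	for k, v := range meta {
		switch {
		case !httpguts.ValidHeaderFieldName(k):
			fmt.Printf("dropping invalid metadata key %q\n", k)
			delete(meta, k)
		case !httpguts.ValidHeaderFieldValue(v):
			fmt.Printf("dropping invalid metadata value %q for key %q\n", v, k)
			delete(meta, k)
		}
	}
}

func main() {
	meta := map[string]string{"mtime": "1700000000.0", "bad key": "x", "note": "line\nbreak"}
	dropInvalid(meta)
	fmt.Println(meta) // only "mtime" survives
}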
|
||||
return ui, nil
|
||||
}
|
||||
|
||||
// Update the Object from in with modTime and size
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
if o.fs.opt.VersionAt.IsSet() {
|
||||
return errNotWithVersionAt
|
||||
}
|
||||
size := src.Size()
|
||||
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
|
||||
|
||||
var wantETag string // Multipart upload Etag to check
|
||||
var gotETag string // Etag we got from the upload
|
||||
var lastModified time.Time // Time we got from the upload
|
||||
var versionID *string // versionID we got from the upload
|
||||
var err error
|
||||
var ui uploadInfo
|
||||
if multipart {
|
||||
wantETag, gotETag, versionID, ui, err = o.uploadMultipart(ctx, src, in)
|
||||
wantETag, gotETag, versionID, err = o.uploadMultipart(ctx, &req, size, in)
|
||||
} else {
|
||||
ui, err = o.prepareUpload(ctx, src, options)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to prepare upload: %w", err)
|
||||
}
|
||||
|
||||
if o.fs.opt.UsePresignedRequest {
|
||||
gotETag, lastModified, versionID, err = o.uploadSinglepartPresignedRequest(ctx, ui.req, size, in)
|
||||
gotETag, lastModified, versionID, err = o.uploadSinglepartPresignedRequest(ctx, &req, size, in)
|
||||
} else {
|
||||
gotETag, lastModified, versionID, err = o.uploadSinglepartPutObject(ctx, ui.req, size, in)
|
||||
gotETag, lastModified, versionID, err = o.uploadSinglepartPutObject(ctx, &req, size, in)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
@@ -5935,8 +5854,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
if o.fs.opt.NoHead && size >= 0 {
|
||||
head = new(s3.HeadObjectOutput)
|
||||
//structs.SetFrom(head, &req)
|
||||
setFrom_s3HeadObjectOutput_s3PutObjectInput(head, ui.req)
|
||||
head.ETag = &ui.md5sumHex // doesn't matter quotes are missing
|
||||
setFrom_s3HeadObjectOutput_s3PutObjectInput(head, &req)
|
||||
head.ETag = &md5sumHex // doesn't matter quotes are missing
|
||||
head.ContentLength = &size
|
||||
// We get etag back from single and multipart upload so fill it in here
|
||||
if gotETag != "" {
|
||||
@@ -6074,17 +5993,16 @@ func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error)
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.Purger = &Fs{}
|
||||
_ fs.Copier = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.ListRer = &Fs{}
|
||||
_ fs.Commander = &Fs{}
|
||||
_ fs.CleanUpper = &Fs{}
|
||||
_ fs.OpenChunkWriter = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.GetTierer = &Object{}
|
||||
_ fs.SetTierer = &Object{}
|
||||
_ fs.Metadataer = &Object{}
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.Purger = &Fs{}
|
||||
_ fs.Copier = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.ListRer = &Fs{}
|
||||
_ fs.Commander = &Fs{}
|
||||
_ fs.CleanUpper = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.GetTierer = &Object{}
|
||||
_ fs.SetTierer = &Object{}
|
||||
_ fs.Metadataer = &Object{}
|
||||
)
|
||||
|
||||
@@ -17,17 +17,17 @@ func TestShouldAllowShutdownTwice(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRenewalInTimeLimit(t *testing.T) {
var count atomic.Int64
var count int64

renew := NewRenew(100*time.Millisecond, func() error {
count.Add(1)
atomic.AddInt64(&count, 1)
return nil
})
time.Sleep(time.Second)
renew.Shutdown()

// there's no guarantee the CI agent can handle a simple goroutine
renewCount := count.Load()
renewCount := atomic.LoadInt64(&count)
t.Logf("renew count = %d", renewCount)
assert.Greater(t, renewCount, int64(0))
assert.Less(t, renewCount, int64(11))
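This test is one of several places where the branch swaps the Go 1.19+ atomic.Int64/atomic.Int32 types for the older sync/atomic functions on plain integer fields. The two forms are equivalent; a small sketch showing them side by side:

// atomic_counter_sketch.go - the two counter styles this diff switches between.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type counters struct {
	newStyle atomic.Int64 // Go 1.19+ typed atomic
	oldStyle int64        // pre-1.19: must only be touched via atomic functions
}

func main() {
	var c counters
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.newStyle.Add(1)
			atomic.AddInt64(&c.oldStyle, 1)
		}()
	}
	wg.Wait()
	fmt.Println(c.newStyle.Load(), atomic.LoadInt64(&c.oldStyle)) // 100 100
}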
|
||||
@@ -27,6 +27,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/env"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
@@ -168,19 +169,7 @@ E.g. if shared folders can be found in directories representing volumes:
|
||||
|
||||
E.g. if home directory can be found in a shared folder called "home":
|
||||
|
||||
rclone sync /home/local/directory remote:/home/directory --sftp-path-override /volume1/homes/USER/directory
|
||||
|
||||
To specify only the path to the SFTP remote's root, and allow rclone to add any relative subpaths automatically (including unwrapping/decrypting remotes as necessary), add the '@' character to the beginning of the path.
|
||||
|
||||
E.g. the first example above could be rewritten as:
|
||||
|
||||
rclone sync /home/local/directory remote:/directory --sftp-path-override @/volume2
|
||||
|
||||
Note that when using this method with Synology "home" folders, the full "/homes/USER" path should be specified instead of "/home".
|
||||
|
||||
E.g. the second example above should be rewritten as:
|
||||
|
||||
rclone sync /home/local/directory remote:/homes/USER/directory --sftp-path-override @/volume1`,
|
||||
rclone sync /home/local/directory remote:/home/directory --sftp-path-override /volume1/homes/USER/directory`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "set_modtime",
|
||||
@@ -232,16 +221,7 @@ E.g. the second example above should be rewritten as:
|
||||
Default: "",
|
||||
Help: `Specifies the path or command to run a sftp server on the remote host.
|
||||
|
||||
The subsystem option is ignored when server_command is defined.
|
||||
|
||||
If adding server_command to the configuration file please note that
|
||||
it should not be enclosed in quotes, since that will make rclone fail.
|
||||
|
||||
A working example is:
|
||||
|
||||
[remote_name]
|
||||
type = sftp
|
||||
server_command = sudo /usr/libexec/openssh/sftp-server`,
|
||||
The subsystem option is ignored when server_command is defined.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "use_fstat",
|
||||
@@ -408,47 +388,6 @@ Example:
|
||||
ssh-ed25519 ssh-rsa ssh-dss
|
||||
`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "ssh",
|
||||
Default: fs.SpaceSepList{},
|
||||
Help: `Path and arguments to external ssh binary.
|
||||
|
||||
Normally rclone will use its internal ssh library to connect to the
|
||||
SFTP server. However it does not implement all possible ssh options so
|
||||
it may be desirable to use an external ssh binary.
|
||||
|
||||
Rclone ignores all the internal config if you use this option and
|
||||
expects you to configure the ssh binary with the user/host/port and
|
||||
any other options you need.
|
||||
|
||||
**Important** The ssh command must log in without asking for a
|
||||
password so needs to be configured with keys or certificates.
|
||||
|
||||
Rclone will run the command supplied either with the additional
|
||||
arguments "-s sftp" to access the SFTP subsystem or with commands such
|
||||
as "md5sum /path/to/file" appended to read checksums.
|
||||
|
||||
Any arguments with spaces in should be surrounded by "double quotes".
|
||||
|
||||
An example setting might be:
|
||||
|
||||
ssh -o ServerAliveInterval=20 user@example.com
|
||||
|
||||
Note that when using an external ssh binary rclone makes a new ssh
|
||||
connection for every hash it calculates.
|
||||
`,
|
||||
}, {
|
||||
Name: "socks_proxy",
|
||||
Default: "",
|
||||
Help: `Socks 5 proxy host.
|
||||
|
||||
Supports the format user:pass@host:port, user@host:port, host:port.
|
||||
|
||||
Example:
|
||||
|
||||
myUser:myPass@localhost:9005
|
||||
`,
|
||||
Advanced: true,
|
||||
}},
|
||||
}
|
||||
fs.Register(fsi)
|
||||
@@ -488,8 +427,6 @@ type Options struct {
|
||||
KeyExchange fs.SpaceSepList `config:"key_exchange"`
|
||||
MACs fs.SpaceSepList `config:"macs"`
|
||||
HostKeyAlgorithms fs.SpaceSepList `config:"host_key_algorithms"`
|
||||
SSH fs.SpaceSepList `config:"ssh"`
|
||||
SocksProxy string `config:"socks_proxy"`
|
||||
}
|
||||
|
||||
// Fs stores the interface to the remote SFTP files
|
||||
@@ -512,7 +449,7 @@ type Fs struct {
|
||||
drain *time.Timer // used to drain the pool when we stop using the connections
|
||||
pacer *fs.Pacer // pacer for operations
|
||||
savedpswd string
|
||||
sessions atomic.Int32 // count in use sessions
|
||||
sessions int32 // count in use sessions
|
||||
}
|
||||
|
||||
// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
|
||||
@@ -526,16 +463,41 @@ type Object struct {
|
||||
sha1sum *string // Cached SHA1 checksum
|
||||
}
|
||||
|
||||
// dial starts a client connection to the given SSH server. It is a
|
||||
// convenience function that connects to the given network address,
|
||||
// initiates the SSH handshake, and then sets up a Client.
|
||||
func (f *Fs) dial(ctx context.Context, network, addr string, sshConfig *ssh.ClientConfig) (*ssh.Client, error) {
|
||||
dialer := fshttp.NewDialer(ctx)
|
||||
conn, err := dialer.Dial(network, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, chans, reqs, err := ssh.NewClientConn(conn, addr, sshConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fs.Debugf(f, "New connection %s->%s to %q", c.LocalAddr(), c.RemoteAddr(), c.ServerVersion())
|
||||
return ssh.NewClient(c, chans, reqs), nil
|
||||
}
|
||||
|
||||
// conn encapsulates an ssh client and corresponding sftp client
|
||||
type conn struct {
|
||||
sshClient sshClient
|
||||
sshClient *ssh.Client
|
||||
sftpClient *sftp.Client
|
||||
err chan error
|
||||
}
|
||||
|
||||
// Wait for connection to close
|
||||
func (c *conn) wait() {
|
||||
c.err <- c.sshClient.Wait()
|
||||
c.err <- c.sshClient.Conn.Wait()
|
||||
}
|
||||
|
||||
// Send a keepalive over the ssh connection
|
||||
func (c *conn) sendKeepAlive() {
|
||||
_, _, err := c.sshClient.SendRequest("keepalive@openssh.com", true, nil)
|
||||
if err != nil {
|
||||
fs.Debugf(nil, "Failed to send keep alive: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Send keepalives every interval over the ssh connection until done is closed
@@ -547,7 +509,7 @@ func (c *conn) sendKeepAlives(interval time.Duration) (done chan struct{}) {
for {
select {
case <-t.C:
c.sshClient.SendKeepAlive()
c.sendKeepAlive()
case <-done:
return
}
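The keepalive loop above is a standard ticker-plus-done-channel goroutine. A self-contained sketch of that shape, with a generic send callback standing in for the SSH keepalive request:

// keepalive_sketch.go - tick on an interval, send a keepalive each tick, and
// stop cleanly when done is closed.
package main

import (
	"fmt"
	"time"
)

func sendKeepAlives(interval time.Duration, send func()) (done chan struct{}) {
	done = make(chan struct{})
	t := time.NewTicker(interval)
	go func() {
		defer t.Stop()
		for {
			select {
			case <-t.C:
				send() // e.g. an SSH "keepalive@openssh.com" request
			case <-done:
				return
			}
		}
	}()
	return done
}

func main() {
	done := sendKeepAlives(50*time.Millisecond, func() { fmt.Println("keepalive") })
	time.Sleep(200 * time.Millisecond)
	close(done) // stops the goroutine
	time.Sleep(50 * time.Millisecond)
}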
@@ -580,17 +542,17 @@ func (c *conn) closed() error {
|
||||
//
|
||||
// Call removeSession() when done
func (f *Fs) addSession() {
f.sessions.Add(1)
atomic.AddInt32(&f.sessions, 1)
}

// Show the ssh session is no longer in use
func (f *Fs) removeSession() {
f.sessions.Add(-1)
atomic.AddInt32(&f.sessions, -1)
}

// getSessions shows whether there are any sessions in use
func (f *Fs) getSessions() int32 {
return f.sessions.Load()
return atomic.LoadInt32(&f.sessions)
}
|
||||
// Open a new connection to the SFTP server.
|
||||
@@ -599,11 +561,7 @@ func (f *Fs) sftpConnection(ctx context.Context) (c *conn, err error) {
|
||||
c = &conn{
|
||||
err: make(chan error, 1),
|
||||
}
|
||||
if len(f.opt.SSH) == 0 {
|
||||
c.sshClient, err = f.newSSHClientInternal(ctx, "tcp", f.opt.Host+":"+f.opt.Port, f.config)
|
||||
} else {
|
||||
c.sshClient, err = f.newSSHClientExternal()
|
||||
}
|
||||
c.sshClient, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port, f.config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't connect SSH: %w", err)
|
||||
}
|
||||
@@ -617,7 +575,7 @@ func (f *Fs) sftpConnection(ctx context.Context) (c *conn, err error) {
|
||||
}
|
||||
|
||||
// Set any environment variables on the ssh.Session
|
||||
func (f *Fs) setEnv(s sshSession) error {
|
||||
func (f *Fs) setEnv(s *ssh.Session) error {
|
||||
for _, env := range f.opt.SetEnv {
|
||||
equal := strings.IndexRune(env, '=')
|
||||
if equal < 0 {
|
||||
@@ -634,8 +592,8 @@ func (f *Fs) setEnv(s sshSession) error {
|
||||
|
||||
// Creates a new SFTP client on conn, using the specified subsystem
|
||||
// or sftp server, and zero or more option functions
|
||||
func (f *Fs) newSftpClient(client sshClient, opts ...sftp.ClientOption) (*sftp.Client, error) {
|
||||
s, err := client.NewSession()
|
||||
func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.Client, error) {
|
||||
s, err := conn.NewSession()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -708,9 +666,6 @@ func (f *Fs) getSftpConnection(ctx context.Context) (c *conn, err error) {
|
||||
// Getwd request
|
||||
func (f *Fs) putSftpConnection(pc **conn, err error) {
|
||||
c := *pc
|
||||
if !c.sshClient.CanReuse() {
|
||||
return
|
||||
}
|
||||
*pc = nil
|
||||
if err != nil {
|
||||
// work out if this is an expected error
|
||||
@@ -789,10 +744,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(opt.SSH) != 0 && ((opt.User != currentUser && opt.User != "") || opt.Host != "" || (opt.Port != "22" && opt.Port != "")) {
|
||||
fs.Logf(name, "--sftp-ssh is in use - ignoring user/host/port from config - set in the parameters to --sftp-ssh (remove them from the config to silence this warning)")
|
||||
}
|
||||
|
||||
if opt.User == "" {
|
||||
opt.User = currentUser
|
||||
}
|
||||
@@ -845,7 +796,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
pubkeyFile := env.ShellExpand(opt.PubKeyFile)
|
||||
//keyPem := env.ShellExpand(opt.KeyPem)
|
||||
// Add ssh agent-auth if no password or file or key PEM specified
|
||||
if (len(opt.SSH) == 0 && opt.Pass == "" && keyFile == "" && !opt.AskPassword && opt.KeyPem == "") || opt.KeyUseAgent {
|
||||
if (opt.Pass == "" && keyFile == "" && !opt.AskPassword && opt.KeyPem == "") || opt.KeyUseAgent {
|
||||
sshAgentClient, _, err := sshagent.New()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't connect to ssh-agent: %w", err)
|
||||
@@ -1065,8 +1016,8 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
|
||||
fs.Debugf(f, "Failed to get shell session for shell type detection command: %v", err)
|
||||
} else {
|
||||
var stdout, stderr bytes.Buffer
|
||||
session.SetStdout(&stdout)
|
||||
session.SetStderr(&stderr)
|
||||
session.Stdout = &stdout
|
||||
session.Stderr = &stderr
|
||||
shellCmd := "echo ${ShellId}%ComSpec%"
|
||||
fs.Debugf(f, "Running shell type detection remote command: %s", shellCmd)
|
||||
err = session.Run(shellCmd)
|
||||
@@ -1476,8 +1427,8 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
|
||||
}()
|
||||
|
||||
var stdout, stderr bytes.Buffer
|
||||
session.SetStdout(&stdout)
|
||||
session.SetStderr(&stderr)
|
||||
session.Stdout = &stdout
|
||||
session.Stderr = &stderr
|
||||
|
||||
fs.Debugf(f, "Running remote command: %s", cmd)
|
||||
err = session.Run(cmd)
|
||||
@@ -1784,9 +1735,6 @@ func (f *Fs) remotePath(remote string) string {
|
||||
func (f *Fs) remoteShellPath(remote string) string {
|
||||
if f.opt.PathOverride != "" {
|
||||
shellPath := path.Join(f.opt.PathOverride, remote)
|
||||
if f.opt.PathOverride[0] == '@' {
|
||||
shellPath = path.Join(strings.TrimPrefix(f.opt.PathOverride, "@"), f.absRoot, remote)
|
||||
}
|
||||
fs.Debugf(f, "Shell path redirected to %q with option path_override", shellPath)
|
||||
return shellPath
|
||||
}
|
||||
@@ -2044,10 +1992,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
if err != nil {
|
||||
return fmt.Errorf("Update: %w", err)
|
||||
}
|
||||
// Hang on to the connection for the whole upload so it doesn't get re-used while we are uploading
|
||||
file, err := c.sftpClient.OpenFile(o.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
|
||||
o.fs.putSftpConnection(&c, err)
|
||||
if err != nil {
|
||||
o.fs.putSftpConnection(&c, err)
|
||||
return fmt.Errorf("Update Create failed: %w", err)
|
||||
}
|
||||
// remove the file if upload failed
|
||||
@@ -2067,18 +2014,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
_, err = file.ReadFrom(&sizeReader{Reader: in, size: src.Size()})
|
||||
if err != nil {
|
||||
o.fs.putSftpConnection(&c, err)
|
||||
remove()
|
||||
return fmt.Errorf("Update ReadFrom failed: %w", err)
|
||||
}
|
||||
err = file.Close()
|
||||
if err != nil {
|
||||
o.fs.putSftpConnection(&c, err)
|
||||
remove()
|
||||
return fmt.Errorf("Update Close failed: %w", err)
|
||||
}
|
||||
// Release connection only when upload has finished so we don't upload multiple files on the same connection
|
||||
o.fs.putSftpConnection(&c, err)
|
||||
|
||||
// Set the mod time - this stats the object if o.fs.opt.SetModTime == true
|
||||
err = o.SetModTime(ctx, src.ModTime(ctx))
|
||||
|
||||
@@ -30,13 +30,3 @@ func TestIntegration2(t *testing.T) {
|
||||
NilObject: (*sftp.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
func TestIntegration3(t *testing.T) {
|
||||
if *fstest.RemoteName != "" {
|
||||
t.Skip("skipping as -remote is set")
|
||||
}
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestSFTPRcloneSSH:",
|
||||
NilObject: (*sftp.Object)(nil),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,73 +0,0 @@
|
||||
//go:build !plan9
|
||||
// +build !plan9
|
||||
|
||||
package sftp
|
||||
|
||||
import "io"
|
||||
|
||||
// Interfaces for ssh client and session implemented in ssh_internal.go and ssh_external.go
|
||||
|
||||
// An interface for an ssh client to abstract over internal ssh library and external binary
|
||||
type sshClient interface {
|
||||
// Wait blocks until the connection has shut down, and returns the
|
||||
// error causing the shutdown.
|
||||
Wait() error
|
||||
|
||||
// SendKeepAlive sends a keepalive message to keep the connection open
|
||||
SendKeepAlive()
|
||||
|
||||
// Close the connection
|
||||
Close() error
|
||||
|
||||
// NewSession opens a new sshSession for this sshClient. (A
|
||||
// session is a remote execution of a program.)
|
||||
NewSession() (sshSession, error)
|
||||
|
||||
// CanReuse indicates if this client can be reused
|
||||
CanReuse() bool
|
||||
}
|
||||
|
||||
// An interface for an ssh session to abstract over internal ssh library and external binary
|
||||
type sshSession interface {
|
||||
// Setenv sets an environment variable that will be applied to any
|
||||
// command executed by Shell or Run.
|
||||
Setenv(name, value string) error
|
||||
|
||||
// Start runs cmd on the remote host. Typically, the remote
|
||||
// server passes cmd to the shell for interpretation.
|
||||
// A Session only accepts one call to Run, Start or Shell.
|
||||
Start(cmd string) error
|
||||
|
||||
// StdinPipe returns a pipe that will be connected to the
|
||||
// remote command's standard input when the command starts.
|
||||
StdinPipe() (io.WriteCloser, error)
|
||||
|
||||
// StdoutPipe returns a pipe that will be connected to the
|
||||
// remote command's standard output when the command starts.
|
||||
// There is a fixed amount of buffering that is shared between
|
||||
// stdout and stderr streams. If the StdoutPipe reader is
|
||||
// not serviced fast enough it may eventually cause the
|
||||
// remote command to block.
|
||||
StdoutPipe() (io.Reader, error)
|
||||
|
||||
// RequestSubsystem requests the association of a subsystem
|
||||
// with the session on the remote host. A subsystem is a
|
||||
// predefined command that runs in the background when the ssh
|
||||
// session is initiated
|
||||
RequestSubsystem(subsystem string) error
|
||||
|
||||
// Run runs cmd on the remote host. Typically, the remote
|
||||
// server passes cmd to the shell for interpretation.
|
||||
// A Session only accepts one call to Run, Start, Shell, Output,
|
||||
// or CombinedOutput.
|
||||
Run(cmd string) error
|
||||
|
||||
// Close the session
|
||||
Close() error
|
||||
|
||||
// Set the stdout
|
||||
SetStdout(io.Writer)
|
||||
|
||||
// Set the stderr
|
||||
SetStderr(io.Writer)
|
||||
}
|
||||
@@ -1,223 +0,0 @@
|
||||
//go:build !plan9
|
||||
// +build !plan9
|
||||
|
||||
package sftp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
// Implement the sshClient interface for external ssh programs
|
||||
type sshClientExternal struct {
|
||||
f *Fs
|
||||
session *sshSessionExternal
|
||||
}
|
||||
|
||||
func (f *Fs) newSSHClientExternal() (sshClient, error) {
|
||||
return &sshClientExternal{f: f}, nil
|
||||
}
|
||||
|
||||
// Wait for connection to close
|
||||
func (s *sshClientExternal) Wait() error {
|
||||
if s.session == nil {
|
||||
return nil
|
||||
}
|
||||
return s.session.Wait()
|
||||
}
|
||||
|
||||
// Send a keepalive over the ssh connection
|
||||
func (s *sshClientExternal) SendKeepAlive() {
|
||||
// Up to the user to configure -o ServerAliveInterval=20 on their ssh connections
|
||||
}
|
||||
|
||||
// Close the connection
|
||||
func (s *sshClientExternal) Close() error {
|
||||
if s.session == nil {
|
||||
return nil
|
||||
}
|
||||
return s.session.Close()
|
||||
}
|
||||
|
||||
// NewSession makes a new external SSH connection
|
||||
func (s *sshClientExternal) NewSession() (sshSession, error) {
|
||||
session := s.f.newSSHSessionExternal()
|
||||
if s.session == nil {
|
||||
fs.Debugf(s.f, "ssh external: creating additional session")
|
||||
}
|
||||
return session, nil
|
||||
}
|
||||
|
||||
// CanReuse indicates if this client can be reused
|
||||
func (s *sshClientExternal) CanReuse() bool {
|
||||
if s.session == nil {
|
||||
return true
|
||||
}
|
||||
exited := s.session.exited()
|
||||
canReuse := !exited && s.session.runningSFTP
|
||||
// fs.Debugf(s.f, "ssh external: CanReuse %v, exited=%v runningSFTP=%v", canReuse, exited, s.session.runningSFTP)
|
||||
return canReuse
|
||||
}
|
||||
|
||||
// Check interfaces
|
||||
var _ sshClient = &sshClientExternal{}
|
||||
|
||||
// implement the sshSession interface for external ssh binary
|
||||
type sshSessionExternal struct {
|
||||
f *Fs
|
||||
cmd *exec.Cmd
|
||||
cancel func()
|
||||
startCalled bool
|
||||
runningSFTP bool
|
||||
}
|
||||
|
||||
func (f *Fs) newSSHSessionExternal() *sshSessionExternal {
|
||||
s := &sshSessionExternal{
|
||||
f: f,
|
||||
}
|
||||
|
||||
// Make a cancellation function for this to call in Close()
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
s.cancel = cancel
|
||||
|
||||
// Connect to a remote host and request the sftp subsystem via
|
||||
// the 'ssh' command. This assumes that passwordless login is
|
||||
// correctly configured.
|
||||
ssh := append([]string(nil), s.f.opt.SSH...)
|
||||
s.cmd = exec.CommandContext(ctx, ssh[0], ssh[1:]...)
|
||||
|
||||
// Allow the command a short time only to shut down
|
||||
// FIXME enable when we get rid of go1.19
|
||||
// s.cmd.WaitDelay = time.Second
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// Setenv sets an environment variable that will be applied to any
|
||||
// command executed by Shell or Run.
|
||||
func (s *sshSessionExternal) Setenv(name, value string) error {
|
||||
return errors.New("ssh external: can't set environment variables")
|
||||
}
|
||||
|
||||
const requestSubsystem = "***Subsystem***:"
|
||||
|
||||
// Start runs cmd on the remote host. Typically, the remote
|
||||
// server passes cmd to the shell for interpretation.
|
||||
// A Session only accepts one call to Run, Start or Shell.
|
||||
func (s *sshSessionExternal) Start(cmd string) error {
|
||||
if s.startCalled {
|
||||
return errors.New("internal error: ssh external: command already running")
|
||||
}
|
||||
s.startCalled = true
|
||||
|
||||
// Adjust the args
|
||||
if strings.HasPrefix(cmd, requestSubsystem) {
|
||||
s.cmd.Args = append(s.cmd.Args, "-s", cmd[len(requestSubsystem):])
|
||||
s.runningSFTP = true
|
||||
} else {
|
||||
s.cmd.Args = append(s.cmd.Args, cmd)
|
||||
s.runningSFTP = false
|
||||
}
|
||||
|
||||
fs.Debugf(s.f, "ssh external: running: %v", fs.SpaceSepList(s.cmd.Args))
|
||||
|
||||
// start the process
|
||||
err := s.cmd.Start()
|
||||
if err != nil {
|
||||
return fmt.Errorf("ssh external: start process: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RequestSubsystem requests the association of a subsystem
|
||||
// with the session on the remote host. A subsystem is a
|
||||
// predefined command that runs in the background when the ssh
|
||||
// session is initiated
|
||||
func (s *sshSessionExternal) RequestSubsystem(subsystem string) error {
|
||||
return s.Start(requestSubsystem + subsystem)
|
||||
}
|
||||
|
||||
// StdinPipe returns a pipe that will be connected to the
|
||||
// remote command's standard input when the command starts.
|
||||
func (s *sshSessionExternal) StdinPipe() (io.WriteCloser, error) {
|
||||
rd, err := s.cmd.StdinPipe()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("ssh external: stdin pipe: %w", err)
|
||||
}
|
||||
return rd, nil
|
||||
}
|
||||
|
||||
// StdoutPipe returns a pipe that will be connected to the
|
||||
// remote command's standard output when the command starts.
|
||||
// There is a fixed amount of buffering that is shared between
|
||||
// stdout and stderr streams. If the StdoutPipe reader is
|
||||
// not serviced fast enough it may eventually cause the
|
||||
// remote command to block.
|
||||
func (s *sshSessionExternal) StdoutPipe() (io.Reader, error) {
|
||||
wr, err := s.cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("ssh external: stdout pipe: %w", err)
|
||||
}
|
||||
return wr, nil
|
||||
}
|
||||
|
||||
// Return whether the command has finished or not
|
||||
func (s *sshSessionExternal) exited() bool {
|
||||
return s.cmd.ProcessState != nil
|
||||
}
|
||||
|
||||
// Wait for the command to exit
|
||||
func (s *sshSessionExternal) Wait() error {
|
||||
if s.exited() {
|
||||
return nil
|
||||
}
|
||||
err := s.cmd.Wait()
|
||||
if err == nil {
|
||||
fs.Debugf(s.f, "ssh external: command exited OK")
|
||||
} else {
|
||||
fs.Debugf(s.f, "ssh external: command exited with error: %v", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Run runs cmd on the remote host. Typically, the remote
|
||||
// server passes cmd to the shell for interpretation.
|
||||
// A Session only accepts one call to Run, Start, Shell, Output,
|
||||
// or CombinedOutput.
|
||||
func (s *sshSessionExternal) Run(cmd string) error {
|
||||
err := s.Start(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return s.Wait()
|
||||
}
|
||||
|
||||
// Close the external ssh
|
||||
func (s *sshSessionExternal) Close() error {
|
||||
fs.Debugf(s.f, "ssh external: close")
|
||||
// Cancel the context which kills the process
|
||||
s.cancel()
|
||||
// Wait for it to finish
|
||||
_ = s.Wait()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Set the stdout
|
||||
func (s *sshSessionExternal) SetStdout(wr io.Writer) {
|
||||
s.cmd.Stdout = wr
|
||||
}
|
||||
|
||||
// Set the stderr
|
||||
func (s *sshSessionExternal) SetStderr(wr io.Writer) {
|
||||
s.cmd.Stderr = wr
|
||||
}
|
||||
|
||||
// Check interfaces
|
||||
var _ sshSession = &sshSessionExternal{}
|
||||
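The deleted ssh_external.go drives the system ssh binary with "-s sftp" and speaks the SFTP protocol over its stdin/stdout. A much-reduced sketch of that idea using github.com/pkg/sftp's NewClientPipe; the host, user and passwordless login are assumptions, and this is not the removed implementation:

// external_ssh_sketch.go - run the external ssh binary and layer SFTP on top
// of its pipes; the ssh process itself handles authentication.
package main

import (
	"fmt"
	"os/exec"

	"github.com/pkg/sftp"
)

func main() {
	cmd := exec.Command("ssh", "user@example.com", "-s", "sftp") // external binary does auth
	wr, err := cmd.StdinPipe()
	if err != nil {
		panic(err)
	}
	rd, err := cmd.StdoutPipe()
	if err != nil {
		panic(err)
	}
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	client, err := sftp.NewClientPipe(rd, wr) // SFTP over the ssh process's pipes
	if err != nil {
		panic(err)
	}
	defer client.Close()

	entries, err := client.ReadDir(".")
	if err != nil {
		panic(err)
	}
	for _, e := range entries {
		fmt.Println(e.Name())
	}
}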
@@ -1,101 +0,0 @@
|
||||
//go:build !plan9
|
||||
// +build !plan9
|
||||
|
||||
package sftp
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/lib/proxy"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
// Internal ssh connections with "golang.org/x/crypto/ssh"
|
||||
|
||||
type sshClientInternal struct {
|
||||
srv *ssh.Client
|
||||
}
|
||||
|
||||
// newSSHClientInternal starts a client connection to the given SSH server. It is a
|
||||
// convenience function that connects to the given network address,
|
||||
// initiates the SSH handshake, and then sets up a Client.
|
||||
func (f *Fs) newSSHClientInternal(ctx context.Context, network, addr string, sshConfig *ssh.ClientConfig) (sshClient, error) {
|
||||
|
||||
baseDialer := fshttp.NewDialer(ctx)
|
||||
var (
|
||||
conn net.Conn
|
||||
err error
|
||||
)
|
||||
if f.opt.SocksProxy != "" {
|
||||
conn, err = proxy.SOCKS5Dial(network, addr, f.opt.SocksProxy, baseDialer)
|
||||
} else {
|
||||
conn, err = baseDialer.Dial(network, addr)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, chans, reqs, err := ssh.NewClientConn(conn, addr, sshConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fs.Debugf(f, "New connection %s->%s to %q", c.LocalAddr(), c.RemoteAddr(), c.ServerVersion())
|
||||
srv := ssh.NewClient(c, chans, reqs)
|
||||
return sshClientInternal{srv}, nil
|
||||
}
|
||||
|
||||
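newSSHClientInternal above boils down to: obtain a net.Conn however you like (direct, via SOCKS5, or a custom dialer), then complete the SSH handshake over it. A minimal sketch with golang.org/x/crypto/ssh; the address, credentials and host key policy are placeholders:

// ssh_dial_sketch.go - dial a raw connection, then wrap it in an *ssh.Client.
package main

import (
	"fmt"
	"net"
	"time"

	"golang.org/x/crypto/ssh"
)

func dialSSH(addr string, config *ssh.ClientConfig) (*ssh.Client, error) {
	conn, err := net.DialTimeout("tcp", addr, 10*time.Second) // any net.Conn works here
	if err != nil {
		return nil, err
	}
	c, chans, reqs, err := ssh.NewClientConn(conn, addr, config) // SSH handshake over the conn
	if err != nil {
		return nil, err
	}
	return ssh.NewClient(c, chans, reqs), nil
}

func main() {
	config := &ssh.ClientConfig{
		User:            "user",
		Auth:            []ssh.AuthMethod{ssh.Password("password")},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // fine for a sketch, not for production
	}
	client, err := dialSSH("example.com:22", config)
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer client.Close()
}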
// Wait for connection to close
|
||||
func (s sshClientInternal) Wait() error {
|
||||
return s.srv.Conn.Wait()
|
||||
}
|
||||
|
||||
// Send a keepalive over the ssh connection
|
||||
func (s sshClientInternal) SendKeepAlive() {
|
||||
_, _, err := s.srv.SendRequest("keepalive@openssh.com", true, nil)
|
||||
if err != nil {
|
||||
fs.Debugf(nil, "Failed to send keep alive: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Close the connection
|
||||
func (s sshClientInternal) Close() error {
|
||||
return s.srv.Close()
|
||||
}
|
||||
|
||||
// CanReuse indicates if this client can be reused
|
||||
func (s sshClientInternal) CanReuse() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check interfaces
|
||||
var _ sshClient = sshClientInternal{}
|
||||
|
||||
// Thin wrapper for *ssh.Session to implement sshSession interface
|
||||
type sshSessionInternal struct {
|
||||
*ssh.Session
|
||||
}
|
||||
|
||||
// Set the stdout
|
||||
func (s sshSessionInternal) SetStdout(wr io.Writer) {
|
||||
s.Session.Stdout = wr
|
||||
}
|
||||
|
||||
// Set the stderr
|
||||
func (s sshSessionInternal) SetStderr(wr io.Writer) {
|
||||
s.Session.Stderr = wr
|
||||
}
|
||||
|
||||
// NewSession makes an sshSession from an sshClient
|
||||
func (s sshClientInternal) NewSession() (sshSession, error) {
|
||||
session, err := s.srv.NewSession()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return sshSessionInternal{Session: session}, nil
|
||||
}
|
||||
|
||||
// Check interfaces
|
||||
var _ sshSession = sshSessionInternal{}
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
smb2 "github.com/hirochachacha/go-smb2"
|
||||
@@ -88,17 +89,17 @@ func (c *conn) closed() bool {
|
||||
//
|
||||
// Call removeSession() when done
|
||||
func (f *Fs) addSession() {
|
||||
f.sessions.Add(1)
|
||||
atomic.AddInt32(&f.sessions, 1)
|
||||
}
|
||||
|
||||
// Show the SMB session is no longer in use
|
||||
func (f *Fs) removeSession() {
|
||||
f.sessions.Add(-1)
|
||||
atomic.AddInt32(&f.sessions, -1)
|
||||
}
|
||||
|
||||
// getSessions shows whether there are any sessions in use
|
||||
func (f *Fs) getSessions() int32 {
|
||||
return f.sessions.Load()
|
||||
return atomic.LoadInt32(&f.sessions)
|
||||
}
|
||||
|
||||
// Open a new connection to the SMB server.
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
@@ -141,7 +140,7 @@ type Fs struct {
|
||||
features *fs.Features // optional features
|
||||
pacer *fs.Pacer // pacer for operations
|
||||
|
||||
sessions atomic.Int32
|
||||
sessions int32
|
||||
poolMu sync.Mutex
|
||||
pool []*conn
|
||||
drain *time.Timer // used to drain the pool when we stop using the connections
|
||||
@@ -476,45 +475,6 @@ func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err error) {
|
||||
return usage, nil
|
||||
}
|
||||
|
||||
// OpenWriterAt opens with a handle for random access writes
|
||||
//
|
||||
// Pass in the remote desired and the size if known.
|
||||
//
|
||||
// It truncates any existing object
|
||||
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
|
||||
var err error
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
share, filename := o.split()
|
||||
if share == "" || filename == "" {
|
||||
return nil, fs.ErrorIsDir
|
||||
}
|
||||
|
||||
err = o.fs.ensureDirectory(ctx, share, filename)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to make parent directories: %w", err)
|
||||
}
|
||||
|
||||
filename = o.fs.toSambaPath(filename)
|
||||
|
||||
o.fs.addSession() // Show session in use
|
||||
defer o.fs.removeSession()
|
||||
|
||||
cn, err := o.fs.getConnection(ctx, share)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fl, err := cn.smbShare.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open: %w", err)
|
||||
}
|
||||
|
||||
return fl, nil
|
||||
}
|
||||
|
||||
// Shutdown the backend, closing any background tasks and any
|
||||
// cached connections.
|
||||
func (f *Fs) Shutdown(ctx context.Context) error {
|
||||
|
||||
@@ -24,7 +24,6 @@ import (
|
||||
|
||||
"storj.io/uplink"
|
||||
"storj.io/uplink/edge"
|
||||
"storj.io/uplink/private/testuplink"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -277,8 +276,6 @@ func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
|
||||
UserAgent: "rclone",
|
||||
}
|
||||
|
||||
ctx = testuplink.WithConcurrentSegmentUploadsDefaultConfig(ctx)
|
||||
|
||||
project, err = cfg.OpenProject(ctx, f.access)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("storj: project: %w", err)
|
||||
|
||||
@@ -561,7 +561,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *swift.O
|
||||
// returned as 0 bytes in the listing. Correct this here by
|
||||
// making sure we read the full metadata for all 0 byte files.
|
||||
// We don't read the metadata for directory marker objects.
|
||||
if info != nil && info.Bytes == 0 && info.ContentType != "application/directory" && !o.fs.opt.NoLargeObjects {
|
||||
if info != nil && info.Bytes == 0 && info.ContentType != "application/directory" {
|
||||
err := o.readMetaData(ctx) // reads info and headers, returning an error
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
// We have a dangling large object here so just return the original metadata
|
||||
|
||||
@@ -17,9 +17,8 @@ import (
|
||||
// This is a wrapped object which returns the Union Fs as its parent
|
||||
type Object struct {
|
||||
*upstream.Object
|
||||
fs *Fs // what this object is part of
|
||||
co []upstream.Entry
|
||||
writebackMu sync.Mutex
|
||||
fs *Fs // what this object is part of
|
||||
co []upstream.Entry
|
||||
}
|
||||
|
||||
// Directory describes a union Directory
|
||||
@@ -35,13 +34,6 @@ type entry interface {
|
||||
candidates() []upstream.Entry
|
||||
}
|
||||
|
||||
// Update o with the contents of newO excluding the lock
|
||||
func (o *Object) update(newO *Object) {
|
||||
o.Object = newO.Object
|
||||
o.fs = newO.fs
|
||||
o.co = newO.co
|
||||
}
|
||||
|
||||
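A plausible reason for the update method above, as opposed to the whole-struct assignment *o = *newO.(*Object) shown further down, is that the newer Object carries a mutex, and copying a struct that contains a sync.Mutex is unsafe and flagged by go vet's copylocks check. An illustrative sketch, not the union backend's actual types:

// update_vs_copy_sketch.go - copy only the data fields, never the lock.
package main

import (
	"fmt"
	"sync"
)

type object struct {
	mu   sync.Mutex // protects writeback; must never be copied
	name string
	size int64
}

// update copies just the data fields, leaving the mutex in place.
func (o *object) update(newO *object) {
	o.name = newO.name
	o.size = newO.size
}

func main() {
	o := &object{name: "old", size: 1}
	o.update(&object{name: "new", size: 2}) // safe: mutex untouched
	// "*o = *newO" would copy the mutex too and trip go vet's copylocks check.
	fmt.Println(o.name, o.size)
}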
// UnWrapUpstream returns the upstream Object that this Object is wrapping
|
||||
func (o *Object) UnWrapUpstream() *upstream.Object {
|
||||
return o.Object
|
||||
@@ -75,7 +67,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return err
|
||||
}
|
||||
// Update current object
|
||||
o.update(newO.(*Object))
|
||||
*o = *newO.(*Object)
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return err
|
||||
@@ -183,25 +175,6 @@ func (o *Object) SetTier(tier string) error {
|
||||
return do.SetTier(tier)
|
||||
}
|
||||
|
||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||
// Need some sort of locking to prevent multiple downloads
|
||||
o.writebackMu.Lock()
|
||||
defer o.writebackMu.Unlock()
|
||||
|
||||
// FIXME what if correct object is already in o.co
|
||||
|
||||
newObj, err := o.Object.Writeback(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if newObj != nil {
|
||||
o.Object = newObj
|
||||
o.co = append(o.co, newObj) // FIXME should this append or overwrite or update?
|
||||
}
|
||||
return o.Object.Object.Open(ctx, options...)
|
||||
}
|
||||
|
||||
// ModTime returns the modification date of the directory
|
||||
// It returns the latest ModTime of all candidates
|
||||
func (d *Directory) ModTime(ctx context.Context) (t time.Time) {
|
||||
|
||||
@@ -877,10 +877,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
opt: *opt,
|
||||
upstreams: usedUpstreams,
|
||||
}
|
||||
err = upstream.Prepare(f.upstreams)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.actionPolicy, err = policy.Get(opt.ActionPolicy)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "PublicLink", "PutUnchecked", "MergeDirs", "OpenWriterAt", "OpenChunkWriter"}
|
||||
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "PublicLink", "PutUnchecked", "MergeDirs", "OpenWriterAt"}
|
||||
unimplementableObjectMethods = []string{}
|
||||
)
|
||||
|
||||
|
||||
@@ -16,7 +16,6 @@ import (
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/fspath"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -26,6 +25,10 @@ var (
|
||||
|
||||
// Fs is a wrap of any fs and its configs
|
||||
type Fs struct {
|
||||
// In order to ensure memory alignment on 32-bit architectures
|
||||
// when this field is accessed through sync/atomic functions,
|
||||
// it must be the first entry in the struct
|
||||
cacheExpiry int64 // usage cache expiry time
|
||||
fs.Fs
|
||||
RootFs fs.Fs
|
||||
RootPath string
|
||||
@@ -34,12 +37,9 @@ type Fs struct {
|
||||
creatable bool
|
||||
usage *fs.Usage // Cache the usage
|
||||
cacheTime time.Duration // cache duration
|
||||
cacheExpiry atomic.Int64 // usage cache expiry time
|
||||
cacheMutex sync.RWMutex
|
||||
cacheOnce sync.Once
|
||||
cacheUpdate bool // if the cache is updating
|
||||
writeback bool // writeback to this upstream
|
||||
writebackFs *Fs // if non zero, writeback to this upstream
|
||||
}
|
||||
|
||||
// Directory describes a wrapped Directory
|
||||
@@ -73,14 +73,14 @@ func New(ctx context.Context, remote, root string, opt *common.Options) (*Fs, er
|
||||
return nil, err
|
||||
}
|
||||
f := &Fs{
|
||||
RootPath: strings.TrimRight(root, "/"),
|
||||
Opt: opt,
|
||||
writable: true,
|
||||
creatable: true,
|
||||
cacheTime: time.Duration(opt.CacheTime) * time.Second,
|
||||
usage: &fs.Usage{},
|
||||
RootPath: strings.TrimRight(root, "/"),
|
||||
Opt: opt,
|
||||
writable: true,
|
||||
creatable: true,
|
||||
cacheExpiry: time.Now().Unix(),
|
||||
cacheTime: time.Duration(opt.CacheTime) * time.Second,
|
||||
usage: &fs.Usage{},
|
||||
}
|
||||
f.cacheExpiry.Store(time.Now().Unix())
|
||||
if strings.HasSuffix(fsPath, ":ro") {
|
||||
f.writable = false
|
||||
f.creatable = false
|
||||
@@ -89,9 +89,6 @@ func New(ctx context.Context, remote, root string, opt *common.Options) (*Fs, er
|
||||
f.writable = true
|
||||
f.creatable = false
|
||||
fsPath = fsPath[0 : len(fsPath)-3]
|
||||
} else if strings.HasSuffix(fsPath, ":writeback") {
|
||||
f.writeback = true
|
||||
fsPath = fsPath[0 : len(fsPath)-len(":writeback")]
|
||||
}
|
||||
remote = configName + fsPath
|
||||
rFs, err := cache.Get(ctx, remote)
|
||||
@@ -109,29 +106,6 @@ func New(ctx context.Context, remote, root string, opt *common.Options) (*Fs, er
|
||||
return f, err
|
||||
}
|
||||
|
||||
// Prepare the configured upstreams as a group
|
||||
func Prepare(fses []*Fs) error {
|
||||
writebacks := 0
|
||||
var writebackFs *Fs
|
||||
for _, f := range fses {
|
||||
if f.writeback {
|
||||
writebackFs = f
|
||||
writebacks++
|
||||
}
|
||||
}
|
||||
if writebacks == 0 {
|
||||
return nil
|
||||
} else if writebacks > 1 {
|
||||
return fmt.Errorf("can only have 1 :writeback not %d", writebacks)
|
||||
}
|
||||
for _, f := range fses {
|
||||
if !f.writeback {
|
||||
f.writebackFs = writebackFs
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WrapDirectory wraps an fs.Directory to include the info
|
||||
// of the upstream Fs
|
||||
func (f *Fs) WrapDirectory(e fs.Directory) *Directory {
|
||||
@@ -322,31 +296,9 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
|
||||
return do.Metadata(ctx)
|
||||
}
|
||||
|
||||
// Writeback writes the object back and returns a new object
|
||||
//
|
||||
// If it returns nil, nil then the original object is OK
|
||||
func (o *Object) Writeback(ctx context.Context) (*Object, error) {
|
||||
if o.f.writebackFs == nil {
|
||||
return nil, nil
|
||||
}
|
||||
newObj, err := operations.Copy(ctx, o.f.writebackFs.Fs, nil, o.Object.Remote(), o.Object)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// newObj could be nil here
|
||||
if newObj == nil {
|
||||
fs.Errorf(o, "nil Object returned from operations.Copy")
|
||||
return nil, nil
|
||||
}
|
||||
return &Object{
|
||||
Object: newObj,
|
||||
f: o.f,
|
||||
}, err
|
||||
}
|
||||
|
||||
// About gets quota information from the Fs
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
if f.cacheExpiry.Load() <= time.Now().Unix() {
|
||||
if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
|
||||
err := f.updateUsage()
|
||||
if err != nil {
|
||||
return nil, ErrUsageFieldNotSupported
|
||||
@@ -361,7 +313,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
//
|
||||
// This is returned as 0..math.MaxInt64-1 leaving math.MaxInt64 as a sentinel
|
||||
func (f *Fs) GetFreeSpace() (int64, error) {
|
||||
if f.cacheExpiry.Load() <= time.Now().Unix() {
|
||||
if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
|
||||
err := f.updateUsage()
|
||||
if err != nil {
|
||||
return math.MaxInt64 - 1, ErrUsageFieldNotSupported
|
||||
@@ -379,7 +331,7 @@ func (f *Fs) GetFreeSpace() (int64, error) {
|
||||
//
|
||||
// This is returned as 0..math.MaxInt64-1 leaving math.MaxInt64 as a sentinel
|
||||
func (f *Fs) GetUsedSpace() (int64, error) {
|
||||
if f.cacheExpiry.Load() <= time.Now().Unix() {
|
||||
if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
|
||||
err := f.updateUsage()
|
||||
if err != nil {
|
||||
return 0, ErrUsageFieldNotSupported
|
||||
@@ -395,7 +347,7 @@ func (f *Fs) GetUsedSpace() (int64, error) {
|
||||
|
||||
// GetNumObjects get the number of objects of the fs
|
||||
func (f *Fs) GetNumObjects() (int64, error) {
|
||||
if f.cacheExpiry.Load() <= time.Now().Unix() {
|
||||
if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
|
||||
err := f.updateUsage()
|
||||
if err != nil {
|
||||
return 0, ErrUsageFieldNotSupported
|
||||
@@ -450,7 +402,7 @@ func (f *Fs) updateUsageCore(lock bool) error {
|
||||
defer f.cacheMutex.Unlock()
|
||||
}
|
||||
// Store usage
|
||||
f.cacheExpiry.Store(time.Now().Add(f.cacheTime).Unix())
|
||||
atomic.StoreInt64(&f.cacheExpiry, time.Now().Add(f.cacheTime).Unix())
|
||||
f.usage = usage
|
||||
return nil
|
||||
}
|
||||
|
||||
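The About/GetFreeSpace/GetUsedSpace/GetNumObjects methods above all gate on an atomically stored expiry timestamp before refreshing the cached usage. A compact sketch of that lazy-refresh pattern in isolation, with illustrative names:

// usage_cache_sketch.go - an atomically stored unix timestamp marks when the
// cached value expires; readers refresh it lazily once the deadline passes.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

type usageCache struct {
	expiry   int64 // unix seconds; accessed only via sync/atomic
	mu       sync.Mutex
	value    int64 // the cached quantity (e.g. used bytes)
	lifetime time.Duration
}

func (c *usageCache) get(refresh func() int64) int64 {
	if atomic.LoadInt64(&c.expiry) <= time.Now().Unix() { // stale?
		c.mu.Lock()
		c.value = refresh()
		atomic.StoreInt64(&c.expiry, time.Now().Add(c.lifetime).Unix())
		c.mu.Unlock()
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.value
}

func main() {
	c := &usageCache{lifetime: time.Minute}
	fmt.Println(c.get(func() int64 { return 42 })) // refreshes once, then serves from cache
	fmt.Println(c.get(func() int64 { return 99 })) // still 42 until the minute is up
}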
@@ -121,7 +121,7 @@ func (o *Object) uploadChunks(ctx context.Context, in0 io.Reader, size int64, pa

getBody := func() (io.ReadCloser, error) {
// RepeatableReader{} plays well with accounting so rewinding doesn't make the progress buggy
if _, err := in.Seek(0, io.SeekStart); err != nil {
if _, err := in.Seek(0, io.SeekStart); err == nil {
return nil, err
}

@@ -203,7 +203,7 @@ func (o *Object) purgeUploadedChunks(ctx context.Context, uploadDir string) erro
|
||||
resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)
|
||||
|
||||
// directory doesn't exist, no need to purge
|
||||
if resp != nil && resp.StatusCode == http.StatusNotFound {
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
|
||||
"github.com/rclone/rclone/backend/zoho/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
@@ -1168,8 +1169,31 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
if o.id == "" {
|
||||
return nil, errors.New("can't download - no id")
|
||||
}
|
||||
var start, end int64 = 0, o.size
|
||||
partialContent := false
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.SeekOption:
|
||||
start = x.Offset
|
||||
partialContent = true
|
||||
case *fs.RangeOption:
|
||||
if x.Start >= 0 {
|
||||
start = x.Start
|
||||
if x.End > 0 && x.End < o.size {
|
||||
end = x.End + 1
|
||||
}
|
||||
} else {
|
||||
// {-1, 20} should load the last 20 characters [len-20:len]
|
||||
start = o.size - x.End
|
||||
}
|
||||
partialContent = true
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(nil, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
var resp *http.Response
|
||||
fs.FixRangeOption(options, o.size)
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Path: "/download/" + o.id,
|
||||
@@ -1182,6 +1206,20 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil {
return nil, err
}
if partialContent && resp.StatusCode == 200 && resp.Header.Get("Content-Range") == "" {
if start > 0 {
// We need to read and discard the beginning of the data...
_, err = io.CopyN(io.Discard, resp.Body, start)
if err != nil {
if resp != nil {
_ = resp.Body.Close()
}
return nil, err
}
}
// ... and return a limited reader for the remaining of the data
return readers.NewLimitedReadCloser(resp.Body, end-start), nil
}
return resp.Body, nil
}
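The added branch above compensates for a server that ignores the requested range and answers 200 with the full object: it discards the first start bytes and then caps the reader at end-start bytes (via rclone's readers.NewLimitedReadCloser). A standalone sketch of the same technique using only the standard library; the limitBody helper and the toy server are illustrative, not Zoho's API:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

// limitBody emulates a ranged response when the server ignored the Range
// header and returned the whole object: skip the first start bytes, then
// expose at most end-start bytes. The caller still closes the result.
func limitBody(resp *http.Response, start, end int64) (io.ReadCloser, error) {
	if start > 0 {
		if _, err := io.CopyN(io.Discard, resp.Body, start); err != nil {
			_ = resp.Body.Close()
			return nil, err
		}
	}
	return struct {
		io.Reader
		io.Closer
	}{io.LimitReader(resp.Body, end-start), resp.Body}, nil
}

func main() {
	// A toy server that ignores Range and always sends the full content.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "0123456789")
	}))
	defer srv.Close()

	resp, _ := http.Get(srv.URL)
	body, _ := limitBody(resp, 2, 6) // want bytes [2, 6)
	defer body.Close()
	b, _ := io.ReadAll(body)
	fmt.Printf("%s\n", b) // prints "2345"
}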
|
||||
|
||||
|
||||
@@ -6,4 +6,3 @@
|
||||
<abhi18av@users.noreply.github.com>
|
||||
<ankur0493@gmail.com>
|
||||
<agupta@egnyte.com>
|
||||
<ricci@disroot.org>
|
||||
|
||||
@@ -66,7 +66,6 @@ docs = [
|
||||
"pcloud.md",
|
||||
"pikpak.md",
|
||||
"premiumizeme.md",
|
||||
"protondrive.md",
|
||||
"putio.md",
|
||||
"seafile.md",
|
||||
"sftp.md",
|
||||
|
||||
@@ -22,8 +22,8 @@ var (
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefinition)
|
||||
cmdFlags := commandDefinition.Flags()
|
||||
flags.BoolVarP(cmdFlags, &jsonOutput, "json", "", false, "Format output as JSON", "")
|
||||
flags.BoolVarP(cmdFlags, &fullOutput, "full", "", false, "Full numbers instead of human-readable", "")
|
||||
flags.BoolVarP(cmdFlags, &jsonOutput, "json", "", false, "Format output as JSON")
|
||||
flags.BoolVarP(cmdFlags, &fullOutput, "full", "", false, "Full numbers instead of human-readable")
|
||||
}
|
||||
|
||||
// printValue formats uv to be output
|
||||
@@ -95,7 +95,6 @@ see complete list in [documentation](https://rclone.org/overview/#optional-featu
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.41",
|
||||
// "groups": "",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
|
||||
@@ -18,8 +18,8 @@ var (
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefinition)
|
||||
cmdFlags := commandDefinition.Flags()
|
||||
flags.BoolVarP(cmdFlags, &noAutoBrowser, "auth-no-open-browser", "", false, "Do not automatically open auth link in default browser", "")
|
||||
flags.StringVarP(cmdFlags, &template, "template", "", "", "The path to a custom Go template for generating HTML responses", "")
|
||||
flags.BoolVarP(cmdFlags, &noAutoBrowser, "auth-no-open-browser", "", false, "Do not automatically open auth link in default browser")
|
||||
flags.StringVarP(cmdFlags, &template, "template", "", "", "The path to a custom Go template for generating HTML responses")
|
||||
}
|
||||
|
||||
var commandDefinition = &cobra.Command{
|
||||
@@ -36,7 +36,6 @@ link in default browser automatically.
|
||||
Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.27",
|
||||
// "groups": "",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(1, 3, command, args)
|
||||
|
||||
@@ -24,8 +24,8 @@ var (
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefinition)
|
||||
cmdFlags := commandDefinition.Flags()
|
||||
flags.StringArrayVarP(cmdFlags, &options, "option", "o", options, "Option in the form name=value or name", "")
|
||||
flags.BoolVarP(cmdFlags, &useJSON, "json", "", useJSON, "Always output in JSON format", "")
|
||||
flags.StringArrayVarP(cmdFlags, &options, "option", "o", options, "Option in the form name=value or name")
|
||||
flags.BoolVarP(cmdFlags, &useJSON, "json", "", useJSON, "Always output in JSON format")
|
||||
}
|
||||
|
||||
var commandDefinition = &cobra.Command{
|
||||
@@ -60,7 +60,6 @@ Note to run these commands on a running backend then see
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.52",
|
||||
"groups": "Important",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(2, 1e6, command, args)
|
||||
|
||||
@@ -614,8 +614,6 @@ func (b *bisyncTest) runBisync(ctx context.Context, args []string) (err error) {
|
||||
opt.DryRun = true
|
||||
case "force":
|
||||
opt.Force = true
|
||||
case "create-empty-src-dirs":
|
||||
opt.CreateEmptySrcDirs = true
|
||||
case "remove-empty-dirs":
|
||||
opt.RemoveEmptyDirs = true
|
||||
case "check-sync-only":
|
||||
@@ -1165,10 +1163,6 @@ func (b *bisyncTest) newReplacer(mangle bool) *strings.Replacer {
|
||||
b.workDir + slash, "{workdir/}",
|
||||
b.path1, "{path1/}",
|
||||
b.path2, "{path2/}",
|
||||
"//?/" + strings.TrimSuffix(strings.Replace(b.path1, slash, "/", -1), "/"), "{path1}", // fix windows-specific issue
|
||||
"//?/" + strings.TrimSuffix(strings.Replace(b.path2, slash, "/", -1), "/"), "{path2}",
|
||||
strings.TrimSuffix(b.path1, slash), "{path1}", // ensure it's still recognized without trailing slash
|
||||
strings.TrimSuffix(b.path2, slash), "{path2}",
|
||||
b.sessionName, "{session}",
|
||||
}
|
||||
if fixSlash {
|
||||
|
||||
@@ -27,21 +27,18 @@ import (
|
||||
|
||||
// Options keep bisync options
|
||||
type Options struct {
|
||||
Resync bool
|
||||
CheckAccess bool
|
||||
CheckFilename string
|
||||
CheckSync CheckSyncMode
|
||||
CreateEmptySrcDirs bool
|
||||
RemoveEmptyDirs bool
|
||||
MaxDelete int // percentage from 0 to 100
|
||||
Force bool
|
||||
FiltersFile string
|
||||
Workdir string
|
||||
DryRun bool
|
||||
NoCleanup bool
|
||||
SaveQueues bool // save extra debugging files (test only flag)
|
||||
IgnoreListingChecksum bool
|
||||
Resilient bool
|
||||
Resync bool
|
||||
CheckAccess bool
|
||||
CheckFilename string
|
||||
CheckSync CheckSyncMode
|
||||
RemoveEmptyDirs bool
|
||||
MaxDelete int // percentage from 0 to 100
|
||||
Force bool
|
||||
FiltersFile string
|
||||
Workdir string
|
||||
DryRun bool
|
||||
NoCleanup bool
|
||||
SaveQueues bool // save extra debugging files (test only flag)
|
||||
}
|
||||
|
||||
// Default values
|
||||
@@ -101,19 +98,16 @@ var Opt Options
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefinition)
|
||||
cmdFlags := commandDefinition.Flags()
|
||||
flags.BoolVarP(cmdFlags, &Opt.Resync, "resync", "1", Opt.Resync, "Performs the resync run. Path1 files may overwrite Path2 versions. Consider using --verbose or --dry-run first.", "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.CheckAccess, "check-access", "", Opt.CheckAccess, makeHelp("Ensure expected {CHECKFILE} files are found on both Path1 and Path2 filesystems, else abort."), "")
|
||||
flags.StringVarP(cmdFlags, &Opt.CheckFilename, "check-filename", "", Opt.CheckFilename, makeHelp("Filename for --check-access (default: {CHECKFILE})"), "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.Force, "force", "", Opt.Force, "Bypass --max-delete safety check and run the sync. Consider using with --verbose", "")
|
||||
flags.FVarP(cmdFlags, &Opt.CheckSync, "check-sync", "", "Controls comparison of final listings: true|false|only (default: true)", "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.CreateEmptySrcDirs, "create-empty-src-dirs", "", Opt.CreateEmptySrcDirs, "Sync creation and deletion of empty directories. (Not compatible with --remove-empty-dirs)", "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.RemoveEmptyDirs, "remove-empty-dirs", "", Opt.RemoveEmptyDirs, "Remove ALL empty directories at the final cleanup step.", "")
|
||||
flags.StringVarP(cmdFlags, &Opt.FiltersFile, "filters-file", "", Opt.FiltersFile, "Read filtering patterns from a file", "")
|
||||
flags.StringVarP(cmdFlags, &Opt.Workdir, "workdir", "", Opt.Workdir, makeHelp("Use custom working dir - useful for testing. (default: {WORKDIR})"), "")
|
||||
flags.BoolVarP(cmdFlags, &tzLocal, "localtime", "", tzLocal, "Use local time in listings (default: UTC)", "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.NoCleanup, "no-cleanup", "", Opt.NoCleanup, "Retain working files (useful for troubleshooting and testing).", "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.IgnoreListingChecksum, "ignore-listing-checksum", "", Opt.IgnoreListingChecksum, "Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)", "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.Resilient, "resilient", "", Opt.Resilient, "Allow future runs to retry after certain less-serious errors, instead of requiring --resync. Use at your own risk!", "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.Resync, "resync", "1", Opt.Resync, "Performs the resync run. Path1 files may overwrite Path2 versions. Consider using --verbose or --dry-run first.")
|
||||
flags.BoolVarP(cmdFlags, &Opt.CheckAccess, "check-access", "", Opt.CheckAccess, makeHelp("Ensure expected {CHECKFILE} files are found on both Path1 and Path2 filesystems, else abort."))
|
||||
flags.StringVarP(cmdFlags, &Opt.CheckFilename, "check-filename", "", Opt.CheckFilename, makeHelp("Filename for --check-access (default: {CHECKFILE})"))
|
||||
flags.BoolVarP(cmdFlags, &Opt.Force, "force", "", Opt.Force, "Bypass --max-delete safety check and run the sync. Consider using with --verbose")
|
||||
flags.FVarP(cmdFlags, &Opt.CheckSync, "check-sync", "", "Controls comparison of final listings: true|false|only (default: true)")
|
||||
flags.BoolVarP(cmdFlags, &Opt.RemoveEmptyDirs, "remove-empty-dirs", "", Opt.RemoveEmptyDirs, "Remove empty directories at the final cleanup step.")
|
||||
flags.StringVarP(cmdFlags, &Opt.FiltersFile, "filters-file", "", Opt.FiltersFile, "Read filtering patterns from a file")
|
||||
flags.StringVarP(cmdFlags, &Opt.Workdir, "workdir", "", Opt.Workdir, makeHelp("Use custom working dir - useful for testing. (default: {WORKDIR})"))
|
||||
flags.BoolVarP(cmdFlags, &tzLocal, "localtime", "", tzLocal, "Use local time in listings (default: UTC)")
|
||||
flags.BoolVarP(cmdFlags, &Opt.NoCleanup, "no-cleanup", "", Opt.NoCleanup, "Retain working files (useful for troubleshooting and testing).")
|
||||
}
|
||||
|
||||
// bisync command definition
|
||||
@@ -123,7 +117,6 @@ var commandDefinition = &cobra.Command{
|
||||
Long: longHelp,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.58",
|
||||
"groups": "Filter,Copy,Important",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(2, 2, command, args)
|
||||
@@ -216,13 +209,9 @@ func (opt *Options) applyFilters(ctx context.Context) (context.Context, error) {
|
||||
}
|
||||
|
||||
if opt.Resync {
|
||||
if opt.DryRun {
|
||||
fs.Infof(nil, "Skipped storing filters file hash to %s as --dry-run is set", hashFile)
|
||||
} else {
|
||||
fs.Infof(nil, "Storing filters file hash to %s", hashFile)
|
||||
if err := os.WriteFile(hashFile, []byte(gotHash), bilib.PermSecure); err != nil {
|
||||
return ctx, err
|
||||
}
|
||||
fs.Infof(nil, "Storing filters file hash to %s", hashFile)
|
||||
if err := os.WriteFile(hashFile, []byte(gotHash), bilib.PermSecure); err != nil {
|
||||
return ctx, err
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -3,18 +3,13 @@
|
||||
package bisync
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/cmd/bisync/bilib"
|
||||
"github.com/rclone/rclone/cmd/check"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
)
|
||||
|
||||
@@ -95,47 +90,6 @@ func (ds *deltaSet) printStats() {
|
||||
ds.msg, nAll, nNew, nNewer, nOlder, nDeleted)
|
||||
}
|
||||
|
||||
// check potential conflicts (to avoid renaming if already identical)
|
||||
func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter.Filter, fs1, fs2 fs.Fs) (bilib.Names, error) {
|
||||
matches := bilib.Names{}
|
||||
if filterCheck.HaveFilesFrom() {
|
||||
fs.Debugf(nil, "There are potential conflicts to check.")
|
||||
|
||||
opt, close, checkopterr := check.GetCheckOpt(b.fs1, b.fs2)
|
||||
if checkopterr != nil {
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
fs.Debugf(nil, "GetCheckOpt error: %v", checkopterr)
|
||||
return matches, checkopterr
|
||||
}
|
||||
defer close()
|
||||
|
||||
opt.Match = new(bytes.Buffer)
|
||||
|
||||
// TODO: consider using custom CheckFn to act like cryptcheck, if either fs is a crypt remote and -c has been passed
|
||||
// note that cryptCheck() is not currently exported
|
||||
|
||||
fs.Infof(nil, "Checking potential conflicts...")
|
||||
check := operations.Check(ctxCheck, opt)
|
||||
fs.Infof(nil, "Finished checking the potential conflicts. %s", check)
|
||||
|
||||
//reset error count, because we don't want to count check errors as bisync errors
|
||||
accounting.Stats(ctxCheck).ResetErrors()
|
||||
|
||||
//return the list of identical files to check against later
|
||||
if len(fmt.Sprint(opt.Match)) > 0 {
|
||||
matches = bilib.ToNames(strings.Split(fmt.Sprint(opt.Match), "\n"))
|
||||
}
|
||||
if matches.NotEmpty() {
|
||||
fs.Debugf(nil, "The following potential conflicts were determined to be identical. %v", matches)
|
||||
} else {
|
||||
fs.Debugf(nil, "None of the conflicts were determined to be identical.")
|
||||
}
|
||||
|
||||
}
|
||||
return matches, nil
|
||||
}
|
||||
|
||||
// findDeltas
|
||||
func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing, newListing, msg string) (ds *deltaSet, err error) {
|
||||
var old, now *fileList
|
||||
@@ -229,52 +183,6 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
|
||||
ctxMove := b.opt.setDryRun(ctx)
|
||||
|
||||
// efficient isDir check
|
||||
// we load the listing just once and store only the dirs
|
||||
dirs1, dirs1Err := b.listDirsOnly(1)
|
||||
if dirs1Err != nil {
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
fs.Debugf(nil, "Error generating dirsonly list for path1: %v", dirs1Err)
|
||||
return
|
||||
}
|
||||
|
||||
dirs2, dirs2Err := b.listDirsOnly(2)
|
||||
if dirs2Err != nil {
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
fs.Debugf(nil, "Error generating dirsonly list for path2: %v", dirs2Err)
|
||||
return
|
||||
}
|
||||
|
||||
// build a list of only the "deltaOther"s so we don't have to check more files than necessary
|
||||
// this is essentially the same as running rclone check with a --files-from filter, then exempting the --match results from being renamed
|
||||
// we therefore avoid having to list the same directory more than once.
|
||||
|
||||
// we are intentionally overriding DryRun here because we need to perform the check, even during a dry run, or the results would be inaccurate.
|
||||
// check is a read-only operation by its nature, so it's already "dry" in that sense.
|
||||
ctxNew, ciCheck := fs.AddConfig(ctx)
|
||||
ciCheck.DryRun = false
|
||||
|
||||
ctxCheck, filterCheck := filter.AddConfig(ctxNew)
|
||||
|
||||
for _, file := range ds1.sort() {
|
||||
d1 := ds1.deltas[file]
|
||||
if d1.is(deltaOther) {
|
||||
d2 := ds2.deltas[file]
|
||||
if d2.is(deltaOther) {
|
||||
if err := filterCheck.AddFile(file); err != nil {
|
||||
fs.Debugf(nil, "Non-critical error adding file to list of potential conflicts to check: %s", err)
|
||||
} else {
|
||||
fs.Debugf(nil, "Added file to list of potential conflicts to check: %s", file)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//if there are potential conflicts to check, check them all here (outside the loop) in one fell swoop
|
||||
matches, err := b.checkconflicts(ctxCheck, filterCheck, b.fs1, b.fs2)
|
||||
|
||||
for _, file := range ds1.sort() {
|
||||
p1 := path1 + file
|
||||
p2 := path2 + file
|
||||
@@ -291,34 +199,22 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
handled.Add(file)
|
||||
} else if d2.is(deltaOther) {
|
||||
b.indent("!WARNING", file, "New or changed in both paths")
|
||||
|
||||
//if files are identical, leave them alone instead of renaming
|
||||
if dirs1.has(file) && dirs2.has(file) {
|
||||
fs.Debugf(nil, "This is a directory, not a file. Skipping equality check and will not rename: %s", file)
|
||||
} else {
|
||||
equal := matches.Has(file)
|
||||
if equal {
|
||||
fs.Infof(nil, "Files are equal! Skipping: %s", file)
|
||||
} else {
|
||||
fs.Debugf(nil, "Files are NOT equal: %s", file)
|
||||
b.indent("!Path1", p1+"..path1", "Renaming Path1 copy")
|
||||
if err = operations.MoveFile(ctxMove, b.fs1, b.fs1, file+"..path1", file); err != nil {
|
||||
err = fmt.Errorf("path1 rename failed for %s: %w", p1, err)
|
||||
b.critical = true
|
||||
return
|
||||
}
|
||||
b.indent("!Path1", p2+"..path1", "Queue copy to Path2")
|
||||
copy1to2.Add(file + "..path1")
|
||||
|
||||
b.indent("!Path2", p2+"..path2", "Renaming Path2 copy")
|
||||
if err = operations.MoveFile(ctxMove, b.fs2, b.fs2, file+"..path2", file); err != nil {
|
||||
err = fmt.Errorf("path2 rename failed for %s: %w", file, err)
|
||||
return
|
||||
}
|
||||
b.indent("!Path2", p1+"..path2", "Queue copy to Path1")
|
||||
copy2to1.Add(file + "..path2")
|
||||
}
|
||||
b.indent("!Path1", p1+"..path1", "Renaming Path1 copy")
|
||||
if err = operations.MoveFile(ctxMove, b.fs1, b.fs1, file+"..path1", file); err != nil {
|
||||
err = fmt.Errorf("path1 rename failed for %s: %w", p1, err)
|
||||
b.critical = true
|
||||
return
|
||||
}
|
||||
b.indent("!Path1", p2+"..path1", "Queue copy to Path2")
|
||||
copy1to2.Add(file + "..path1")
|
||||
|
||||
b.indent("!Path2", p2+"..path2", "Renaming Path2 copy")
|
||||
if err = operations.MoveFile(ctxMove, b.fs2, b.fs2, file+"..path2", file); err != nil {
|
||||
err = fmt.Errorf("path2 rename failed for %s: %w", file, err)
|
||||
return
|
||||
}
|
||||
b.indent("!Path2", p1+"..path2", "Queue copy to Path1")
|
||||
copy2to1.Add(file + "..path2")
|
||||
handled.Add(file)
|
||||
}
|
||||
} else {
|
||||
@@ -362,9 +258,6 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
//copy empty dirs from path2 to path1 (if --create-empty-src-dirs)
|
||||
b.syncEmptyDirs(ctx, b.fs1, copy2to1, dirs2, "make")
|
||||
}
|
||||
|
||||
if copy1to2.NotEmpty() {
|
||||
@@ -374,9 +267,6 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
//copy empty dirs from path1 to path2 (if --create-empty-src-dirs)
|
||||
b.syncEmptyDirs(ctx, b.fs2, copy1to2, dirs1, "make")
|
||||
}
|
||||
|
||||
if delete1.NotEmpty() {
|
||||
@@ -386,9 +276,6 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
//propagate deletions of empty dirs from path2 to path1 (if --create-empty-src-dirs)
|
||||
b.syncEmptyDirs(ctx, b.fs1, delete1, dirs1, "remove")
|
||||
}
|
||||
|
||||
if delete2.NotEmpty() {
|
||||
@@ -398,9 +285,6 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
//propagate deletions of empty dirs from path1 to path2 (if --create-empty-src-dirs)
|
||||
b.syncEmptyDirs(ctx, b.fs2, delete2, dirs2, "remove")
|
||||
}
|
||||
|
||||
return
|
||||
|
||||
@@ -27,16 +27,11 @@ var rcHelp = makeHelp(`This takes the following parameters
- checkFilename - file name for checkAccess (default: {CHECKFILE})
- maxDelete - abort sync if percentage of deleted files is above
this threshold (default: {MAXDELETE})
- force - Bypass maxDelete safety check and run the sync
- force - maxDelete safety check and run the sync
- checkSync - |true| by default, |false| disables comparison of final listings,
|only| will skip sync, only compare listings from the last run
- createEmptySrcDirs - Sync creation and deletion of empty directories.
(Not compatible with --remove-empty-dirs)
- removeEmptyDirs - remove empty directories at the final cleanup step
- filtersFile - read filtering patterns from a file
- ignoreListingChecksum - Do not use checksums for listings
- resilient - Allow future runs to retry after certain less-serious errors, instead of requiring resync.
Use at your own risk!
- workdir - server directory for history files (default: {WORKDIR})
- noCleanup - retain working files
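The parameter list above documents the bisync remote-control call. A hedged example of driving it over the rc HTTP API; it assumes an rc server listening on the default localhost:5572 and that the handler is registered as sync/bisync, and the paths shown are placeholders rather than values from this diff:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Parameters mirror the list above; path1/path2 and the rc address are
	// placeholders for this sketch.
	params := map[string]any{
		"path1":     "/tmp/path1",
		"path2":     "remote:path2",
		"dryRun":    true,
		"checkSync": "true",
		"maxDelete": 50,
	}
	body, _ := json.Marshal(params)
	resp, err := http.Post("http://localhost:5572/sync/bisync", "application/json", bytes.NewReader(body))
	if err != nil {
		fmt.Println("rc call failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("rc status:", resp.Status)
}

The same parameters could presumably be passed on the command line as "rclone rc sync/bisync path1=... path2=... dryRun=true", assuming an rc server is running with the relevant remotes configured.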
|
||||
|
||||
|
||||
@@ -43,11 +43,10 @@ var tzLocal = false
|
||||
|
||||
// fileInfo describes a file
|
||||
type fileInfo struct {
|
||||
size int64
|
||||
time time.Time
|
||||
hash string
|
||||
id string
|
||||
flags string
|
||||
size int64
|
||||
time time.Time
|
||||
hash string
|
||||
id string
|
||||
}
|
||||
|
||||
// fileList represents a listing
|
||||
@@ -77,18 +76,17 @@ func (ls *fileList) get(file string) *fileInfo {
|
||||
return ls.info[file]
|
||||
}
|
||||
|
||||
func (ls *fileList) put(file string, size int64, time time.Time, hash, id string, flags string) {
|
||||
func (ls *fileList) put(file string, size int64, time time.Time, hash, id string) {
|
||||
fi := ls.get(file)
|
||||
if fi != nil {
|
||||
fi.size = size
|
||||
fi.time = time
|
||||
} else {
|
||||
fi = &fileInfo{
|
||||
size: size,
|
||||
time: time,
|
||||
hash: hash,
|
||||
id: id,
|
||||
flags: flags,
|
||||
size: size,
|
||||
time: time,
|
||||
hash: hash,
|
||||
id: id,
|
||||
}
|
||||
ls.info[file] = fi
|
||||
ls.list = append(ls.list, file)
|
||||
@@ -154,11 +152,7 @@ func (ls *fileList) save(ctx context.Context, listing string) error {
|
||||
id = "-"
|
||||
}
|
||||
|
||||
flags := fi.flags
|
||||
if flags == "" {
|
||||
flags = "-"
|
||||
}
|
||||
|
||||
flags := "-"
|
||||
_, err = fmt.Fprintf(file, lineFormat, flags, fi.size, hash, id, time, remote)
|
||||
if err != nil {
|
||||
_ = file.Close()
|
||||
@@ -223,7 +217,7 @@ func (b *bisyncRun) loadListing(listing string) (*fileList, error) {
|
||||
}
|
||||
}
|
||||
|
||||
if (flags != "-" && flags != "d") || id != "-" || sizeErr != nil || timeErr != nil || hashErr != nil || nameErr != nil {
|
||||
if flags != "-" || id != "-" || sizeErr != nil || timeErr != nil || hashErr != nil || nameErr != nil {
|
||||
fs.Logf(listing, "Ignoring incorrect line: %q", line)
|
||||
continue
|
||||
}
|
||||
@@ -235,7 +229,7 @@ func (b *bisyncRun) loadListing(listing string) (*fileList, error) {
|
||||
}
|
||||
}
|
||||
|
||||
ls.put(nameVal, sizeVal, timeVal.In(TZ), hashVal, id, flags)
|
||||
ls.put(nameVal, sizeVal, timeVal.In(TZ), hashVal, id)
|
||||
}
|
||||
|
||||
return ls, nil
|
||||
@@ -259,20 +253,15 @@ func (b *bisyncRun) makeListing(ctx context.Context, f fs.Fs, listing string) (l
|
||||
ci := fs.GetConfig(ctx)
|
||||
depth := ci.MaxDepth
|
||||
hashType := hash.None
|
||||
if !b.opt.IgnoreListingChecksum {
|
||||
// Currently bisync just honors --ignore-listing-checksum
|
||||
// (note that this is different from --ignore-checksum)
|
||||
if !ci.IgnoreChecksum {
|
||||
// Currently bisync just honors --ignore-checksum
|
||||
// TODO add full support for checksums and related flags
|
||||
hashType = f.Hashes().GetOne()
|
||||
}
|
||||
ls = newFileList()
|
||||
ls.hash = hashType
|
||||
var lock sync.Mutex
|
||||
listType := walk.ListObjects
|
||||
if b.opt.CreateEmptySrcDirs {
|
||||
listType = walk.ListAll
|
||||
}
|
||||
err = walk.ListR(ctx, f, "", false, depth, listType, func(entries fs.DirEntries) error {
|
||||
err = walk.ListR(ctx, f, "", false, depth, walk.ListObjects, func(entries fs.DirEntries) error {
|
||||
var firstErr error
|
||||
entries.ForObject(func(o fs.Object) {
|
||||
//tr := accounting.Stats(ctx).NewCheckingTransfer(o) // TODO
|
||||
@@ -287,27 +276,12 @@ func (b *bisyncRun) makeListing(ctx context.Context, f fs.Fs, listing string) (l
|
||||
}
|
||||
}
|
||||
time := o.ModTime(ctx).In(TZ)
|
||||
id := "" // TODO
|
||||
flags := "-" // "-" for a file and "d" for a directory
|
||||
id := "" // TODO
|
||||
lock.Lock()
|
||||
ls.put(o.Remote(), o.Size(), time, hashVal, id, flags)
|
||||
ls.put(o.Remote(), o.Size(), time, hashVal, id)
|
||||
lock.Unlock()
|
||||
//tr.Done(ctx, nil) // TODO
|
||||
})
|
||||
if b.opt.CreateEmptySrcDirs {
|
||||
entries.ForDir(func(o fs.Directory) {
|
||||
var (
|
||||
hashVal string
|
||||
)
|
||||
time := o.ModTime(ctx).In(TZ)
|
||||
id := "" // TODO
|
||||
flags := "d" // "-" for a file and "d" for a directory
|
||||
lock.Lock()
|
||||
//record size as 0 instead of -1, so bisync doesn't think it's a google doc
|
||||
ls.put(o.Remote(), 0, time, hashVal, id, flags)
|
||||
lock.Unlock()
|
||||
})
|
||||
}
|
||||
return firstErr
|
||||
})
|
||||
if err == nil {
|
||||
@@ -326,53 +300,5 @@ func (b *bisyncRun) checkListing(ls *fileList, listing, msg string) error {
|
||||
}
|
||||
fs.Errorf(nil, "Empty %s listing. Cannot sync to an empty directory: %s", msg, listing)
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
return fmt.Errorf("empty %s listing: %s", msg, listing)
|
||||
}
|
||||
|
||||
// listingNum should be 1 for path1 or 2 for path2
|
||||
func (b *bisyncRun) loadListingNum(listingNum int) (*fileList, error) {
|
||||
listingpath := b.basePath + ".path1.lst-new"
|
||||
if listingNum == 2 {
|
||||
listingpath = b.basePath + ".path2.lst-new"
|
||||
}
|
||||
|
||||
if b.opt.DryRun {
|
||||
listingpath = strings.Replace(listingpath, ".lst-", ".lst-dry-", 1)
|
||||
}
|
||||
|
||||
fs.Debugf(nil, "loading listing for path %d at: %s", listingNum, listingpath)
|
||||
return b.loadListing(listingpath)
|
||||
}
|
||||
|
||||
func (b *bisyncRun) listDirsOnly(listingNum int) (*fileList, error) {
|
||||
var fulllisting *fileList
|
||||
var dirsonly = newFileList()
|
||||
var err error
|
||||
|
||||
if !b.opt.CreateEmptySrcDirs {
|
||||
return dirsonly, err
|
||||
}
|
||||
|
||||
fulllisting, err = b.loadListingNum(listingNum)
|
||||
|
||||
if err != nil {
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
fs.Debugf(nil, "Error loading listing to generate dirsonly list: %v", err)
|
||||
return dirsonly, err
|
||||
}
|
||||
|
||||
for _, obj := range fulllisting.list {
|
||||
info := fulllisting.get(obj)
|
||||
|
||||
if info.flags == "d" {
|
||||
fs.Debugf(nil, "found a dir: %s", obj)
|
||||
dirsonly.put(obj, info.size, info.time, info.hash, info.id, info.flags)
|
||||
} else {
|
||||
fs.Debugf(nil, "not a dir: %s", obj)
|
||||
}
|
||||
}
|
||||
|
||||
return dirsonly, err
|
||||
}
|
||||
|
||||
@@ -25,14 +25,13 @@ var ErrBisyncAborted = errors.New("bisync aborted")
|
||||
|
||||
// bisyncRun keeps bisync runtime state
|
||||
type bisyncRun struct {
|
||||
fs1 fs.Fs
|
||||
fs2 fs.Fs
|
||||
abort bool
|
||||
critical bool
|
||||
retryable bool
|
||||
basePath string
|
||||
workDir string
|
||||
opt *Options
|
||||
fs1 fs.Fs
|
||||
fs2 fs.Fs
|
||||
abort bool
|
||||
critical bool
|
||||
basePath string
|
||||
workDir string
|
||||
opt *Options
|
||||
}
|
||||
|
||||
// Bisync handles lock file, performs bisync run and checks exit status
|
||||
@@ -124,19 +123,14 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
|
||||
}
|
||||
|
||||
if b.critical {
|
||||
if b.retryable && b.opt.Resilient {
|
||||
fs.Errorf(nil, "Bisync critical error: %v", err)
|
||||
fs.Errorf(nil, "Bisync aborted. Error is retryable without --resync due to --resilient mode.")
|
||||
} else {
|
||||
if bilib.FileExists(listing1) {
|
||||
_ = os.Rename(listing1, listing1+"-err")
|
||||
}
|
||||
if bilib.FileExists(listing2) {
|
||||
_ = os.Rename(listing2, listing2+"-err")
|
||||
}
|
||||
fs.Errorf(nil, "Bisync critical error: %v", err)
|
||||
fs.Errorf(nil, "Bisync aborted. Must run --resync to recover.")
|
||||
if bilib.FileExists(listing1) {
|
||||
_ = os.Rename(listing1, listing1+"-err")
|
||||
}
|
||||
if bilib.FileExists(listing2) {
|
||||
_ = os.Rename(listing2, listing2+"-err")
|
||||
}
|
||||
fs.Errorf(nil, "Bisync critical error: %v", err)
|
||||
fs.Errorf(nil, "Bisync aborted. Must run --resync to recover.")
|
||||
return ErrBisyncAborted
|
||||
}
|
||||
if b.abort {
|
||||
@@ -158,7 +152,6 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
|
||||
fs.Infof(nil, "Validating listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2))
|
||||
if err = b.checkSync(listing1, listing2); err != nil {
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -183,7 +176,6 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
|
||||
var fctx context.Context
|
||||
if fctx, err = b.opt.applyFilters(octx); err != nil {
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
return
|
||||
}
|
||||
|
||||
@@ -196,7 +188,6 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
|
||||
if !bilib.FileExists(listing1) || !bilib.FileExists(listing2) {
|
||||
// On prior critical error abort, the prior listings are renamed to .lst-err to lock out further runs
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
return errors.New("cannot find prior Path1 or Path2 listings, likely due to critical error on prior run")
|
||||
}
|
||||
|
||||
@@ -224,7 +215,6 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
|
||||
err = b.checkAccess(ds1.checkFiles, ds2.checkFiles)
|
||||
if err != nil {
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -265,7 +255,6 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
|
||||
changes1, changes2, err = b.applyDeltas(octx, ds1, ds2)
|
||||
if err != nil {
|
||||
b.critical = true
|
||||
// b.retryable = true // not sure about this one
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -294,7 +283,6 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
|
||||
}
|
||||
if err != nil {
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -322,7 +310,6 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
|
||||
}
|
||||
if err != nil {
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -354,39 +341,6 @@ func (b *bisyncRun) resync(octx, fctx context.Context, listing1, listing2 string
|
||||
return err
|
||||
}
|
||||
|
||||
// Check access health on the Path1 and Path2 filesystems
|
||||
// enforce even though this is --resync
|
||||
if b.opt.CheckAccess {
|
||||
fs.Infof(nil, "Checking access health")
|
||||
|
||||
ds1 := &deltaSet{
|
||||
checkFiles: bilib.Names{},
|
||||
}
|
||||
|
||||
ds2 := &deltaSet{
|
||||
checkFiles: bilib.Names{},
|
||||
}
|
||||
|
||||
for _, file := range filesNow1.list {
|
||||
if filepath.Base(file) == b.opt.CheckFilename {
|
||||
ds1.checkFiles.Add(file)
|
||||
}
|
||||
}
|
||||
|
||||
for _, file := range filesNow2.list {
|
||||
if filepath.Base(file) == b.opt.CheckFilename {
|
||||
ds2.checkFiles.Add(file)
|
||||
}
|
||||
}
|
||||
|
||||
err = b.checkAccess(ds1.checkFiles, ds2.checkFiles)
|
||||
if err != nil {
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
copy2to1 := []string{}
|
||||
for _, file := range filesNow2.list {
|
||||
if !filesNow1.has(file) {
|
||||
@@ -413,34 +367,11 @@ func (b *bisyncRun) resync(octx, fctx context.Context, listing1, listing2 string
|
||||
// prevent overwriting Google Doc files (their size is -1)
|
||||
filterSync.Opt.MinSize = 0
|
||||
}
|
||||
if err = sync.CopyDir(ctxSync, b.fs2, b.fs1, b.opt.CreateEmptySrcDirs); err != nil {
|
||||
if err = sync.Sync(ctxSync, b.fs2, b.fs1, false); err != nil {
|
||||
b.critical = true
|
||||
return err
|
||||
}
|
||||
|
||||
if b.opt.CreateEmptySrcDirs {
|
||||
// copy Path2 back to Path1, for empty dirs
|
||||
// the fastCopy above cannot include directories, because it relies on --files-from for filtering,
|
||||
// so instead we'll copy them here, relying on fctx for our filtering.
|
||||
|
||||
// This preserves the original resync order for backward compatibility. It is essentially:
|
||||
// rclone copy Path2 Path1 --ignore-existing
|
||||
// rclone copy Path1 Path2 --create-empty-src-dirs
|
||||
// rclone copy Path2 Path1 --create-empty-src-dirs
|
||||
|
||||
// although if we were starting from scratch, it might be cleaner and faster to just do:
|
||||
// rclone copy Path2 Path1 --create-empty-src-dirs
|
||||
// rclone copy Path1 Path2 --create-empty-src-dirs
|
||||
|
||||
fs.Infof(nil, "Resynching Path2 to Path1 (for empty dirs)")
|
||||
|
||||
//note copy (not sync) and dst comes before src
|
||||
if err = sync.CopyDir(ctxSync, b.fs1, b.fs2, b.opt.CreateEmptySrcDirs); err != nil {
|
||||
b.critical = true
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
fs.Infof(nil, "Resync updating listings")
|
||||
if _, err = b.makeListing(fctx, b.fs1, listing1); err != nil {
|
||||
b.critical = true
|
||||
|
||||
@@ -3,7 +3,6 @@ package bisync
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/rclone/rclone/cmd/bisync/bilib"
|
||||
"github.com/rclone/rclone/fs"
|
||||
@@ -24,7 +23,7 @@ func (b *bisyncRun) fastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.
|
||||
}
|
||||
}
|
||||
|
||||
return sync.CopyDir(ctxCopy, fdst, fsrc, b.opt.CreateEmptySrcDirs)
|
||||
return sync.CopyDir(ctxCopy, fdst, fsrc, false)
|
||||
}
|
||||
|
||||
func (b *bisyncRun) fastDelete(ctx context.Context, f fs.Fs, files bilib.Names, queueName string) error {
|
||||
@@ -33,14 +32,7 @@ func (b *bisyncRun) fastDelete(ctx context.Context, f fs.Fs, files bilib.Names,
|
||||
}
|
||||
|
||||
transfers := fs.GetConfig(ctx).Transfers
|
||||
|
||||
ctxRun, filterDelete := filter.AddConfig(b.opt.setDryRun(ctx))
|
||||
|
||||
for _, file := range files.ToList() {
|
||||
if err := filterDelete.AddFile(file); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
ctxRun := b.opt.setDryRun(ctx)
|
||||
|
||||
objChan := make(fs.ObjectsChan, transfers)
|
||||
errChan := make(chan error, 1)
|
||||
@@ -61,36 +53,6 @@ func (b *bisyncRun) fastDelete(ctx context.Context, f fs.Fs, files bilib.Names,
|
||||
return err
|
||||
}
|
||||
|
||||
// operation should be "make" or "remove"
|
||||
func (b *bisyncRun) syncEmptyDirs(ctx context.Context, dst fs.Fs, candidates bilib.Names, dirsList *fileList, operation string) {
|
||||
if b.opt.CreateEmptySrcDirs && (!b.opt.Resync || operation == "make") {
|
||||
|
||||
candidatesList := candidates.ToList()
|
||||
if operation == "remove" {
|
||||
// reverse the sort order to ensure we remove subdirs before parent dirs
|
||||
sort.Sort(sort.Reverse(sort.StringSlice(candidatesList)))
|
||||
}
|
||||
|
||||
for _, s := range candidatesList {
|
||||
var direrr error
|
||||
if dirsList.has(s) { //make sure it's a dir, not a file
|
||||
if operation == "remove" {
|
||||
//note: we need to use Rmdirs instead of Rmdir because directories will fail to delete if they have other empty dirs inside of them.
|
||||
direrr = operations.Rmdirs(ctx, dst, s, false)
|
||||
} else if operation == "make" {
|
||||
direrr = operations.Mkdir(ctx, dst, s)
|
||||
} else {
|
||||
direrr = fmt.Errorf("invalid operation. Expected 'make' or 'remove', received '%q'", operation)
|
||||
}
|
||||
|
||||
if direrr != nil {
|
||||
fs.Debugf(nil, "Error syncing directory: %v", direrr)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *bisyncRun) saveQueue(files bilib.Names, jobName string) error {
|
||||
if !b.opt.SaveQueues {
|
||||
return nil
|
||||
|
||||
@@ -26,7 +26,6 @@ func rcBisync(ctx context.Context, in rc.Params) (out rc.Params, err error) {
|
||||
|
||||
if dryRun, err := in.GetBool("dryRun"); err == nil {
|
||||
ci.DryRun = dryRun
|
||||
opt.DryRun = dryRun
|
||||
} else if rc.NotErrParamNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
@@ -49,21 +48,12 @@ func rcBisync(ctx context.Context, in rc.Params) (out rc.Params, err error) {
|
||||
if opt.Force, err = in.GetBool("force"); rc.NotErrParamNotFound(err) {
|
||||
return
|
||||
}
|
||||
if opt.CreateEmptySrcDirs, err = in.GetBool("createEmptySrcDirs"); rc.NotErrParamNotFound(err) {
|
||||
return
|
||||
}
|
||||
if opt.RemoveEmptyDirs, err = in.GetBool("removeEmptyDirs"); rc.NotErrParamNotFound(err) {
|
||||
return
|
||||
}
|
||||
if opt.NoCleanup, err = in.GetBool("noCleanup"); rc.NotErrParamNotFound(err) {
|
||||
return
|
||||
}
|
||||
if opt.IgnoreListingChecksum, err = in.GetBool("ignoreListingChecksum"); rc.NotErrParamNotFound(err) {
|
||||
return
|
||||
}
|
||||
if opt.Resilient, err = in.GetBool("resilient"); rc.NotErrParamNotFound(err) {
|
||||
return
|
||||
}
|
||||
|
||||
if opt.CheckFilename, err = in.GetString("checkFilename"); rc.NotErrParamNotFound(err) {
|
||||
return
|
||||
@@ -79,9 +69,6 @@ func rcBisync(ctx context.Context, in rc.Params) (out rc.Params, err error) {
|
||||
if rc.NotErrParamNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
if checkSync == "" {
|
||||
checkSync = "true"
|
||||
}
|
||||
if err := opt.CheckSync.Set(checkSync); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file10.txt"
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file11.txt"
|
||||
- 13 md5:fb3ecfb2800400fb01b0bfd39903e9fb - 2001-01-02T00:00:00.000000000+0000 "file2.txt"
|
||||
- 39 md5:0860a03592626642f8fd6c8bfb447d2a - 2001-03-04T00:00:00.000000000+0000 "file5.txt..path1"
|
||||
- 39 md5:979a803b15d27df0c31ad7d29006d10b - 2001-01-02T00:00:00.000000000+0000 "file5.txt..path2"
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-03-04T00:00:00.000000000+0000 "file5.txt..path1"
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file5.txt..path2"
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file6.txt"
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file7.txt"
|
||||
|
||||
@@ -4,5 +4,5 @@
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file11.txt"
|
||||
- 13 md5:fb3ecfb2800400fb01b0bfd39903e9fb - 2001-01-02T00:00:00.000000000+0000 "file2.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
|
||||
- 39 md5:0860a03592626642f8fd6c8bfb447d2a - 2001-03-04T00:00:00.000000000+0000 "file5.txt"
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-03-04T00:00:00.000000000+0000 "file5.txt"
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file7.txt"
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file10.txt"
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file11.txt"
|
||||
- 13 md5:fb3ecfb2800400fb01b0bfd39903e9fb - 2001-01-02T00:00:00.000000000+0000 "file2.txt"
|
||||
- 39 md5:0860a03592626642f8fd6c8bfb447d2a - 2001-03-04T00:00:00.000000000+0000 "file5.txt..path1"
|
||||
- 39 md5:979a803b15d27df0c31ad7d29006d10b - 2001-01-02T00:00:00.000000000+0000 "file5.txt..path2"
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-03-04T00:00:00.000000000+0000 "file5.txt..path1"
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file5.txt..path2"
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file6.txt"
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file7.txt"
|
||||
|
||||
@@ -4,5 +4,5 @@
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file10.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
|
||||
- 39 md5:979a803b15d27df0c31ad7d29006d10b - 2001-01-02T00:00:00.000000000+0000 "file5.txt"
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file5.txt"
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file6.txt"
|
||||
|
||||
@@ -68,11 +68,6 @@ INFO : - Path2 File was deleted - file7.txt
|
||||
INFO : - Path2 File was deleted - file8.txt
|
||||
INFO : Path2: 7 changes: 1 new, 3 newer, 0 older, 3 deleted
|
||||
INFO : Applying changes
|
||||
INFO : Checking potential conflicts...
|
||||
ERROR : file5.txt: md5 differ
|
||||
NOTICE: Local file system at {path2}: 1 differences found
|
||||
NOTICE: Local file system at {path2}: 1 errors while checking
|
||||
INFO : Finished checking the potential conflicts. 1 differences found
|
||||
INFO : - Path1 Queue copy to Path2 - {path2/}file11.txt
|
||||
INFO : - Path1 Queue copy to Path2 - {path2/}file2.txt
|
||||
INFO : - Path2 Queue delete - {path2/}file4.txt
|
||||
|
||||
@@ -1 +1 @@
|
||||
This file is newer and not equal to 5R
|
||||
This file is newer
|
||||
|
||||
@@ -1 +1 @@
|
||||
This file is newer and not equal to 5L
|
||||
This file is newer
|
||||
|
||||
@@ -39,12 +39,10 @@ Bisync error: bisync aborted
|
||||
(10) : move-listings path2-missing
|
||||
|
||||
(11) : test 3. put the remote subdir .chk_file back, run resync.
|
||||
(12) : copy-file {path1/}subdir/.chk_file {path2/}subdir/
|
||||
(12) : copy-file {path1/}subdir/.chk_file {path2/}
|
||||
(13) : bisync check-access resync check-filename=.chk_file
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Copying unique Path2 files to Path1
|
||||
INFO : Checking access health
|
||||
INFO : Found 2 matching ".chk_file" files on both paths
|
||||
INFO : Resynching Path1 to Path2
|
||||
INFO : Resync updating listings
|
||||
INFO : Bisync successful
|
||||
|
||||
@@ -20,7 +20,7 @@ bisync check-access check-filename=.chk_file
|
||||
move-listings path2-missing
|
||||
|
||||
test 3. put the remote subdir .chk_file back, run resync.
|
||||
copy-file {path1/}subdir/.chk_file {path2/}subdir/
|
||||
copy-file {path1/}subdir/.chk_file {path2/}
|
||||
bisync check-access resync check-filename=.chk_file
|
||||
|
||||
test 4. run sync with check-access. should pass.
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
"subdir"
|
||||
@@ -1 +0,0 @@
|
||||
"subdir"
|
||||
@@ -1,7 +0,0 @@
|
||||
# bisync listing v1 from test
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.txt"
|
||||
@@ -1,7 +0,0 @@
|
||||
# bisync listing v1 from test
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.txt"
|
||||
@@ -1,7 +0,0 @@
|
||||
# bisync listing v1 from test
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.txt"
|
||||
@@ -1,7 +0,0 @@
|
||||
# bisync listing v1 from test
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.txt"
|
||||
@@ -1 +0,0 @@
|
||||
"subdir"
|
||||
@@ -1,142 +0,0 @@
|
||||
(01) : test createemptysrcdirs
|
||||
|
||||
|
||||
(02) : test initial bisync
|
||||
(03) : touch-glob 2001-01-02 {datadir/} placeholder.txt
|
||||
(04) : copy-as {datadir/}placeholder.txt {path1/} file1.txt
|
||||
(05) : copy-as {datadir/}placeholder.txt {path1/} file1.copy1.txt
|
||||
(06) : copy-as {datadir/}placeholder.txt {path1/} file1.copy2.txt
|
||||
(07) : copy-as {datadir/}placeholder.txt {path1/} file1.copy3.txt
|
||||
(08) : copy-as {datadir/}placeholder.txt {path1/} file1.copy4.txt
|
||||
(09) : copy-as {datadir/}placeholder.txt {path1/} file1.copy5.txt
|
||||
(10) : bisync resync
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Copying unique Path2 files to Path1
|
||||
INFO : Resynching Path1 to Path2
|
||||
INFO : Resync updating listings
|
||||
INFO : Bisync successful
|
||||
|
||||
(11) : test 1. Create an empty dir on Path1 by creating subdir/placeholder.txt and then deleting the placeholder
|
||||
(12) : copy-as {datadir/}placeholder.txt {path1/} subdir/placeholder.txt
|
||||
(13) : touch-glob 2001-01-02 {path1/} subdir
|
||||
(14) : delete-file {path1/}subdir/placeholder.txt
|
||||
|
||||
(15) : test 2. Run bisync without --create-empty-src-dirs
|
||||
(16) : bisync
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Path1 checking for diffs
|
||||
INFO : Path2 checking for diffs
|
||||
INFO : No changes found
|
||||
INFO : Updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : Bisync successful
|
||||
|
||||
(17) : test 3. Confirm the subdir exists only on Path1 and not Path2
|
||||
(18) : list-dirs {path1/}
|
||||
subdir/
|
||||
(19) : list-dirs {path2/}
|
||||
|
||||
(20) : test 4.Run bisync WITH --create-empty-src-dirs
|
||||
(21) : bisync create-empty-src-dirs
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Path1 checking for diffs
|
||||
INFO : - Path1 File is new - subdir
|
||||
INFO : Path1: 1 changes: 1 new, 0 newer, 0 older, 0 deleted
|
||||
INFO : Path2 checking for diffs
|
||||
INFO : Applying changes
|
||||
INFO : - Path1 Queue copy to Path2 - {path2/}subdir
|
||||
INFO : - Path1 Do queued copies to - Path2
|
||||
INFO : Updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : Bisync successful
|
||||
|
||||
(22) : test 5. Confirm the subdir exists on both paths
|
||||
(23) : list-dirs {path1/}
|
||||
subdir/
|
||||
(24) : list-dirs {path2/}
|
||||
subdir/
|
||||
|
||||
(25) : test 6. Delete the empty dir on Path1 using purge-children (and also add files so the path isn't empty)
|
||||
(26) : purge-children {path1/}
|
||||
(27) : copy-as {datadir/}placeholder.txt {path1/} file1.txt
|
||||
(28) : copy-as {datadir/}placeholder.txt {path1/} file1.copy1.txt
|
||||
(29) : copy-as {datadir/}placeholder.txt {path1/} file1.copy2.txt
|
||||
(30) : copy-as {datadir/}placeholder.txt {path1/} file1.copy3.txt
|
||||
(31) : copy-as {datadir/}placeholder.txt {path1/} file1.copy4.txt
|
||||
(32) : copy-as {datadir/}placeholder.txt {path1/} file1.copy5.txt
|
||||
|
||||
(33) : test 7. Run bisync without --create-empty-src-dirs
|
||||
(34) : bisync
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Path1 checking for diffs
|
||||
INFO : - Path1 File was deleted - RCLONE_TEST
|
||||
INFO : - Path1 File was deleted - subdir
|
||||
INFO : Path1: 2 changes: 0 new, 0 newer, 0 older, 2 deleted
|
||||
INFO : Path2 checking for diffs
|
||||
INFO : - Path2 File was deleted - subdir
|
||||
INFO : Path2: 1 changes: 0 new, 0 newer, 0 older, 1 deleted
|
||||
INFO : Applying changes
|
||||
INFO : - Path2 Queue delete - {path2/}RCLONE_TEST
|
||||
INFO : - Do queued deletes on - Path2
|
||||
INFO : Updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : Bisync successful
|
||||
|
||||
(35) : test 8. Confirm the subdir exists only on Path2 and not Path1
|
||||
(36) : list-dirs {path1/}
|
||||
(37) : list-dirs {path2/}
|
||||
subdir/
|
||||
|
||||
(38) : test 9. Reset, do the delete again, and run bisync WITH --create-empty-src-dirs
|
||||
(39) : bisync resync create-empty-src-dirs
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Copying unique Path2 files to Path1
|
||||
INFO : - Path2 Resync will copy to Path1 - subdir
|
||||
INFO : - Path2 Resync is doing queued copies to - Path1
|
||||
INFO : Resynching Path1 to Path2
|
||||
INFO : Resynching Path2 to Path1 (for empty dirs)
|
||||
INFO : Resync updating listings
|
||||
INFO : Bisync successful
|
||||
(40) : list-dirs {path1/}
|
||||
subdir/
|
||||
(41) : list-dirs {path2/}
|
||||
subdir/
|
||||
|
||||
(42) : purge-children {path1/}
|
||||
(43) : copy-as {datadir/}placeholder.txt {path1/} file1.txt
|
||||
(44) : copy-as {datadir/}placeholder.txt {path1/} file1.copy1.txt
|
||||
(45) : copy-as {datadir/}placeholder.txt {path1/} file1.copy2.txt
|
||||
(46) : copy-as {datadir/}placeholder.txt {path1/} file1.copy3.txt
|
||||
(47) : copy-as {datadir/}placeholder.txt {path1/} file1.copy4.txt
|
||||
(48) : copy-as {datadir/}placeholder.txt {path1/} file1.copy5.txt
|
||||
(49) : list-dirs {path1/}
|
||||
(50) : list-dirs {path2/}
|
||||
subdir/
|
||||
|
||||
(51) : bisync create-empty-src-dirs
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Path1 checking for diffs
|
||||
INFO : - Path1 File was deleted - subdir
|
||||
INFO : Path1: 1 changes: 0 new, 0 newer, 0 older, 1 deleted
|
||||
INFO : Path2 checking for diffs
|
||||
INFO : Applying changes
|
||||
INFO : - Path2 Queue delete - {path2/}subdir
|
||||
INFO : - Do queued deletes on - Path2
|
||||
INFO : subdir: Removing directory
|
||||
INFO : Updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : Bisync successful
|
||||
|
||||
(52) : test 10. Confirm the subdir has been removed on both paths
|
||||
(53) : list-dirs {path1/}
|
||||
(54) : list-dirs {path2/}
|
||||
|
||||
(55) : test 11. bisync again (because if we leave subdir in listings, test will fail due to mismatched modtime)
|
||||
(56) : bisync create-empty-src-dirs
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Path1 checking for diffs
|
||||
INFO : Path2 checking for diffs
|
||||
INFO : No changes found
|
||||
INFO : Updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : Bisync successful
|
||||
@@ -1 +0,0 @@
|
||||
This file is used for testing the health of rclone accesses to the local/remote file system. Do not delete.
|
||||
@@ -1,87 +0,0 @@
|
||||
test createemptysrcdirs
|
||||
# Test the --create-empty-src-dirs logic.
|
||||
# Should behave the same way as rclone sync.
|
||||
# Without this flag, empty directories created/deleted on one side are NOT created/deleted on the other side
|
||||
# With this flag, empty directories created/deleted on one side are created/deleted on the other side; the result should be an exact mirror.
|
||||
#
|
||||
# Placeholders are necessary to ensure that git does not lose our empty folders
|
||||
# After the initial setup sync:
|
||||
# 1. Create an empty dir on Path1 by creating subdir/placeholder.txt and then deleting the placeholder
|
||||
# 2. Run bisync without --create-empty-src-dirs
|
||||
# 3. Confirm the subdir exists only on Path1 and not Path2
|
||||
# 4. Run bisync WITH --create-empty-src-dirs
|
||||
# 5. Confirm the subdir exists on both paths
|
||||
# 6. Delete the empty dir on Path1 using purge-children (and also add files so the path isn't empty)
|
||||
# 7. Run bisync without --create-empty-src-dirs
|
||||
# 8. Confirm the subdir exists only on Path2 and not Path1
|
||||
# 9. Reset, do the delete again, and run bisync WITH --create-empty-src-dirs
|
||||
# 10. Confirm the subdir has been removed on both paths
|
||||
|
||||
test initial bisync
|
||||
touch-glob 2001-01-02 {datadir/} placeholder.txt
|
||||
copy-as {datadir/}placeholder.txt {path1/} file1.txt
|
||||
copy-as {datadir/}placeholder.txt {path1/} file1.copy1.txt
|
||||
copy-as {datadir/}placeholder.txt {path1/} file1.copy2.txt
|
||||
copy-as {datadir/}placeholder.txt {path1/} file1.copy3.txt
|
||||
copy-as {datadir/}placeholder.txt {path1/} file1.copy4.txt
|
||||
copy-as {datadir/}placeholder.txt {path1/} file1.copy5.txt
|
||||
bisync resync
|
||||
|
||||
test 1. Create an empty dir on Path1 by creating subdir/placeholder.txt and then deleting the placeholder
|
||||
copy-as {datadir/}placeholder.txt {path1/} subdir/placeholder.txt
|
||||
touch-glob 2001-01-02 {path1/} subdir
|
||||
delete-file {path1/}subdir/placeholder.txt
|
||||
|
||||
test 2. Run bisync without --create-empty-src-dirs
|
||||
bisync
|
||||
|
||||
test 3. Confirm the subdir exists only on Path1 and not Path2
|
||||
list-dirs {path1/}
|
||||
list-dirs {path2/}
|
||||
|
||||
test 4.Run bisync WITH --create-empty-src-dirs
|
||||
bisync create-empty-src-dirs
|
||||
|
||||
test 5. Confirm the subdir exists on both paths
|
||||
list-dirs {path1/}
|
||||
list-dirs {path2/}
|
||||
|
||||
test 6. Delete the empty dir on Path1 using purge-children (and also add files so the path isn't empty)
|
||||
purge-children {path1/}
|
||||
copy-as {datadir/}placeholder.txt {path1/} file1.txt
|
||||
copy-as {datadir/}placeholder.txt {path1/} file1.copy1.txt
|
||||
copy-as {datadir/}placeholder.txt {path1/} file1.copy2.txt
|
||||
copy-as {datadir/}placeholder.txt {path1/} file1.copy3.txt
|
||||
copy-as {datadir/}placeholder.txt {path1/} file1.copy4.txt
|
||||
copy-as {datadir/}placeholder.txt {path1/} file1.copy5.txt
|
||||
|
||||
test 7. Run bisync without --create-empty-src-dirs
|
||||
bisync
|
||||
|
||||
test 8. Confirm the subdir exists only on Path2 and not Path1
|
||||
list-dirs {path1/}
|
||||
list-dirs {path2/}
|
||||
|
||||
test 9. Reset, do the delete again, and run bisync WITH --create-empty-src-dirs
|
||||
bisync resync create-empty-src-dirs
|
||||
list-dirs {path1/}
|
||||
list-dirs {path2/}
|
||||
|
||||
purge-children {path1/}
|
||||
copy-as {datadir/}placeholder.txt {path1/} file1.txt
|
||||
copy-as {datadir/}placeholder.txt {path1/} file1.copy1.txt
|
||||
copy-as {datadir/}placeholder.txt {path1/} file1.copy2.txt
|
||||
copy-as {datadir/}placeholder.txt {path1/} file1.copy3.txt
|
||||
copy-as {datadir/}placeholder.txt {path1/} file1.copy4.txt
|
||||
copy-as {datadir/}placeholder.txt {path1/} file1.copy5.txt
|
||||
list-dirs {path1/}
|
||||
list-dirs {path2/}
|
||||
|
||||
bisync create-empty-src-dirs
|
||||
|
||||
test 10. Confirm the subdir has been removed on both paths
|
||||
list-dirs {path1/}
|
||||
list-dirs {path2/}
|
||||
|
||||
test 11. bisync again (because if we leave subdir in listings, test will fail due to mismatched modtime)
|
||||
bisync create-empty-src-dirs
|
||||
@@ -4,7 +4,7 @@
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file10.txt"
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file11.txt"
|
||||
- 13 md5:fb3ecfb2800400fb01b0bfd39903e9fb - 2001-01-02T00:00:00.000000000+0000 "file2.txt"
|
||||
- 39 md5:0860a03592626642f8fd6c8bfb447d2a - 2001-03-04T00:00:00.000000000+0000 "file5.txt..path1"
|
||||
- 39 md5:979a803b15d27df0c31ad7d29006d10b - 2001-01-02T00:00:00.000000000+0000 "file5.txt..path2"
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-03-04T00:00:00.000000000+0000 "file5.txt..path1"
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file5.txt..path2"
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file6.txt"
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file7.txt"
|
||||
|
||||
@@ -4,5 +4,5 @@
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file11.txt"
|
||||
- 13 md5:fb3ecfb2800400fb01b0bfd39903e9fb - 2001-01-02T00:00:00.000000000+0000 "file2.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
|
||||
- 39 md5:0860a03592626642f8fd6c8bfb447d2a - 2001-03-04T00:00:00.000000000+0000 "file5.txt"
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-03-04T00:00:00.000000000+0000 "file5.txt"
|
||||
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file7.txt"
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.