mirror of https://github.com/rclone/rclone.git synced 2025-12-24 04:04:37 +00:00

Compare commits


1 commit

Author: Chun-Hung Tseng | SHA1: 2ea8b3db4f | Message: protondrive: add SkipSignatureVerifications to option | Date: 2023-08-03 00:15:06 +02:00
330 changed files with 50223 additions and 73216 deletions


@@ -32,7 +32,7 @@ jobs:
include:
- job_name: linux
os: ubuntu-latest
go: '1.21'
go: '1.21.0-rc.3'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -43,14 +43,14 @@ jobs:
- job_name: linux_386
os: ubuntu-latest
go: '1.21'
go: '1.21.0-rc.3'
goarch: 386
gotags: cmount
quicktest: true
- job_name: mac_amd64
os: macos-11
go: '1.21'
go: '1.21.0-rc.3'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -59,14 +59,14 @@ jobs:
- job_name: mac_arm64
os: macos-11
go: '1.21'
go: '1.21.0-rc.3'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows
os: windows-latest
go: '1.21'
go: '1.21.0-rc.3'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'
@@ -76,7 +76,7 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '1.21'
go: '1.21.0-rc.3'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
@@ -99,7 +99,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -232,7 +232,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Code quality test
uses: golangci/golangci-lint-action@v3
@@ -244,7 +244,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: '1.21'
go-version: '1.21.0-rc.3'
check-latest: true
- name: Install govulncheck
@@ -261,7 +261,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -269,7 +269,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: '1.21'
go-version: '1.21.0-rc.3'
- name: Go module cache
uses: actions/cache@v3


@@ -11,25 +11,25 @@ jobs:
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Login to Docker Hub
uses: docker/login-action@v3
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Extract metadata (tags, labels) for Docker
id: meta
uses: docker/metadata-action@v5
uses: docker/metadata-action@v4
with:
images: ghcr.io/${{ github.repository }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v2
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
uses: docker/login-action@v2
with:
registry: ghcr.io
# This is the user that triggered the Workflow. In this case, it will
@@ -44,7 +44,7 @@ jobs:
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and publish image
uses: docker/build-push-action@v5
uses: docker/build-push-action@v4
with:
file: Dockerfile
context: .


@@ -11,7 +11,7 @@ jobs:
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Get actual patch version
@@ -40,7 +40,7 @@ jobs:
name: Build docker plugin job
steps:
- name: Checkout master
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Build and publish docker plugin


@@ -5,7 +5,7 @@ on:
jobs:
publish:
runs-on: ubuntu-latest
runs-on: windows-latest # Action can only run on Windows
steps:
- uses: vedantmgoyal2009/winget-releaser@v2
with:

.gitignore (vendored): 2 changed lines

@@ -8,10 +8,10 @@ rclone.iml
.idea
.history
*.test
*.log
*.iml
fuzz-build.zip
*.orig
*.rej
Thumbs.db
__pycache__
.DS_Store


@@ -33,67 +33,23 @@ issues:
- staticcheck
text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'
# don't disable the revive messages about comments on exported functions
include:
- EXC0012
- EXC0013
- EXC0014
- EXC0015
run:
# timeout for analysis, e.g. 30s, 5m, default is 1m
timeout: 10m
linters-settings:
revive:
# setting rules seems to disable all the rules, so re-enable them here
rules:
- name: blank-imports
disabled: false
- name: context-as-argument
disabled: false
- name: context-keys-type
disabled: false
- name: dot-imports
disabled: false
- name: empty-block
disabled: true
- name: error-naming
disabled: false
- name: error-return
disabled: false
- name: error-strings
disabled: false
- name: errorf
disabled: false
- name: exported
disabled: false
- name: increment-decrement
disabled: true
- name: indent-error-flow
disabled: false
- name: package-comments
disabled: false
- name: range
disabled: false
- name: receiver-naming
disabled: false
- name: redefines-builtin-id
disabled: true
- name: superfluous-else
disabled: true
- name: time-naming
disabled: false
- name: unexported-return
disabled: false
- name: unreachable-code
disabled: true
- name: unused-parameter
disabled: true
- name: var-declaration
disabled: false
- name: var-naming
disabled: false
- name: empty-block
disabled: true
- name: redefines-builtin-id
disabled: true
- name: superfluous-else
disabled: true
stylecheck:
# Only enable the checks performed by the staticcheck stand-alone tool,
# as documented here: https://staticcheck.io/docs/configuration/options/#checks


@@ -428,19 +428,14 @@ Getting going
* box is a good one to start from if you have a directory-based remote
* b2 is a good one to start from if you have a bucket-based remote
* Add your remote to the imports in `backend/all/all.go`
* HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good go SDK then use that instead.
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* Try to implement as many optional methods as possible as it makes the remote more usable.
* Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed
* Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
* `rclone purge -v TestRemote:rclone-info`
* `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
* `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
* open `remote.csv` in a spreadsheet and examine
Important:
* Please use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend. It makes maintenance much easier.
* If your backend is HTTP based then please use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything!
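For illustration, here is a minimal sketch of wiring those two libraries together, assuming the current `fshttp.NewClient(ctx)` signature and a placeholder root URL (a real backend would build this in its `NewFs`):

```go
package newbackend

import (
	"context"

	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/lib/rest"
)

// newClient builds a lib/rest client on top of rclone's HTTP client,
// so --dump bodies, --tpslimit and --user-agent work with no extra code.
func newClient(ctx context.Context) *rest.Client {
	httpClient := fshttp.NewClient(ctx) // honours the global fs options
	return rest.NewClient(httpClient).SetRoot("https://api.example.com") // placeholder URL
}
```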
Unit tests
* Create a config entry called `TestRemote` for the unit tests to use
@@ -474,7 +469,7 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
* `README.md` - main GitHub page
* `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
* make sure this has the `autogenerated options` comments in (see your reference backend docs)
* update them in your backend with `bin/make_backend_docs.py remote`
* update them with `make backenddocs` - revert any changes in other backends
* `docs/content/overview.md` - overview docs
* `docs/content/docs.md` - list of remotes in config section
* `docs/content/_index.md` - front page of rclone.org
@@ -484,46 +479,6 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.
## Adding a new s3 provider
It is quite easy to add a new S3 provider to rclone.
You'll need to modify the following files
- `backend/s3/s3.go`
- Add the provider to `providerOption` at the top of the file
- Add endpoints and other config for your provider gated on the provider in `fs.RegInfo`.
- Exclude your provider from generic config questions (e.g. `region` and `endpoint`).
- Add the provider to the `setQuirks` function - see the documentation there.
- `docs/content/s3.md`
- Add the provider at the top of the page.
- Add a section about the provider linked from there.
- Add a transcript of a trial `rclone config` session
- Edit the transcript to remove things which might change in subsequent versions
- **Do not** alter or add to the autogenerated parts of `s3.md`
- **Do not** run `make backenddocs` or `bin/make_backend_docs.py s3`
- `README.md` - this is the home page in github
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
- `docs/content/_index.md` - this is the home page of rclone.org
- Add the provider and a link to the section you wrote in `docs/contents/s3.md`
When adding the provider, endpoints, quirks, docs etc keep them in
alphabetical order by `Provider` name, but with `AWS` first and
`Other` last.
Once you've written the docs, run `make serve` and check they look OK
in the web browser and the links (internal and external) all work.
Once you've written the code, test `rclone config` works to your
satisfaction, and check the integration tests work `go test -v -remote
NewS3Provider:`. You may need to adjust the quirks to get them to
pass. Some providers just can't pass the tests with control characters
in the names so if these fail and the provider doesn't support
`urlEncodeListings` in the quirks then ignore them. Note that the
`SetTier` test may also fail on non-AWS providers.
For an example of adding an s3 provider see [eb3082a1](https://github.com/rclone/rclone/commit/eb3082a1ebdb76d5625f14cedec3f5154a5e7b10).
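As a rough, self-contained caricature of the `setQuirks` step (the real function in `backend/s3/s3.go` toggles many more flags; `NewS3Provider` is the hypothetical provider name used above, and the two flags shown merely illustrate the kind of quirks involved):

```go
package main

import "fmt"

// setQuirks-style dispatch: each provider flips behaviour flags that
// the rest of the backend consults. Illustrative only.
func setQuirks(provider string) (urlEncodeListings, useMultipartEtag bool) {
	// AWS-like defaults.
	urlEncodeListings = true
	useMultipartEtag = true
	switch provider {
	case "AWS":
		// keep all defaults
	case "NewS3Provider":
		urlEncodeListings = false // provider rejects URL-encoded listings
		useMultipartEtag = false  // ETags are not MD5-based here
	default:
		// "Other" providers get conservative settings.
		useMultipartEtag = false
	}
	return urlEncodeListings, useMultipartEtag
}

func main() {
	enc, etag := setQuirks("NewS3Provider")
	fmt.Println(enc, etag) // false false
}
```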
## Writing a plugin ##
New features (backends, commands) can also be added "out-of-tree", through Go plugins.


@@ -19,8 +19,6 @@ Current active maintainers of rclone are:
| wiserain | @wiserain | pikpak backend |
| albertony | @albertony | |
| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
| Hideo Aoyama | @boukendesho | snap packaging |
| nielash | @nielash | bisync |
**This is a work in progress Draft**

MANUAL.html (generated): 29497 changed lines; diff suppressed because it is too large.

MANUAL.md (generated): 5635 changed lines; diff suppressed because it is too large.

MANUAL.txt (generated): 29767 changed lines; diff suppressed because it is too large.


@@ -66,10 +66,6 @@ btest:
@echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip
@echo "Copied markdown of beta release to clip board"
btesth:
@echo "<a href="$(BETA_URL)">$(TAG)</a> on branch <a href="https://github.com/rclone/rclone/tree/$(BRANCH)">$(BRANCH)</a> (uploaded in 15-30 mins)" | xclip -r -sel clip -t text/html
@echo "Copied beta release in HTML to clip board"
version:
@echo '$(TAG)'


@@ -53,7 +53,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
* Linode Object Storage [:page_facing_up:](https://rclone.org/s3/#linode)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
@@ -74,10 +73,8 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* PikPak [:page_facing_up:](https://rclone.org/pikpak/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
* Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)


@@ -90,28 +90,6 @@ Now
* git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
* git push
## Update the website between releases
Create an update website branch based off the last release
git co -b update-website
If the branch already exists, double check there are no commits that need saving.
Now reset the branch to the last release
git reset --hard v1.64.0
Create the changes, check them in, test with `make serve` then
make upload_test_website
Check out https://test.rclone.org and when happy
make upload_website
Cherry pick any changes back to master and the stable branch if it is active.
## Making a manual build of docker
The rclone docker image should autobuild via GitHub actions. If it doesn't


@@ -1 +1 @@
v1.65.0
v1.64.0


@@ -41,7 +41,6 @@ import (
_ "github.com/rclone/rclone/backend/protondrive"
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/quatrix"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/seafile"
_ "github.com/rclone/rclone/backend/sftp"


@@ -5,6 +5,7 @@
package azureblob
import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
@@ -17,7 +18,6 @@ import (
"net/url"
"os"
"path"
"sort"
"strconv"
"strings"
"sync"
@@ -33,6 +33,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -45,8 +46,10 @@ import (
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/readers"
"golang.org/x/sync/errgroup"
)
const (
@@ -67,16 +70,12 @@ const (
emulatorAccount = "devstoreaccount1"
emulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
)
var (
errCantUpdateArchiveTierBlobs = fserrors.NoRetryError(errors.New("can't update archive tier blob without --azureblob-archive-tier-delete"))
// Take this when changing or reading metadata.
//
// It acts as global metadata lock so we don't bloat Object
// with an extra lock that will only very rarely be contended.
metadataMu sync.Mutex
)
// Register with Fs
@@ -338,16 +337,17 @@ to start uploading.`,
Advanced: true,
}, {
Name: "memory_pool_flush_time",
Default: fs.Duration(time.Minute),
Default: memoryPoolFlushTime,
Advanced: true,
Hide: fs.OptionHideBoth,
Help: `How often internal memory buffer pools will be flushed. (no longer used)`,
Help: `How often internal memory buffer pools will be flushed.
Uploads which require additional buffers (e.g. multipart) will use the memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
}, {
Name: "memory_pool_use_mmap",
Default: false,
Default: memoryPoolUseMmap,
Advanced: true,
Hide: fs.OptionHideBoth,
Help: `Whether to use mmap buffers in internal memory pool. (no longer used)`,
Help: `Whether to use mmap buffers in internal memory pool.`,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -432,6 +432,8 @@ type Options struct {
ArchiveTierDelete bool `config:"archive_tier_delete"`
UseEmulator bool `config:"use_emulator"`
DisableCheckSum bool `config:"disable_checksum"`
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"`
PublicAccess string `config:"public_access"`
DirectoryMarkers bool `config:"directory_markers"`
@@ -455,6 +457,8 @@ type Fs struct {
cache *bucket.Cache // cache for container creation status
pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency
pool *pool.Pool // memory pool
poolSize int64 // size of pages in memory pool
publicAccess container.PublicAccessType // Container Public Access Level
}
@@ -467,7 +471,7 @@ type Object struct {
size int64 // Size of the object
mimeType string // Content-Type of the object
accessTier blob.AccessTier // Blob Access Tier
meta map[string]string // blob metadata - take metadataMu when accessing
meta map[string]string // blob metadata
}
// ------------------------------------------------------------
@@ -667,6 +671,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
cache: bucket.NewCache(),
cntSVCcache: make(map[string]*container.Client, 1),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
ci.Transfers,
opt.MemoryPoolUseMmap,
),
poolSize: int64(opt.ChunkSize),
}
f.publicAccess = container.PublicAccessType(opt.PublicAccess)
f.setRoot(root)
@@ -961,9 +972,6 @@ func (f *Fs) getBlockBlobSVC(container, containerPath string) *blockblob.Client
// updateMetadataWithModTime adds the modTime passed in to o.meta.
func (o *Object) updateMetadataWithModTime(modTime time.Time) {
metadataMu.Lock()
defer metadataMu.Unlock()
// Make sure o.meta is not nil
if o.meta == nil {
o.meta = make(map[string]string, 1)
@@ -1495,7 +1503,7 @@ func (f *Fs) deleteContainer(ctx context.Context, containerName string) error {
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
container, directory := f.split(dir)
// Remove directory marker file
if f.opt.DirectoryMarkers && container != "" && directory != "" {
if f.opt.DirectoryMarkers && container != "" && dir != "" {
o := &Object{
fs: f,
remote: dir + "/",
@@ -1529,10 +1537,7 @@ func (f *Fs) Hashes() hash.Set {
// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context, dir string) error {
container, directory := f.split(dir)
if container == "" {
return errors.New("can't purge from root")
}
if directory != "" {
if container == "" || directory != "" {
// Delegate to caller if not root of a container
return fs.ErrorCantPurge
}
@@ -1589,6 +1594,19 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return f.NewObject(ctx, remote)
}
func (f *Fs) getMemoryPool(size int64) *pool.Pool {
if size == int64(f.opt.ChunkSize) {
return f.pool
}
return pool.New(
time.Duration(f.opt.MemoryPoolFlushTime),
int(size),
f.ci.Transfers,
f.opt.MemoryPoolUseMmap,
)
}
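For reference, a minimal sketch of the buffer lifecycle this relies on, assuming `lib/pool`'s Get/Put semantics (Get reuses a cached buffer or allocates a fresh one, Put returns it for reuse, and idle buffers are dropped after the flush interval); the parameter order matches the `pool.New` calls above:

```go
package main

import (
	"time"

	"github.com/rclone/rclone/lib/pool"
)

func main() {
	// Flush idle buffers after a minute, 1 MiB buffers, cache up to 16,
	// no mmap.
	p := pool.New(time.Minute, 1024*1024, 16, false)
	buf := p.Get()    // cached buffer, or a freshly allocated one
	copy(buf, "data") // use it as a chunk buffer
	p.Put(buf)        // hand it back for the next chunk
}
```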
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -1632,9 +1650,6 @@ func (o *Object) Size() int64 {
// Set o.metadata from metadata
func (o *Object) setMetadata(metadata map[string]*string) {
metadataMu.Lock()
defer metadataMu.Unlock()
if len(metadata) > 0 {
// Lower case the metadata
o.meta = make(map[string]string, len(metadata))
@@ -1659,9 +1674,6 @@ func (o *Object) setMetadata(metadata map[string]*string) {
// Get metadata from o.meta
func (o *Object) getMetadata() (metadata map[string]*string) {
metadataMu.Lock()
defer metadataMu.Unlock()
if len(o.meta) == 0 {
return nil
}
@@ -1873,7 +1885,12 @@ func (o *Object) ModTime(ctx context.Context) (result time.Time) {
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
o.updateMetadataWithModTime(modTime)
// Make sure o.meta is not nil
if o.meta == nil {
o.meta = make(map[string]string, 1)
}
// Set modTimeKey in it
o.meta[modTimeKey] = modTime.Format(timeFormatOut)
blb := o.getBlobSVC()
opt := blob.SetMetadataOptions{}
@@ -1965,8 +1982,8 @@ func (rs *readSeekCloser) Close() error {
return nil
}
// increment the array as LSB binary
func increment(xs *[8]byte) {
// increment the slice passed in as LSB binary
func increment(xs []byte) {
for i, digit := range xs {
newDigit := digit + 1
xs[i] = newDigit
@@ -1977,43 +1994,22 @@ func increment(xs *[8]byte) {
}
}
// record chunk number and id for Close
type azBlock struct {
chunkNumber int
id string
}
var warnStreamUpload sync.Once
// Implements the fs.ChunkWriter interface
type azChunkWriter struct {
chunkSize int64
size int64
f *Fs
ui uploadInfo
blocksMu sync.Mutex // protects the below
blocks []azBlock // list of blocks for finalize
binaryBlockID [8]byte // block counter as LSB first 8 bytes
o *Object
}
// OpenChunkWriter returns the chunk size and a ChunkWriter
// uploadMultipart uploads a file using multipart upload
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: remote,
}
ui, err := o.prepareUpload(ctx, src, options)
if err != nil {
return info, nil, fmt.Errorf("failed to prepare upload: %w", err)
}
// Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, blb *blockblob.Client, httpHeaders *blob.HTTPHeaders) (err error) {
// Calculate correct partSize
partSize := f.opt.ChunkSize
partSize := o.fs.opt.ChunkSize
totalParts := -1
size := src.Size()
// make concurrency machinery
concurrency := o.fs.opt.UploadConcurrency
if concurrency < 1 {
concurrency = 1
}
tokens := pacer.NewTokenDispenser(concurrency)
// Note that the max size of file is 4.75 TB (100 MB X 50,000
// blocks) and this is bigger than the max uncommitted block
@@ -2027,13 +2023,13 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
// 195GB which seems like a not too unreasonable limit.
if size == -1 {
warnStreamUpload.Do(func() {
fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
f.opt.ChunkSize, partSize*fs.SizeSuffix(blockblob.MaxBlocks))
fs.Logf(o, "Streaming uploads using chunk size %v will have maximum file size of %v",
o.fs.opt.ChunkSize, partSize*fs.SizeSuffix(blockblob.MaxBlocks))
})
} else {
partSize = chunksize.Calculator(remote, size, blockblob.MaxBlocks, f.opt.ChunkSize)
partSize = chunksize.Calculator(o, size, blockblob.MaxBlocks, o.fs.opt.ChunkSize)
if partSize > fs.SizeSuffix(blockblob.MaxStageBlockBytes) {
return info, nil, fmt.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), fs.SizeSuffix(blockblob.MaxBlocks), fs.SizeSuffix(blockblob.MaxStageBlockBytes))
return fmt.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), fs.SizeSuffix(blockblob.MaxBlocks), fs.SizeSuffix(blockblob.MaxStageBlockBytes))
}
totalParts = int(fs.SizeSuffix(size) / partSize)
if fs.SizeSuffix(size)%partSize != 0 {
@@ -2043,264 +2039,173 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, partSize)
chunkWriter := &azChunkWriter{
chunkSize: int64(partSize),
size: size,
f: f,
ui: ui,
o: o,
}
info = fs.ChunkWriterInfo{
ChunkSize: int64(partSize),
Concurrency: o.fs.opt.UploadConcurrency,
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
fs.Debugf(o, "open chunk writer: started multipart upload")
return info, chunkWriter, nil
}
// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
in, wrap := accounting.UnWrap(in)
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (int64, error) {
if chunkNumber < 0 {
err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber)
return -1, err
}
// FIXME it would be nice to delete uncommitted blocks
// See: https://github.com/rclone/rclone/issues/5583
//
// However there doesn't seem to be an easy way of doing this other than
// by deleting the target.
//
// This means that a failed upload deletes the target which isn't ideal.
//
// Uploading a zero length blob and deleting it will remove the
// uncommitted blocks I think.
//
// Could check to see if a file exists already and if it
// doesn't then create a 0 length file and delete it to flush
// the uncommitted blocks.
//
// This is what azcopy does
// https://github.com/MicrosoftDocs/azure-docs/issues/36347#issuecomment-541457962
// defer atexit.OnError(&err, func() {
// fs.Debugf(o, "Cancelling multipart upload")
// // Code goes here!
// })()
// Upload the block, with MD5 for check
m := md5.New()
currentChunkSize, err := io.Copy(m, reader)
if err != nil {
return -1, err
}
// If no data read, don't write the chunk
if currentChunkSize == 0 {
return 0, nil
}
md5sum := m.Sum(nil)
transactionalMD5 := md5sum[:]
// Upload the chunks
var (
g, gCtx = errgroup.WithContext(ctx)
remaining = fs.SizeSuffix(size) // remaining size in file for logging only, -1 if size < 0
position = fs.SizeSuffix(0) // position in file
memPool = o.fs.getMemoryPool(int64(partSize)) // pool to get memory from
finished = false // set when we have read EOF
blocks []string // list of blocks for finalize
binaryBlockID = make([]byte, 8) // block counter as LSB first 8 bytes
)
for part := 0; !finished; part++ {
// Get a block of memory from the pool and a token which limits concurrency
tokens.Get()
buf := memPool.Get()
// increment the blockID and save the blocks for finalize
increment(&w.binaryBlockID)
blockID := base64.StdEncoding.EncodeToString(w.binaryBlockID[:])
// Save the blockID for the commit
w.blocksMu.Lock()
w.blocks = append(w.blocks, azBlock{
chunkNumber: chunkNumber,
id: blockID,
})
w.blocksMu.Unlock()
err = w.f.pacer.Call(func() (bool, error) {
// rewind the reader on retry and after reading md5
_, err = reader.Seek(0, io.SeekStart)
if err != nil {
return false, err
free := func() {
memPool.Put(buf) // return the buf
tokens.Put() // return the token
}
options := blockblob.StageBlockOptions{
// Specify the transactional md5 for the body, to be validated by the service.
TransactionalValidation: blob.TransferValidationTypeMD5(transactionalMD5),
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
free()
break
}
_, err = w.ui.blb.StageBlock(ctx, blockID, &readSeekCloser{Reader: reader, Seeker: reader}, &options)
if err != nil {
if chunkNumber <= 8 {
return w.f.shouldRetry(ctx, err)
// Read the chunk
n, err := readers.ReadFill(in, buf) // this can never return 0, nil
if err == io.EOF {
if n == 0 { // end if no data
free()
break
}
// retry all chunks once have done the first few
return true, err
finished = true
} else if err != nil {
free()
return fmt.Errorf("multipart upload failed to read source: %w", err)
}
return false, nil
})
if err != nil {
return -1, fmt.Errorf("failed to upload chunk %d with %v bytes: %w", chunkNumber+1, currentChunkSize, err)
buf = buf[:n]
// increment the blockID and save the blocks for finalize
increment(binaryBlockID)
blockID := base64.StdEncoding.EncodeToString(binaryBlockID)
blocks = append(blocks, blockID)
// Transfer the chunk
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %d", part+1, totalParts, position, fs.SizeSuffix(size), len(buf))
g.Go(func() (err error) {
defer free()
// Upload the block, with MD5 for check
md5sum := md5.Sum(buf)
transactionalMD5 := md5sum[:]
err = o.fs.pacer.Call(func() (bool, error) {
bufferReader := bytes.NewReader(buf)
wrappedReader := wrap(bufferReader)
rs := readSeekCloser{wrappedReader, bufferReader}
options := blockblob.StageBlockOptions{
// Specify the transactional md5 for the body, to be validated by the service.
TransactionalValidation: blob.TransferValidationTypeMD5(transactionalMD5),
}
_, err = blb.StageBlock(ctx, blockID, &rs, &options)
return o.fs.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("multipart upload failed to upload part: %w", err)
}
return nil
})
// ready for next block
if size >= 0 {
remaining -= partSize
}
position += partSize
}
fs.Debugf(w.o, "multipart upload wrote chunk %d with %v bytes", chunkNumber+1, currentChunkSize)
return currentChunkSize, err
}
// Abort the multipart upload.
//
// FIXME it would be nice to delete uncommitted blocks.
//
// See: https://github.com/rclone/rclone/issues/5583
//
// However there doesn't seem to be an easy way of doing this other than
// by deleting the target.
//
// This means that a failed upload deletes the target which isn't ideal.
//
// Uploading a zero length blob and deleting it will remove the
// uncommitted blocks I think.
//
// Could check to see if a file exists already and if it doesn't then
// create a 0 length file and delete it to flush the uncommitted
// blocks.
//
// This is what azcopy does
// https://github.com/MicrosoftDocs/azure-docs/issues/36347#issuecomment-541457962
func (w *azChunkWriter) Abort(ctx context.Context) error {
fs.Debugf(w.o, "multipart upload aborted (did nothing - see issue #5583)")
return nil
}
// Close and finalise the multipart upload
func (w *azChunkWriter) Close(ctx context.Context) (err error) {
// sort the completed parts by part number
sort.Slice(w.blocks, func(i, j int) bool {
return w.blocks[i].chunkNumber < w.blocks[j].chunkNumber
})
// Create a list of block IDs
blockIDs := make([]string, len(w.blocks))
for i := range w.blocks {
blockIDs[i] = w.blocks[i].id
err = g.Wait()
if err != nil {
return err
}
options := blockblob.CommitBlockListOptions{
Metadata: w.o.getMetadata(),
Tier: parseTier(w.f.opt.AccessTier),
HTTPHeaders: &w.ui.httpHeaders,
Metadata: o.getMetadata(),
Tier: parseTier(o.fs.opt.AccessTier),
HTTPHeaders: httpHeaders,
}
// Finalise the upload session
err = w.f.pacer.Call(func() (bool, error) {
_, err := w.ui.blb.CommitBlockList(ctx, blockIDs, &options)
return w.f.shouldRetry(ctx, err)
err = o.fs.pacer.Call(func() (bool, error) {
_, err := blb.CommitBlockList(ctx, blocks, &options)
return o.fs.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("failed to complete multipart upload: %w", err)
return fmt.Errorf("multipart upload failed to finalize: %w", err)
}
fs.Debugf(w.o, "multipart upload finished")
return err
}
var warnStreamUpload sync.Once
// uploadMultipart uploads a file using multipart upload
//
// Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (ui uploadInfo, err error) {
chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
Open: o.fs,
OpenOptions: options,
})
if err != nil {
return ui, err
}
return chunkWriter.(*azChunkWriter).ui, nil
return nil
}
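The replacement code above implements rclone's `fs.ChunkWriter` interface: `OpenChunkWriter` returns an `fs.ChunkWriterInfo` plus a writer whose `WriteChunk`, `Close` and `Abort` the generic `lib/multipart` uploader drives, possibly concurrently. A toy, self-contained sketch of that control flow (the real code stages blocks with `StageBlock` and commits them with `CommitBlockList`; here the "upload" is an in-memory map):

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"sort"
	"sync"
)

// chunkWriter mimics the azChunkWriter pattern: chunks may arrive
// concurrently and out of order, so they are recorded under a mutex
// and assembled in chunk-number order on Close.
type chunkWriter struct {
	mu     sync.Mutex
	chunks map[int][]byte
}

func (w *chunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (int64, error) {
	if chunkNumber < 0 {
		return -1, fmt.Errorf("invalid chunk number provided: %v", chunkNumber)
	}
	// A retry would Seek back to the start, as the real code does.
	if _, err := reader.Seek(0, io.SeekStart); err != nil {
		return -1, err
	}
	data, err := io.ReadAll(reader)
	if err != nil {
		return -1, err
	}
	w.mu.Lock()
	w.chunks[chunkNumber] = data
	w.mu.Unlock()
	return int64(len(data)), nil
}

// Close commits the chunks in order - the analogue of CommitBlockList.
func (w *chunkWriter) Close(ctx context.Context) error {
	w.mu.Lock()
	defer w.mu.Unlock()
	nums := make([]int, 0, len(w.chunks))
	for n := range w.chunks {
		nums = append(nums, n)
	}
	sort.Ints(nums)
	var out bytes.Buffer
	for _, n := range nums {
		out.Write(w.chunks[n])
	}
	fmt.Printf("committed %d chunks: %q\n", len(nums), out.String())
	return nil
}

// Abort would clean up staged chunks; like the real backend (see issue
// #5583 above), this sketch has nothing practical to do here.
func (w *chunkWriter) Abort(ctx context.Context) error { return nil }

func main() {
	w := &chunkWriter{chunks: map[int][]byte{}}
	ctx := context.Background()
	_, _ = w.WriteChunk(ctx, 1, bytes.NewReader([]byte("world")))
	_, _ = w.WriteChunk(ctx, 0, bytes.NewReader([]byte("hello ")))
	_ = w.Close(ctx) // committed 2 chunks: "hello world"
}
```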
// uploadSinglepart uploads a short blob using a single part upload
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, ui uploadInfo) (err error) {
chunkSize := int64(o.fs.opt.ChunkSize)
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, blb *blockblob.Client, httpHeaders *blob.HTTPHeaders) (err error) {
// fs.Debugf(o, "Single part upload starting of object %d bytes", size)
if size > chunkSize || size < 0 {
return fmt.Errorf("internal error: single part upload size too big %d > %d", size, chunkSize)
if size > o.fs.poolSize || size < 0 {
return fmt.Errorf("internal error: single part upload size too big %d > %d", size, o.fs.opt.ChunkSize)
}
rw := multipart.NewRW()
defer fs.CheckClose(rw, &err)
buf := o.fs.pool.Get()
defer o.fs.pool.Put(buf)
n, err := io.CopyN(rw, in, size+1)
n, err := readers.ReadFill(in, buf)
if err == nil {
// Check to see whether in is exactly len(buf) or bigger
var buf2 = []byte{0}
n2, err2 := readers.ReadFill(in, buf2)
if n2 != 0 || err2 != io.EOF {
return fmt.Errorf("single part upload read failed: object longer than expected (expecting %d but got > %d)", size, len(buf))
}
}
if err != nil && err != io.EOF {
return fmt.Errorf("single part upload read failed: %w", err)
}
if n != size {
if int64(n) != size {
return fmt.Errorf("single part upload: expecting to read %d bytes but read %d", size, n)
}
rs := &readSeekCloser{Reader: rw, Seeker: rw}
b := bytes.NewReader(buf[:n])
rs := &readSeekCloser{Reader: b, Seeker: b}
options := blockblob.UploadOptions{
Metadata: o.getMetadata(),
Tier: parseTier(o.fs.opt.AccessTier),
HTTPHeaders: &ui.httpHeaders,
HTTPHeaders: httpHeaders,
}
return o.fs.pacer.Call(func() (bool, error) {
// rewind the reader on retry
_, err = rs.Seek(0, io.SeekStart)
if err != nil {
return false, err
}
_, err = ui.blb.Upload(ctx, rs, &options)
// Don't retry, return a retry error instead
return o.fs.pacer.CallNoRetry(func() (bool, error) {
_, err = blb.Upload(ctx, rs, &options)
return o.fs.shouldRetry(ctx, err)
})
}
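The `io.CopyN(rw, in, size+1)` in the new `uploadSinglepart` is a deliberate idiom: request one byte more than the declared size, so reading exactly `size` bytes (with `io.EOF`) confirms the length, while a full `size+1` read proves the source is longer than advertised. A self-contained illustration of the idiom:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// readExactly copies exactly size bytes from in, failing if the stream
// is shorter or longer than promised - the same size+1 CopyN idiom as
// the single part upload above.
func readExactly(in io.Reader, size int64) ([]byte, error) {
	var buf bytes.Buffer
	n, err := io.CopyN(&buf, in, size+1)
	if err != nil && err != io.EOF {
		return nil, fmt.Errorf("read failed: %w", err)
	}
	if n == size+1 {
		return nil, fmt.Errorf("object longer than expected (expecting %d but got more)", size)
	}
	if n != size {
		return nil, fmt.Errorf("expecting to read %d bytes but read %d", size, n)
	}
	return buf.Bytes(), nil
}

func main() {
	data, err := readExactly(bytes.NewReader([]byte("hello")), 5)
	fmt.Printf("%q %v\n", data, err) // "hello" <nil>
}
```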
// Info needed for an upload
type uploadInfo struct {
blb *blockblob.Client
httpHeaders blob.HTTPHeaders
isDirMarker bool
}
// Prepare the object for upload
func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {
container, containerPath := o.split()
if container == "" || containerPath == "" {
return ui, fmt.Errorf("can't upload to root - need a container")
}
// Create parent dir/bucket if not saving directory marker
metadataMu.Lock()
_, ui.isDirMarker = o.meta[dirMetaKey]
metadataMu.Unlock()
if !ui.isDirMarker {
err = o.fs.mkdirParent(ctx, o.remote)
if err != nil {
return ui, err
}
}
// Update Mod time
o.updateMetadataWithModTime(src.ModTime(ctx))
if err != nil {
return ui, err
}
// Create the HTTP headers for the upload
ui.httpHeaders = blob.HTTPHeaders{
BlobContentType: pString(fs.MimeType(ctx, src)),
}
// Compute the Content-MD5 of the file. As we stream all uploads it
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
if !o.fs.opt.DisableCheckSum {
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
if err == nil {
ui.httpHeaders.BlobContentMD5 = sourceMD5bytes
} else {
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
}
}
}
// Apply upload options (also allows one to overwrite content-type)
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
ui.httpHeaders.BlobCacheControl = pString(value)
case "content-disposition":
ui.httpHeaders.BlobContentDisposition = pString(value)
case "content-encoding":
ui.httpHeaders.BlobContentEncoding = pString(value)
case "content-language":
ui.httpHeaders.BlobContentLanguage = pString(value)
case "content-type":
ui.httpHeaders.BlobContentType = pString(value)
}
}
ui.blb = o.fs.getBlockBlobSVC(container, containerPath)
return ui, nil
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
@@ -2316,26 +2221,80 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errCantUpdateArchiveTierBlobs
}
}
size := src.Size()
multipartUpload := size < 0 || size > int64(o.fs.opt.ChunkSize)
var ui uploadInfo
if multipartUpload {
ui, err = o.uploadMultipart(ctx, in, src, options...)
} else {
ui, err = o.prepareUpload(ctx, src, options)
container, containerPath := o.split()
if container == "" || containerPath == "" {
return fmt.Errorf("can't upload to root - need a container")
}
// Create parent dir/bucket if not saving directory marker
_, isDirMarker := o.meta[dirMetaKey]
if !isDirMarker {
err = o.fs.mkdirParent(ctx, o.remote)
if err != nil {
return fmt.Errorf("failed to prepare upload: %w", err)
return err
}
err = o.uploadSinglepart(ctx, in, size, ui)
}
// Update Mod time
fs.Debugf(nil, "o.meta = %+v", o.meta)
o.updateMetadataWithModTime(src.ModTime(ctx))
if err != nil {
return err
}
// Create the HTTP headers for the upload
httpHeaders := blob.HTTPHeaders{
BlobContentType: pString(fs.MimeType(ctx, src)),
}
// Compute the Content-MD5 of the file. As we stream all uploads it
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
if !o.fs.opt.DisableCheckSum {
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
if err == nil {
httpHeaders.BlobContentMD5 = sourceMD5bytes
} else {
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
}
}
}
// Apply upload options (also allows one to overwrite content-type)
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
httpHeaders.BlobCacheControl = pString(value)
case "content-disposition":
httpHeaders.BlobContentDisposition = pString(value)
case "content-encoding":
httpHeaders.BlobContentEncoding = pString(value)
case "content-language":
httpHeaders.BlobContentLanguage = pString(value)
case "content-type":
httpHeaders.BlobContentType = pString(value)
}
}
blb := o.fs.getBlockBlobSVC(container, containerPath)
size := src.Size()
multipartUpload := size < 0 || size > o.fs.poolSize
fs.Debugf(nil, "o.meta = %+v", o.meta)
if multipartUpload {
err = o.uploadMultipart(ctx, in, size, blb, &httpHeaders)
} else {
err = o.uploadSinglepart(ctx, in, size, blb, &httpHeaders)
}
if err != nil {
return err
}
// Refresh metadata on object
if !ui.isDirMarker {
if !isDirMarker {
o.clearMetaData()
err = o.readMetaData(ctx)
if err != nil {
@@ -2424,14 +2383,13 @@ func parseTier(tier string) *blob.AccessTier {
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Purger = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.OpenChunkWriter = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.GetTierer = &Object{}
_ fs.SetTierer = &Object{}
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Purger = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.GetTierer = &Object{}
_ fs.SetTierer = &Object{}
)


@@ -20,18 +20,17 @@ func (f *Fs) InternalTest(t *testing.T) {
func TestIncrement(t *testing.T) {
for _, test := range []struct {
in [8]byte
want [8]byte
in []byte
want []byte
}{
{[8]byte{0, 0, 0, 0}, [8]byte{1, 0, 0, 0}},
{[8]byte{0xFE, 0, 0, 0}, [8]byte{0xFF, 0, 0, 0}},
{[8]byte{0xFF, 0, 0, 0}, [8]byte{0, 1, 0, 0}},
{[8]byte{0, 1, 0, 0}, [8]byte{1, 1, 0, 0}},
{[8]byte{0xFF, 0xFF, 0xFF, 0xFE}, [8]byte{0, 0, 0, 0xFF}},
{[8]byte{0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 1}},
{[8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 0, 0, 0}},
{[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
{[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
{[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
{[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
{[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
} {
increment(&test.in)
increment(test.in)
assert.Equal(t, test.want, test.in)
}
}
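Both sides of this diff test the same little-endian counter; only the parameter type changes (`*[8]byte` versus `[]byte`). For clarity, the slice form of `increment` in full, reconstructed from the hunks above, with a usage example showing how the counter feeds the base64 block IDs:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

// increment treats xs as an LSB-first binary counter and adds one,
// carrying into the next byte on overflow: {0xFF, 0, 0, 0} becomes
// {0, 1, 0, 0}, and an all-0xFF slice wraps around to all zeros.
func increment(xs []byte) {
	for i, digit := range xs {
		newDigit := digit + 1
		xs[i] = newDigit
		if newDigit >= digit {
			// exit if no carry
			break
		}
	}
}

func main() {
	binaryBlockID := make([]byte, 8)
	increment(binaryBlockID)
	// Block IDs are the base64 encoding of the counter, as in the backend.
	fmt.Println(base64.StdEncoding.EncodeToString(binaryBlockID)) // "AQAAAAAAAAA="
}
```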


@@ -31,9 +31,9 @@ func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
name := "TestAzureBlob"
name := "TestAzureBlob:"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
RemoteName: name,
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{


@@ -33,18 +33,10 @@ var _ fserrors.Fataler = (*Error)(nil)
// Bucket describes a B2 bucket
type Bucket struct {
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
}
// LifecycleRule is a single lifecycle rule
type LifecycleRule struct {
DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"`
DaysFromUploadingToHiding *int `json:"daysFromUploadingToHiding"`
FileNamePrefix string `json:"fileNamePrefix"`
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
}
// Timestamp is a UTC time when this file was uploaded. It is a base
@@ -214,10 +206,9 @@ type FileInfo struct {
// CreateBucketRequest is used to create a bucket
type CreateBucketRequest struct {
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
AccountID string `json:"accountId"`
Name string `json:"bucketName"`
Type string `json:"bucketType"`
}
// DeleteBucketRequest is used to create a bucket
@@ -340,11 +331,3 @@ type CopyPartRequest struct {
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
}
// UpdateBucketRequest describes a request to modify a B2 bucket
type UpdateBucketRequest struct {
ID string `json:"bucketId"`
AccountID string `json:"accountId"`
Type string `json:"bucketType,omitempty"`
LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
}
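Putting the new types together: a rule that permanently deletes hidden files after one day would be marshalled roughly as below (a sketch that copies the struct definitions from this hunk; the IDs are placeholders):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Copies of api.LifecycleRule and api.UpdateBucketRequest above.
type LifecycleRule struct {
	DaysFromHidingToDeleting  *int   `json:"daysFromHidingToDeleting"`
	DaysFromUploadingToHiding *int   `json:"daysFromUploadingToHiding"`
	FileNamePrefix            string `json:"fileNamePrefix"`
}

type UpdateBucketRequest struct {
	ID             string          `json:"bucketId"`
	AccountID      string          `json:"accountId"`
	Type           string          `json:"bucketType,omitempty"`
	LifecycleRules []LifecycleRule `json:"lifecycleRules,omitempty"`
}

func main() {
	days := 1
	req := UpdateBucketRequest{
		ID:        "exampleBucketID",  // placeholder
		AccountID: "exampleAccountID", // placeholder
		LifecycleRules: []LifecycleRule{{
			DaysFromHidingToDeleting: &days,
			FileNamePrefix:           "", // rclone only sets whole-bucket rules
		}},
	}
	out, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(out))
}
```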


@@ -32,7 +32,6 @@ import (
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/pool"
"github.com/rclone/rclone/lib/rest"
@@ -58,7 +57,9 @@ const (
minChunkSize = 5 * fs.Mebi
defaultChunkSize = 96 * fs.Mebi
defaultUploadCutoff = 200 * fs.Mebi
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
memoryPoolUseMmap = false
)
// Globals
@@ -73,7 +74,6 @@ func init() {
Name: "b2",
Description: "Backblaze B2",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
Name: "account",
Help: "Account ID or Application Key ID.",
@@ -149,18 +149,6 @@ might a maximum of "--transfers" chunks in progress at once.
5,000,000 Bytes is the minimum size.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
Note that chunks are stored in memory and there may be up to
"--transfers" * "--b2-upload-concurrency" chunks stored at once
in memory.`,
Default: 4,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Disable checksums for large (> upload cutoff) files.
@@ -200,41 +188,16 @@ The minimum value is 1 second. The maximum value is one week.`,
Advanced: true,
}, {
Name: "memory_pool_flush_time",
Default: fs.Duration(time.Minute),
Default: memoryPoolFlushTime,
Advanced: true,
Hide: fs.OptionHideBoth,
Help: `How often internal memory buffer pools will be flushed. (no longer used)`,
Help: `How often internal memory buffer pools will be flushed.
Uploads which require additional buffers (e.g. multipart) will use the memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
}, {
Name: "memory_pool_use_mmap",
Default: false,
Advanced: true,
Hide: fs.OptionHideBoth,
Help: `Whether to use mmap buffers in internal memory pool. (no longer used)`,
}, {
Name: "lifecycle",
Help: `Set the number of days deleted files should be kept when creating a bucket.
On bucket creation, this parameter is used to create a lifecycle rule
for the entire bucket.
If lifecycle is 0 (the default) it does not create a lifecycle rule so
the default B2 behaviour applies. This is to create versions of files
on delete and overwrite and to keep them indefinitely.
If lifecycle is >0 then it creates a single rule setting the number of
days before a file that is deleted or overwritten is deleted
permanently. This is known as daysFromHidingToDeleting in the b2 docs.
The minimum value for this parameter is 1 day.
You can also enable hard_delete in the config, which will mean
deletions won't cause versions but overwrites will still cause
versions to be made.
See: [rclone backend lifecycle](#lifecycle) for setting lifecycles after bucket creation.
`,
Default: 0,
Default: memoryPoolUseMmap,
Advanced: true,
Help: `Whether to use mmap buffers in internal memory pool.`,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -261,11 +224,11 @@ type Options struct {
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
DisableCheckSum bool `config:"disable_checksum"`
DownloadURL string `config:"download_url"`
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
Lifecycle int `config:"lifecycle"`
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -290,6 +253,7 @@ type Fs struct {
authMu sync.Mutex // lock for authorizing the account
pacer *fs.Pacer // To pace and retry the API calls
uploadToken *pacer.TokenDispenser // control concurrency
pool *pool.Pool // memory pool
}
// Object describes a b2 object
@@ -494,6 +458,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
uploads: make(map[string][]*api.GetUploadURLResponse),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
pool: pool.New(
time.Duration(opt.MemoryPoolFlushTime),
int(opt.ChunkSize),
ci.Transfers,
opt.MemoryPoolUseMmap,
),
}
f.setRoot(root)
f.features = (&fs.Features{
@@ -627,24 +597,23 @@ func (f *Fs) clearUploadURL(bucketID string) {
f.uploadMu.Unlock()
}
// getRW gets a RW buffer and an upload token
// getBuf gets a buffer of f.opt.ChunkSize and an upload token
//
// If noBuf is set then it just gets an upload token
func (f *Fs) getRW(noBuf bool) (rw *pool.RW) {
func (f *Fs) getBuf(noBuf bool) (buf []byte) {
f.uploadToken.Get()
if !noBuf {
rw = multipart.NewRW()
buf = f.pool.Get()
}
return rw
return buf
}
// putRW returns a RW buffer to the memory pool and returns an upload
// token
// putBuf returns a buffer to the memory pool and an upload token
//
// If buf is nil then it just returns the upload token
func (f *Fs) putRW(rw *pool.RW) {
if rw != nil {
_ = rw.Close()
// If noBuf is set then it just returns the upload token
func (f *Fs) putBuf(buf []byte, noBuf bool) {
if !noBuf {
f.pool.Put(buf)
}
f.uploadToken.Put()
}
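Both the old `getRW`/`putRW` and the new `getBuf`/`putBuf` pair the buffer with a token from `pacer.NewTokenDispenser(ci.Transfers)`, which is what actually caps the number of in-flight uploads. A minimal sketch of that dispenser pattern, assuming the `TokenDispenser` Get/Put API visible in this diff:

```go
package main

import (
	"fmt"
	"sync"

	"github.com/rclone/rclone/lib/pacer"
)

// uploadAll runs upload over every part with at most 4 in flight:
// Get blocks while all tokens are taken, Put frees one for the next.
func uploadAll(parts []string, upload func(string)) {
	tokens := pacer.NewTokenDispenser(4)
	var wg sync.WaitGroup
	for _, part := range parts {
		part := part
		tokens.Get() // acquire before spawning, bounding concurrency
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer tokens.Put() // return the token when done
			upload(part)
		}()
	}
	wg.Wait()
}

func main() {
	uploadAll([]string{"a", "b", "c"}, func(p string) { fmt.Println("uploaded", p) })
}
```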
@@ -851,7 +820,7 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
// listBuckets returns all the buckets to out
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
err = f.listBucketsToFn(ctx, "", func(bucket *api.Bucket) error {
err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
d := fs.NewDir(bucket.Name, time.Time{})
entries = append(entries, d)
return nil
@@ -944,14 +913,11 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
type listBucketFn func(*api.Bucket) error
// listBucketsToFn lists the buckets to the function supplied
func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error {
func (f *Fs) listBucketsToFn(ctx context.Context, fn listBucketFn) error {
var account = api.ListBucketsRequest{
AccountID: f.info.AccountID,
BucketID: f.info.Allowed.BucketID,
}
if bucketName != "" && account.BucketID == "" {
account.BucketName = f.opt.Enc.FromStandardName(bucketName)
}
var response api.ListBucketsResponse
opts := rest.Opts{
@@ -997,7 +963,7 @@ func (f *Fs) getbucketType(ctx context.Context, bucket string) (bucketType strin
if bucketType != "" {
return bucketType, nil
}
err = f.listBucketsToFn(ctx, bucket, func(bucket *api.Bucket) error {
err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
// listBucketsToFn reads bucket Types
return nil
})
@@ -1032,7 +998,7 @@ func (f *Fs) getBucketID(ctx context.Context, bucket string) (bucketID string, e
if bucketID != "" {
return bucketID, nil
}
err = f.listBucketsToFn(ctx, bucket, func(bucket *api.Bucket) error {
err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
// listBucketsToFn sets IDs
return nil
})
@@ -1096,11 +1062,6 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
Name: f.opt.Enc.FromStandardName(bucket),
Type: "allPrivate",
}
if f.opt.Lifecycle > 0 {
request.LifecycleRules = []api.LifecycleRule{{
DaysFromHidingToDeleting: &f.opt.Lifecycle,
}}
}
var response api.Bucket
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
@@ -1332,7 +1293,7 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
if err != nil {
return err
}
return up.Copy(ctx)
return up.Upload(ctx)
}
dstBucket, dstPath := dstObj.split()
@@ -1461,7 +1422,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
if err != nil {
return "", err
}
absPath := "/" + urlEncode(bucketPath)
absPath := "/" + bucketPath
link = RootURL + "/file/" + urlEncode(bucket) + absPath
bucketType, err := f.getbucketType(ctx, bucket)
if err != nil {
@@ -1900,11 +1861,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return err
}
if size < 0 {
if size == -1 {
// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
rw := o.fs.getRW(false)
buf := o.fs.getBuf(false)
n, err := io.CopyN(rw, in, int64(o.fs.opt.ChunkSize))
n, err := io.ReadFull(in, buf)
if err == nil {
bufReader := bufio.NewReader(in)
in = bufReader
@@ -1915,30 +1876,26 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "File is big enough for chunked streaming")
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
if err != nil {
o.fs.putRW(rw)
o.fs.putBuf(buf, false)
return err
}
// NB Stream returns the buffer and token
return up.Stream(ctx, rw)
} else if err == io.EOF {
return up.Stream(ctx, buf)
} else if err == io.EOF || err == io.ErrUnexpectedEOF {
fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
defer o.fs.putRW(rw)
size = n
in = rw
defer o.fs.putBuf(buf, false)
size = int64(n)
in = bytes.NewReader(buf[:n])
} else {
o.fs.putRW(rw)
o.fs.putBuf(buf, false)
return err
}
} else if size > int64(o.fs.opt.UploadCutoff) {
chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
Open: o.fs,
OpenOptions: options,
})
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
if err != nil {
return err
}
up := chunkWriter.(*largeUpload)
return o.decodeMetaDataFileInfo(up.info)
return up.Upload(ctx)
}
modTime := src.ModTime(ctx)
@@ -2046,41 +2003,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.decodeMetaDataFileInfo(&response)
}
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
// FIXME what if file is smaller than 1 chunk?
if f.opt.Versions {
return info, nil, errNotWithVersions
}
if f.opt.VersionAt.IsSet() {
return info, nil, errNotWithVersionAt
}
//size := src.Size()
// Temporary Object under construction
o := &Object{
fs: f,
remote: src.Remote(),
}
bucket, _ := o.split()
err = f.makeBucket(ctx, bucket)
if err != nil {
return info, nil, err
}
info = fs.ChunkWriterInfo{
ChunkSize: int64(f.opt.ChunkSize),
Concurrency: o.fs.opt.UploadConcurrency,
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil)
return info, up, err
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
bucket, bucketPath := o.split()
@@ -2106,149 +2028,16 @@ func (o *Object) ID() string {
return o.id
}
var lifecycleHelp = fs.CommandHelp{
Name: "lifecycle",
Short: "Read or set the lifecycle for a bucket",
Long: `This command can be used to read or set the lifecycle for a bucket.
Usage Examples:
To show the current lifecycle rules:
rclone backend lifecycle b2:bucket
This will dump something like this showing the lifecycle rules.
[
{
"daysFromHidingToDeleting": 1,
"daysFromUploadingToHiding": null,
"fileNamePrefix": ""
}
]
If there are no lifecycle rules (the default) then it will just return [].
To reset the current lifecycle rules:
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
This will run and then print the new lifecycle rules as above.
Rclone only lets you set lifecycles for the whole bucket with the
fileNamePrefix = "".
You can't disable versioning with B2. The best you can do is to set
the daysFromHidingToDeleting to 1 day. You can enable hard_delete in
the config as well, which will mean deletions won't cause versions but
overwrites will still cause versions to be made.
rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
`,
Opts: map[string]string{
"daysFromHidingToDeleting": "After a file has been hidden for this many days it is deleted. 0 is off.",
"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
},
}
func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
var newRule api.LifecycleRule
if daysStr := opt["daysFromHidingToDeleting"]; daysStr != "" {
days, err := strconv.Atoi(daysStr)
if err != nil {
return nil, fmt.Errorf("bad daysFromHidingToDeleting: %w", err)
}
newRule.DaysFromHidingToDeleting = &days
}
if daysStr := opt["daysFromUploadingToHiding"]; daysStr != "" {
days, err := strconv.Atoi(daysStr)
if err != nil {
return nil, fmt.Errorf("bad daysFromUploadingToHiding: %w", err)
}
newRule.DaysFromUploadingToHiding = &days
}
bucketName, _ := f.split("")
if bucketName == "" {
return nil, errors.New("bucket required")
}
var bucket *api.Bucket
if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil {
bucketID, err := f.getBucketID(ctx, bucketName)
if err != nil {
return nil, err
}
opts := rest.Opts{
Method: "POST",
Path: "/b2_update_bucket",
}
var request = api.UpdateBucketRequest{
ID: bucketID,
AccountID: f.info.AccountID,
LifecycleRules: []api.LifecycleRule{newRule},
}
var response api.Bucket
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
}
bucket = &response
} else {
err = f.listBucketsToFn(ctx, bucketName, func(b *api.Bucket) error {
bucket = b
return nil
})
if err != nil {
return nil, err
}
}
if bucket == nil {
return nil, fs.ErrorDirNotFound
}
return bucket.LifecycleRules, nil
}
var commandHelp = []fs.CommandHelp{
lifecycleHelp,
}
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
switch name {
case "lifecycle":
return f.lifecycleCommand(ctx, name, arg, opt)
default:
return nil, fs.ErrorCommandNotFound
}
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.PublicLinker = &Fs{}
_ fs.OpenChunkWriter = &Fs{}
_ fs.Commander = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.PublicLinker = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
)
View File
@@ -5,6 +5,7 @@
package b2
import (
"bytes"
"context"
"crypto/sha1"
"encoding/hex"
@@ -13,6 +14,7 @@ import (
"io"
"strings"
"sync"
"time"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
@@ -78,14 +80,12 @@ type largeUpload struct {
wrap accounting.WrapFn // account parts being transferred
id string // ID of the file being uploaded
size int64 // total size
parts int // calculated number of parts, if known
sha1smu sync.Mutex // mutex to protect sha1s
parts int64 // calculated number of parts, if known
sha1s []string // slice of SHA1s for each part
uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
chunkSize int64 // chunk size to use
src *Object // if copying, object we are reading from
info *api.FileInfo // final response with info about the object
}
// newLargeUpload starts an upload of object o from in with metadata in src
@@ -93,16 +93,18 @@ type largeUpload struct {
// If newInfo is set then metadata from that will be used instead of reading it from src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
size := src.Size()
parts := 0
parts := int64(0)
sha1SliceSize := int64(maxParts)
chunkSize := defaultChunkSize
if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else {
chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
parts = int(size / int64(chunkSize))
parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 {
parts++
}
sha1SliceSize = parts
}
opts := rest.Opts{
@@ -150,7 +152,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
id: response.ID,
size: size,
parts: parts,
sha1s: make([]string, 0, 16),
sha1s: make([]string, sha1SliceSize),
chunkSize: int64(chunkSize),
}
// unwrap the accounting from the input, we use wrap to put it
@@ -169,26 +171,24 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
// This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
up.uploadMu.Lock()
if len(up.uploads) > 0 {
defer up.uploadMu.Unlock()
if len(up.uploads) == 0 {
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_part_url",
}
var request = api.GetUploadPartURLRequest{
ID: up.id,
}
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
} else {
upload, up.uploads = up.uploads[0], up.uploads[1:]
up.uploadMu.Unlock()
return upload, nil
}
up.uploadMu.Unlock()
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_part_url",
}
var request = api.GetUploadPartURLRequest{
ID: up.id,
}
err = up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
return upload, nil
}
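getUploadURL above is a mutex-guarded free list: reuse a returned upload URL if one is cached, otherwise ask the server for a fresh one. The same idea reduced to a standalone sketch — the type and field names here are illustrative, not rclone's (needs "sync"):

// urlPool hands out cached upload URLs and falls back to fetching.
type urlPool struct {
	mu   sync.Mutex
	free []string // URLs returned by previous uploads
}

func (p *urlPool) get(fetch func() (string, error)) (string, error) {
	p.mu.Lock()
	if len(p.free) > 0 {
		u := p.free[0]
		p.free = p.free[1:]
		p.mu.Unlock()
		return u, nil // reuse a cached URL
	}
	p.mu.Unlock()
	return fetch() // no cached URL - request a new one
}

func (p *urlPool) put(u string) {
	p.mu.Lock()
	p.free = append(p.free, u)
	p.mu.Unlock()
}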
@@ -203,39 +203,10 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
up.uploadMu.Unlock()
}
// Add an sha1 to the being built up sha1s
func (up *largeUpload) addSha1(chunkNumber int, sha1 string) {
up.sha1smu.Lock()
defer up.sha1smu.Unlock()
if len(up.sha1s) < chunkNumber+1 {
up.sha1s = append(up.sha1s, make([]string, chunkNumber+1-len(up.sha1s))...)
}
up.sha1s[chunkNumber] = sha1
}
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (size int64, err error) {
// Only account after the checksum reads have been done
if do, ok := reader.(pool.DelayAccountinger); ok {
// To figure out this number, do a transfer and if the accounted size is 0 or a
// multiple of what it should be, increase or decrease this number.
do.DelayAccounting(1)
}
err = up.f.pacer.Call(func() (bool, error) {
// Discover the size by seeking to the end
size, err = reader.Seek(0, io.SeekEnd)
if err != nil {
return false, err
}
// rewind the reader on retry and after reading size
_, err = reader.Seek(0, io.SeekStart)
if err != nil {
return false, err
}
fs.Debugf(up.o, "Sending chunk %d length %d", chunkNumber, size)
// Transfer a chunk
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
// Get upload URL
upload, err := up.getUploadURL(ctx)
@@ -243,8 +214,8 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
return false, err
}
in := newHashAppendingReader(reader, sha1.New())
sizeWithHash := size + int64(in.AdditionalLength())
in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
size := int64(len(body)) + int64(in.AdditionalLength())
// Authorization
//
@@ -274,10 +245,10 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
Body: up.wrap(in),
ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken,
"X-Bz-Part-Number": fmt.Sprintf("%d", chunkNumber+1),
"X-Bz-Part-Number": fmt.Sprintf("%d", part),
sha1Header: "hex_digits_at_end",
},
ContentLength: &sizeWithHash,
ContentLength: &size,
}
var response api.UploadPartResponse
@@ -285,7 +256,7 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
retry, err := up.f.shouldRetry(ctx, resp, err)
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", chunkNumber, retry, err, err)
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
}
// On retryable error clear PartUploadURL
if retry {
@@ -293,30 +264,30 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
upload = nil
}
up.returnUploadURL(upload)
up.addSha1(chunkNumber, in.HexSum())
up.sha1s[part-1] = in.HexSum()
return retry, err
})
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d: %v", chunkNumber, err)
fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
} else {
fs.Debugf(up.o, "Done sending chunk %d", chunkNumber)
fs.Debugf(up.o, "Done sending chunk %d", part)
}
return size, err
return err
}
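newHashAppendingReader implements B2's "hex_digits_at_end" convention: the SHA1 of the body is streamed after the body itself, which is why the content length is the body length plus AdditionalLength() (40 hex characters for SHA1). A minimal reader of that shape might look like this — an assumption for illustration only, the real helper lives elsewhere in this package (needs "encoding/hex", "hash", "io", "strings"):

// hashAppendingReader streams in, then the hex digest of what it read.
type hashAppendingReader struct {
	in   io.Reader
	h    hash.Hash // e.g. sha1.New()
	tail io.Reader // set to the hex digest once in is exhausted
}

func (r *hashAppendingReader) Read(p []byte) (int, error) {
	if r.tail == nil {
		n, err := r.in.Read(p)
		r.h.Write(p[:n]) // hash everything we pass through
		if err == io.EOF {
			r.tail = strings.NewReader(hex.EncodeToString(r.h.Sum(nil)))
			return n, nil // digest follows on the next Read
		}
		return n, err
	}
	return r.tail.Read(p) // emit the trailing hex digits
}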
// Copy a chunk
func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64) error {
func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
opts := rest.Opts{
Method: "POST",
Path: "/b2_copy_part",
}
offset := int64(part) * up.chunkSize // where we are in the source file
offset := (part - 1) * up.chunkSize // where we are in the source file
var request = api.CopyPartRequest{
SourceID: up.src.id,
LargeFileID: up.id,
PartNumber: int64(part + 1),
PartNumber: part,
Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
}
var response api.UploadPartResponse
@@ -325,7 +296,7 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
if err != nil {
fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
}
up.addSha1(part, response.SHA1)
up.sha1s[part-1] = response.SHA1
return retry, err
})
if err != nil {
@@ -336,8 +307,8 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
return err
}
// Close closes off the large upload
func (up *largeUpload) Close(ctx context.Context) error {
// finish closes off the large upload
func (up *largeUpload) finish(ctx context.Context) error {
fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
opts := rest.Opts{
Method: "POST",
@@ -355,12 +326,11 @@ func (up *largeUpload) Close(ctx context.Context) error {
if err != nil {
return err
}
up.info = &response
return nil
return up.o.decodeMetaDataFileInfo(&response)
}
// Abort aborts the large upload
func (up *largeUpload) Abort(ctx context.Context) error {
// cancel aborts the large upload
func (up *largeUpload) cancel(ctx context.Context) error {
fs.Debugf(up.o, "Cancelling large file %s", up.what)
opts := rest.Opts{
Method: "POST",
@@ -385,98 +355,157 @@ func (up *largeUpload) Abort(ctx context.Context) error {
// reaches EOF.
//
// Note that initialUploadBlock must be returned to f.putBuf()
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW) (err error) {
defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
var (
g, gCtx = errgroup.WithContext(ctx)
hasMoreParts = true
)
up.size = initialUploadBlock.Size()
for part := 0; hasMoreParts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
var rw *pool.RW
if part == 1 {
rw = initialUploadBlock
} else {
rw = up.f.getRW(false)
}
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putRW(rw)
break
}
// Read the chunk
var n int64
if part == 1 {
n = rw.Size()
} else {
n, err = io.CopyN(rw, up.in, up.chunkSize)
if err == io.EOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
hasMoreParts = false
} else if err != nil {
// other kinds of errors indicate failure
up.f.putRW(rw)
return err
up.size = int64(len(initialUploadBlock))
g.Go(func() error {
for part := int64(1); hasMoreParts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
var buf []byte
if part == 1 {
buf = initialUploadBlock
} else {
buf = up.f.getBuf(false)
}
}
// Keep stats up to date
up.parts = part
up.size += n
if part > maxParts {
up.f.putRW(rw)
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putBuf(buf, false)
return nil
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putRW(rw)
_, err = up.WriteChunk(gCtx, part, rw)
return err
})
}
// Read the chunk
var n int
if part == 1 {
n = len(buf)
} else {
n, err = io.ReadFull(up.in, buf)
if err == io.ErrUnexpectedEOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
buf = buf[:n]
hasMoreParts = false
} else if err == io.EOF {
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
up.f.putBuf(buf, false)
return nil
} else if err != nil {
// other kinds of errors indicate failure
up.f.putBuf(buf, false)
return err
}
}
// Keep stats up to date
up.parts = part
up.size += int64(n)
if part > maxParts {
up.f.putBuf(buf, false)
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putBuf(buf, false)
return up.transferChunk(gCtx, part, buf)
})
}
return nil
})
err = g.Wait()
if err != nil {
return err
}
return up.Close(ctx)
up.sha1s = up.sha1s[:up.parts]
return up.finish(ctx)
}
// Copy the chunks from the source to the destination
func (up *largeUpload) Copy(ctx context.Context) (err error) {
defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
// Upload uploads the chunks from the input
func (up *largeUpload) Upload(ctx context.Context) (err error) {
defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
var (
g, gCtx = errgroup.WithContext(ctx)
remaining = up.size
g, gCtx = errgroup.WithContext(ctx)
remaining = up.size
uploadPool *pool.Pool
ci = fs.GetConfig(ctx)
)
g.SetLimit(up.f.opt.UploadConcurrency)
for part := 0; part <= up.parts; part++ {
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in copying all the other parts.
if gCtx.Err() != nil {
break
}
reqSize := remaining
if reqSize >= up.chunkSize {
reqSize = up.chunkSize
}
part := part // for the closure
g.Go(func() (err error) {
return up.copyChunk(gCtx, part, reqSize)
})
remaining -= reqSize
// If using large chunk size then make a temporary pool
if up.chunkSize <= int64(up.f.opt.ChunkSize) {
uploadPool = up.f.pool
} else {
uploadPool = pool.New(
time.Duration(up.f.opt.MemoryPoolFlushTime),
int(up.chunkSize),
ci.Transfers,
up.f.opt.MemoryPoolUseMmap,
)
defer uploadPool.Flush()
}
// Get an upload token and a buffer
getBuf := func() (buf []byte) {
up.f.getBuf(true)
if !up.doCopy {
buf = uploadPool.Get()
}
return buf
}
// Put an upload token and a buffer
putBuf := func(buf []byte) {
if !up.doCopy {
uploadPool.Put(buf)
}
up.f.putBuf(nil, true)
}
g.Go(func() error {
for part := int64(1); part <= up.parts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
buf := getBuf()
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
putBuf(buf)
return nil
}
reqSize := remaining
if reqSize >= up.chunkSize {
reqSize = up.chunkSize
}
if !up.doCopy {
// Read the chunk
buf = buf[:reqSize]
_, err = io.ReadFull(up.in, buf)
if err != nil {
putBuf(buf)
return err
}
}
part := part // for the closure
g.Go(func() (err error) {
defer putBuf(buf)
if !up.doCopy {
err = up.transferChunk(gCtx, part, buf)
} else {
err = up.copyChunk(gCtx, part, reqSize)
}
return err
})
remaining -= reqSize
}
return nil
})
err = g.Wait()
if err != nil {
return err
}
return up.Close(ctx)
return up.finish(ctx)
}
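Stream and Upload above share one shape: a producer reads fixed-size chunks and spawns an errgroup worker per part, bounded by SetLimit, with finish() called once everything has landed. Stripped of the buffer pool and the copy path, the pattern is roughly this — upload stands in for transferChunk and the limit is arbitrary:

// uploadChunks reads in in chunkSize pieces and uploads them concurrently.
func uploadChunks(ctx context.Context, in io.Reader, chunkSize int64,
	upload func(ctx context.Context, part int64, buf []byte) error) error {
	g, gCtx := errgroup.WithContext(ctx)
	g.SetLimit(4) // bound the number of in-flight parts
	for part := int64(1); ; part++ {
		buf := make([]byte, chunkSize)
		n, err := io.ReadFull(in, buf)
		if err == io.EOF {
			break // nothing left - previous chunk was the last
		}
		if err != nil && err != io.ErrUnexpectedEOF {
			return err
		}
		last := err == io.ErrUnexpectedEOF // short read => final chunk
		buf = buf[:n]
		part := part // capture for the closure
		g.Go(func() error { return upload(gCtx, part, buf) })
		if last {
			break
		}
	}
	return g.Wait()
}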
View File
@@ -52,7 +52,7 @@ func (e *Error) Error() string {
out += ": " + e.Message
}
if e.ContextInfo != nil {
out += fmt.Sprintf(" (%s)", string(e.ContextInfo))
out += fmt.Sprintf(" (%+v)", e.ContextInfo)
}
return out
}
@@ -63,7 +63,7 @@ var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"
// Types of things in Item/ItemMini
// Types of things in Item
const (
ItemTypeFolder = "folder"
ItemTypeFile = "file"
@@ -72,31 +72,20 @@ const (
ItemStatusDeleted = "deleted"
)
// ItemMini is a subset of the elements in a full Item returned by some API calls
type ItemMini struct {
Type string `json:"type"`
ID string `json:"id"`
SequenceID int64 `json:"sequence_id,string"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
}
// Item describes a folder or a file as returned by Get Folder Items and others
type Item struct {
Type string `json:"type"`
ID string `json:"id"`
SequenceID int64 `json:"sequence_id,string"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
CreatedAt Time `json:"created_at"`
ModifiedAt Time `json:"modified_at"`
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
Parent ItemMini `json:"parent"`
Type string `json:"type"`
ID string `json:"id"`
SequenceID string `json:"sequence_id"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
CreatedAt Time `json:"created_at"`
ModifiedAt Time `json:"modified_at"`
ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
SharedLink struct {
URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"`
@@ -292,30 +281,3 @@ type User struct {
Address string `json:"address"`
AvatarURL string `json:"avatar_url"`
}
// FileTreeChangeEventTypes are the events that can require cache invalidation
var FileTreeChangeEventTypes = map[string]struct{}{
"ITEM_COPY": {},
"ITEM_CREATE": {},
"ITEM_MAKE_CURRENT_VERSION": {},
"ITEM_MODIFY": {},
"ITEM_MOVE": {},
"ITEM_RENAME": {},
"ITEM_TRASH": {},
"ITEM_UNDELETE_VIA_TRASH": {},
"ITEM_UPLOAD": {},
}
// Event is an array element in the response returned from /events
type Event struct {
EventType string `json:"event_type"`
EventID string `json:"event_id"`
Source Item `json:"source"`
}
// Events is returned from /events
type Events struct {
ChunkSize int64 `json:"chunk_size"`
Entries []Event `json:"entries"`
NextStreamPosition int64 `json:"next_stream_position"`
}
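For orientation, a /events response decodes into the Events struct above roughly as follows — the field values are invented for illustration (needs "encoding/json"):

func decodeSample() (Events, error) {
	const sample = `{
	  "chunk_size": 1,
	  "next_stream_position": 1152923584,
	  "entries": [
	    {"event_type": "ITEM_RENAME", "event_id": "f82c3ba0",
	     "source": {"type": "file", "id": "123", "name": "new.txt"}}
	  ]
	}`
	var ev Events // ev.Entries[0].EventType == "ITEM_RENAME" after decoding
	err := json.Unmarshal([]byte(sample), &ev)
	return ev, err
}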
View File
@@ -149,23 +149,6 @@ func init() {
Default: "",
Help: "Only show items owned by the login (email address) passed in.",
Advanced: true,
}, {
Name: "impersonate",
Default: "",
Help: `Impersonate this user ID when using a service account.
Setting this flag allows rclone, when using a JWT service account, to
act on behalf of another user by setting the as-user header.
The user ID is the Box identifier for a user. User IDs can be found
for any user via the GET /users endpoint, which is only available to
admins, or by calling the GET /users/me endpoint with an authenticated
user session.
See: https://developer.box.com/guides/authentication/jwt/as-user/
`,
Advanced: true,
Sensitive: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -279,29 +262,19 @@ type Options struct {
AccessToken string `config:"access_token"`
ListChunk int `config:"list_chunk"`
OwnedBy string `config:"owned_by"`
Impersonate string `config:"impersonate"`
}
// ItemMeta defines metadata we cache for each Item ID
type ItemMeta struct {
SequenceID int64 // the most recent event processed for this item
ParentID string // ID of the parent directory of this item
Name string // leaf name of this item
}
// Fs represents a remote box
type Fs struct {
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
uploadToken *pacer.TokenDispenser // control concurrency
itemMetaCacheMu *sync.Mutex // protects itemMetaCache
itemMetaCache map[string]ItemMeta // map of Item ID to selected metadata
name string // name of this remote
root string // the path we are working on
opt Options // parsed options
features *fs.Features // optional features
srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
uploadToken *pacer.TokenDispenser // control concurrency
}
// Object describes a box object
@@ -449,14 +422,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
itemMetaCacheMu: new(sync.Mutex),
itemMetaCache: make(map[string]ItemMeta),
name: name,
root: root,
opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
}
f.features = (&fs.Features{
CaseInsensitive: true,
@@ -469,11 +440,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
}
// If using impersonate set an as-user header
if f.opt.Impersonate != "" {
f.srv.SetHeader("as-user", f.opt.Impersonate)
}
jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type")
@@ -716,17 +682,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
entries = append(entries, o)
}
// Cache some metadata for this Item to help us process events later
// on. In particular, the box event API does not provide the old path
// of the Item when it is renamed/deleted/moved/etc.
f.itemMetaCacheMu.Lock()
cachedItemMeta, found := f.itemMetaCache[info.ID]
if !found || cachedItemMeta.SequenceID < info.SequenceID {
f.itemMetaCache[info.ID] = ItemMeta{SequenceID: info.SequenceID, ParentID: directoryID, Name: info.Name}
}
f.itemMetaCacheMu.Unlock()
return false
})
if err != nil {
@@ -1166,7 +1121,7 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) (err error) {
var (
deleteErrors atomic.Uint64
deleteErrors = int64(0)
concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
wg sync.WaitGroup
)
@@ -1182,7 +1137,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
err := f.deletePermanently(ctx, item.Type, item.ID)
if err != nil {
fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
deleteErrors.Add(1)
atomic.AddInt64(&deleteErrors, 1)
}
}()
} else {
@@ -1191,277 +1146,12 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
return false
})
wg.Wait()
if deleteErrors.Load() != 0 {
return fmt.Errorf("failed to delete %d trash items", deleteErrors.Load())
if deleteErrors != 0 {
return fmt.Errorf("failed to delete %d trash items", deleteErrors)
}
return err
}
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
go func() {
// get the `stream_position` early so all changes from now on get processed
streamPosition, err := f.changeNotifyStreamPosition(ctx)
if err != nil {
fs.Infof(f, "Failed to get StreamPosition: %s", err)
}
// box can send duplicate Event IDs. Use this map to track and filter
// the ones we've already processed.
processedEventIDs := make(map[string]time.Time)
var ticker *time.Ticker
var tickerC <-chan time.Time
for {
select {
case pollInterval, ok := <-pollIntervalChan:
if !ok {
if ticker != nil {
ticker.Stop()
}
return
}
if ticker != nil {
ticker.Stop()
ticker, tickerC = nil, nil
}
if pollInterval != 0 {
ticker = time.NewTicker(pollInterval)
tickerC = ticker.C
}
case <-tickerC:
if streamPosition == "" {
streamPosition, err = f.changeNotifyStreamPosition(ctx)
if err != nil {
fs.Infof(f, "Failed to get StreamPosition: %s", err)
continue
}
}
// Garbage collect EventIDs older than 1 minute
for eventID, timestamp := range processedEventIDs {
if time.Since(timestamp) > time.Minute {
delete(processedEventIDs, eventID)
}
}
streamPosition, err = f.changeNotifyRunner(ctx, notifyFunc, streamPosition, processedEventIDs)
if err != nil {
fs.Infof(f, "Change notify listener failure: %s", err)
}
}
}
}()
}
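A sketch of how a caller drives this poller — the interval and callback are illustrative; rclone's VFS layer is the real consumer:

pollChan := make(chan time.Duration)
f.ChangeNotify(ctx, func(path string, entryType fs.EntryType) {
	fs.Infof(nil, "change at %q (%v)", path, entryType)
}, pollChan)
pollChan <- 30 * time.Second // start polling every 30s
// ... later ...
close(pollChan) // stop the background poller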
func (f *Fs) changeNotifyStreamPosition(ctx context.Context) (streamPosition string, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/events",
Parameters: fieldsValue(),
}
opts.Parameters.Set("stream_position", "now")
opts.Parameters.Set("stream_type", "changes")
var result api.Events
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", err
}
return strconv.FormatInt(result.NextStreamPosition, 10), nil
}
// Attempts to construct the full path for an object, given the ID of its
// parent directory and the name of the object.
//
// Can return "" if the parentID is not currently in the directory cache.
func (f *Fs) getFullPath(parentID string, childName string) (fullPath string) {
fullPath = ""
name := f.opt.Enc.ToStandardName(childName)
if parentID != "" {
if parentDir, ok := f.dirCache.GetInv(parentID); ok {
if len(parentDir) > 0 {
fullPath = parentDir + "/" + name
} else {
fullPath = name
}
}
} else {
// No parent, this object is at the root
fullPath = name
}
return fullPath
}
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), streamPosition string, processedEventIDs map[string]time.Time) (nextStreamPosition string, err error) {
nextStreamPosition = streamPosition
for {
limit := f.opt.ListChunk
// box only allows a max of 500 events
if limit > 500 {
limit = 500
}
opts := rest.Opts{
Method: "GET",
Path: "/events",
Parameters: fieldsValue(),
}
opts.Parameters.Set("stream_position", nextStreamPosition)
opts.Parameters.Set("stream_type", "changes")
opts.Parameters.Set("limit", strconv.Itoa(limit))
var result api.Events
var resp *http.Response
fs.Debugf(f, "Checking for changes on remote (next_stream_position: %q)", nextStreamPosition)
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", err
}
if result.ChunkSize != int64(len(result.Entries)) {
return "", fmt.Errorf("invalid response to event request, chunk_size (%v) not equal to number of entries (%v)", result.ChunkSize, len(result.Entries))
}
nextStreamPosition = strconv.FormatInt(result.NextStreamPosition, 10)
if result.ChunkSize == 0 {
return nextStreamPosition, nil
}
type pathToClear struct {
path string
entryType fs.EntryType
}
var pathsToClear []pathToClear
newEventIDs := 0
for _, entry := range result.Entries {
eventDetails := fmt.Sprintf("[%q(%d)|%s|%s|%s|%s]", entry.Source.Name, entry.Source.SequenceID,
entry.Source.Type, entry.EventType, entry.Source.ID, entry.EventID)
if entry.EventID == "" {
fs.Debugf(f, "%s ignored due to missing EventID", eventDetails)
continue
}
if _, ok := processedEventIDs[entry.EventID]; ok {
fs.Debugf(f, "%s ignored due to duplicate EventID", eventDetails)
continue
}
processedEventIDs[entry.EventID] = time.Now()
newEventIDs++
if entry.Source.ID == "" { // missing File or Folder ID
fs.Debugf(f, "%s ignored due to missing SourceID", eventDetails)
continue
}
if entry.Source.Type != api.ItemTypeFile && entry.Source.Type != api.ItemTypeFolder { // event is not for a file or folder
fs.Debugf(f, "%s ignored due to unsupported SourceType", eventDetails)
continue
}
// Only interested in event types that result in a file tree change
if _, found := api.FileTreeChangeEventTypes[entry.EventType]; !found {
fs.Debugf(f, "%s ignored due to unsupported EventType", eventDetails)
continue
}
f.itemMetaCacheMu.Lock()
itemMeta, cachedItemMetaFound := f.itemMetaCache[entry.Source.ID]
if cachedItemMetaFound {
if itemMeta.SequenceID >= entry.Source.SequenceID {
// Item in the cache has the same or newer SequenceID than
// this event. Ignore this event, it must be old.
f.itemMetaCacheMu.Unlock()
fs.Debugf(f, "%s ignored due to old SequenceID (%d)", eventDetails, itemMeta.SequenceID)
continue
}
// This event is newer. Delete its entry from the cache,
// we'll notify about its change below, then it's up to a
// future list operation to repopulate the cache.
delete(f.itemMetaCache, entry.Source.ID)
}
f.itemMetaCacheMu.Unlock()
entryType := fs.EntryDirectory
if entry.Source.Type == api.ItemTypeFile {
entryType = fs.EntryObject
}
// The box event only includes the new path for the object (e.g.
// the path after the object was moved). If there was an old path
// saved in our cache, it must be cleared.
if cachedItemMetaFound {
path := f.getFullPath(itemMeta.ParentID, itemMeta.Name)
if path != "" {
fs.Debugf(f, "%s added old path (%q) for notify", eventDetails, path)
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
} else {
fs.Debugf(f, "%s old parent not cached", eventDetails)
}
// If this is a directory, also delete it from the dir cache.
// This will effectively invalidate the item metadata cache
// entries for all descendants of this directory, since we
// will no longer be able to construct a full path for them.
// This is exactly what we want, since we don't want to notify
// on the paths of these descendants if one of their ancestors
// has been renamed/deleted.
if entry.Source.Type == api.ItemTypeFolder {
f.dirCache.FlushDir(path)
}
}
// If the item is "active", then it is not trashed or deleted, so
// it potentially has a valid parent.
//
// Construct the new path of the object, based on the Parent ID
// and its name. If we get an empty result, it means we don't
// currently know about this object so notification is unnecessary.
if entry.Source.ItemStatus == api.ItemStatusActive {
path := f.getFullPath(entry.Source.Parent.ID, entry.Source.Name)
if path != "" {
fs.Debugf(f, "%s added new path (%q) for notify", eventDetails, path)
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
} else {
fs.Debugf(f, "%s new parent not found", eventDetails)
}
}
}
// box can sometimes repeatedly return the same Event IDs within a
// short period of time. If it stops giving us new ones, treat it
// the same as if it returned us none at all.
if newEventIDs == 0 {
return nextStreamPosition, nil
}
notifiedPaths := make(map[string]bool)
for _, p := range pathsToClear {
if _, ok := notifiedPaths[p.path]; ok {
continue
}
notifiedPaths[p.path] = true
notifyFunc(p.path, p.entryType)
}
fs.Debugf(f, "Received %v events, resulting in %v paths and %v notifications", len(result.Entries), len(pathsToClear), len(notifiedPaths))
}
}
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
View File
@@ -18,7 +18,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
})
View File
@@ -40,7 +40,6 @@ func TestIntegration(t *testing.T) {
UnimplementableFsMethods: []string{
"PublicLink",
"OpenWriterAt",
"OpenChunkWriter",
"MergeDirs",
"DirCacheFlush",
"UserInfo",
View File
@@ -914,7 +914,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return do(ctx, uRemote, expire, unlink)
}
// PutUnchecked in to the remote path with the modTime given of the given size
// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
View File
@@ -11,7 +11,7 @@ import (
)
var (
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "OpenChunkWriter"}
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect"}
unimplementableObjectMethods = []string{}
)
View File
@@ -257,16 +257,6 @@ func isMetadataFile(filename string) bool {
return strings.HasSuffix(filename, metaFileExt)
}
// Checks whether a file is a metadata file and returns the original
// file name and a flag indicating whether it was a metadata file or
// not.
func unwrapMetadataFile(filename string) (string, bool) {
if !isMetadataFile(filename) {
return "", false
}
return filename[:len(filename)-len(metaFileExt)], true
}
// makeDataName generates the file name for a data file with specified compression mode
func makeDataName(remote string, size int64, mode int) (newRemote string) {
if mode != Uncompressed {
@@ -989,8 +979,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
fs.Logf(f, "path %q entryType %d", path, entryType)
var (
wrappedPath string
isMetadataFile bool
wrappedPath string
)
switch entryType {
case fs.EntryDirectory:
@@ -998,10 +987,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
case fs.EntryObject:
// Note: All we really need to do to monitor the object is to check whether the metadata changed,
// as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same.
wrappedPath, isMetadataFile = unwrapMetadataFile(path)
if !isMetadataFile {
return
}
wrappedPath = makeMetadataName(path)
default:
fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType)
return
View File
@@ -14,26 +14,23 @@ import (
"github.com/rclone/rclone/fstest/fstests"
)
var defaultOpt = fstests.Opt{
RemoteName: "TestCompress:",
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"OpenChunkWriter",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{},
}
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &defaultOpt)
opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{}}
fstests.Run(t, &opt)
}
// TestRemoteGzip tests GZIP compression
@@ -43,13 +40,27 @@ func TestRemoteGzip(t *testing.T) {
}
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
name := "TestCompressGzip"
opt := defaultOpt
opt.RemoteName = name + ":"
opt.ExtraConfig = []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
}
opt.QuickTestOK = true
fstests.Run(t, &opt)
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
UnimplementableObjectMethods: []string{
"GetTier",
"SetTier",
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
},
QuickTestOK: true,
})
}
View File
@@ -24,7 +24,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*crypt.Object)(nil),
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
@@ -45,7 +45,7 @@ func TestStandardBase32(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -67,7 +67,7 @@ func TestStandardBase64(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base64"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -89,7 +89,7 @@ func TestStandardBase32768(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base32768"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -111,7 +111,7 @@ func TestOff(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "off"},
},
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -137,7 +137,7 @@ func TestObfuscate(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
},
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
@@ -164,7 +164,7 @@ func TestNoDataObfuscate(t *testing.T) {
{Name: name, Key: "no_data_encryption", Value: "true"},
},
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
View File
@@ -71,7 +71,7 @@ const (
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
minChunkSize = fs.SizeSuffix(googleapi.MinUploadChunkSize)
defaultChunkSize = 8 * fs.Mebi
partialFields = "id,name,size,md5Checksum,sha1Checksum,sha256Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks,resourceKey"
partialFields = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks,resourceKey"
listRGrouping = 50 // number of IDs to search at once when using ListR
listRInputBuffer = 1000 // size of input buffer when using ListR
defaultXDGIcon = "text-html"
@@ -323,13 +323,13 @@ rather than shortcuts themselves when doing server side copies.`,
}, {
Name: "skip_checksum_gphotos",
Default: false,
Help: `Skip checksums on Google photos and videos only.
Help: `Skip MD5 checksum on Google photos and videos only.
Use this if you get checksum errors when transferring Google photos or
videos.
Setting this flag will cause Google photos and videos to return a
blank checksums.
blank MD5 checksum.
Google photos are identified by being in the "photos" space.
@@ -594,32 +594,10 @@ This resource key requirement only applies to a subset of old files.
Note also that opening the folder once in the web interface (with the
user you've authenticated rclone with) seems to be enough so that the
resource key is not needed.
resource key is no needed.
`,
Advanced: true,
Sensitive: true,
}, {
Name: "fast_list_bug_fix",
Help: `Work around a bug in Google Drive listing.
Normally rclone will work around a bug in Google Drive when using
--fast-list (ListR) where the search "(A in parents) or (B in
parents)" returns nothing sometimes. See #3114, #4289 and
https://issuetracker.google.com/issues/149522397
Rclone detects this by finding no items in more than one directory
when listing and retries them as lists of individual directories.
This means that if you have a lot of empty directories rclone will end
up listing them all individually and this can take many more API
calls.
This flag allows the work-around to be disabled. This is **not**
recommended in normal use - only if you have a particular case you are
having trouble with like many empty directories.
`,
Advanced: true,
Default: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -694,7 +672,6 @@ type Options struct {
SkipShortcuts bool `config:"skip_shortcuts"`
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
ResourceKey string `config:"resource_key"`
FastListBugFix bool `config:"fast_list_bug_fix"`
Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
}
@@ -751,8 +728,6 @@ type Object struct {
baseObject
url string // Download URL of this object
md5sum string // md5sum of the object
sha1sum string // sha1sum of the object
sha256sum string // sha256sum of the object
v2Download bool // generate v2 download link ondemand
}
@@ -1416,8 +1391,6 @@ func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
for _, space := range info.Spaces {
if space == "photos" {
info.Md5Checksum = ""
info.Sha1Checksum = ""
info.Sha256Checksum = ""
break
}
}
@@ -1426,8 +1399,6 @@ func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
baseObject: f.newBaseObject(remote, info),
url: fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, actualID(info.Id)),
md5sum: strings.ToLower(info.Md5Checksum),
sha1sum: strings.ToLower(info.Sha1Checksum),
sha256sum: strings.ToLower(info.Sha256Checksum),
v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize),
}
if info.ResourceKey != "" {
@@ -1920,7 +1891,7 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
// drive where (A in parents) or (B in parents) returns nothing
// sometimes. See #3114, #4289 and
// https://issuetracker.google.com/issues/149522397
if f.opt.FastListBugFix && len(dirs) > 1 && !foundItems {
if len(dirs) > 1 && !foundItems {
if atomic.SwapInt32(&f.grouping, 1) != 1 {
fs.Debugf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs))
}
@@ -3013,7 +2984,7 @@ func (f *Fs) DirCacheFlush() {
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.NewHashSet(hash.MD5, hash.SHA1, hash.SHA256)
return hash.Set(hash.MD5)
}
func (f *Fs) changeChunkSize(chunkSizeString string) (err error) {
@@ -3574,16 +3545,10 @@ func (o *baseObject) Remote() string {
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t == hash.MD5 {
return o.md5sum, nil
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
if t == hash.SHA1 {
return o.sha1sum, nil
}
if t == hash.SHA256 {
return o.sha256sum, nil
}
return "", hash.ErrUnsupported
return o.md5sum, nil
}
func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
View File
@@ -8,19 +8,121 @@ package dropbox
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/lib/atexit"
)
const (
maxBatchSize = 1000 // max size the batch can be
defaultTimeoutSync = 500 * time.Millisecond // kick off the batch if nothing added for this long (sync)
defaultTimeoutAsync = 10 * time.Second // kick off the batch if nothing added for this long (async)
defaultBatchSizeAsync = 100 // default batch size if async
)
// batcher holds info about the current items waiting for upload
type batcher struct {
f *Fs // Fs this batch is part of
mode string // configured batch mode
size int // maximum size for batch
timeout time.Duration // idle timeout for batch
async bool // whether we are using async batching
in chan batcherRequest // incoming items to batch
closed chan struct{} // close to indicate batcher shut down
atexit atexit.FnHandle // atexit handle
shutOnce sync.Once // make sure we shutdown once only
wg sync.WaitGroup // wait for shutdown
}
// batcherRequest holds an incoming request with a place for a reply
type batcherRequest struct {
commitInfo *files.UploadSessionFinishArg
result chan<- batcherResponse
}
// Return true if batcherRequest is the quit request
func (br *batcherRequest) isQuit() bool {
return br.commitInfo == nil
}
// Send this to get the engine to quit
var quitRequest = batcherRequest{}
// batcherResponse holds a response to be delivered to clients waiting
// for a batch to complete.
type batcherResponse struct {
err error
entry *files.FileMetadata
}
// newBatcher creates a new batcher structure
func newBatcher(ctx context.Context, f *Fs, mode string, size int, timeout time.Duration) (*batcher, error) {
// fs.Debugf(f, "Creating batcher with mode %q, size %d, timeout %v", mode, size, timeout)
if size > maxBatchSize || size < 0 {
return nil, fmt.Errorf("dropbox: batch size must be <= %d and >= 0 - it is currently %d", maxBatchSize, size)
}
async := false
switch mode {
case "sync":
if size <= 0 {
ci := fs.GetConfig(ctx)
size = ci.Transfers
}
if timeout <= 0 {
timeout = defaultTimeoutSync
}
case "async":
if size <= 0 {
size = defaultBatchSizeAsync
}
if timeout <= 0 {
timeout = defaultTimeoutAsync
}
async = true
case "off":
size = 0
default:
return nil, fmt.Errorf("dropbox: batch mode must be sync|async|off not %q", mode)
}
b := &batcher{
f: f,
mode: mode,
size: size,
timeout: timeout,
async: async,
in: make(chan batcherRequest, size),
closed: make(chan struct{}),
}
if b.Batching() {
b.atexit = atexit.Register(b.Shutdown)
b.wg.Add(1)
go b.commitLoop(context.Background())
}
return b, nil
}
// Batching returns true if batching is active
func (b *batcher) Batching() bool {
return b.size > 0
}
// finishBatch commits the batch, returning a batch status to poll or maybe complete
func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
func (b *batcher) finishBatch(ctx context.Context, items []*files.UploadSessionFinishArg) (complete *files.UploadSessionFinishBatchResult, err error) {
var arg = &files.UploadSessionFinishBatchArg{
Entries: items,
}
err = f.pacer.Call(func() (bool, error) {
complete, err = f.srv.UploadSessionFinishBatchV2(arg)
err = b.f.pacer.Call(func() (bool, error) {
complete, err = b.f.srv.UploadSessionFinishBatchV2(arg)
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
@@ -37,10 +139,23 @@ func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinish
return complete, nil
}
// Called by the batcher to commit a batch
func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []*files.FileMetadata, errors []error) (err error) {
// commit a batch
func (b *batcher) commitBatch(ctx context.Context, items []*files.UploadSessionFinishArg, results []chan<- batcherResponse) (err error) {
// If commit fails then signal clients if sync
var signalled = b.async
defer func() {
if err != nil && !signalled {
// Signal to clients that there was an error
for _, result := range results {
result <- batcherResponse{err: err}
}
}
}()
desc := fmt.Sprintf("%s batch length %d starting with: %s", b.mode, len(items), items[0].Commit.Path)
fs.Debugf(b.f, "Committing %s", desc)
// finalise the batch getting either a result or a job id to poll
complete, err := f.finishBatch(ctx, items)
complete, err := b.finishBatch(ctx, items)
if err != nil {
return err
}
@@ -51,13 +166,19 @@ func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinish
return fmt.Errorf("expecting %d items in batch but got %d", len(results), len(entries))
}
// Format results for return
// Report results to clients
var (
errorTag = ""
errorCount = 0
)
for i := range results {
item := entries[i]
resp := batcherResponse{}
if item.Tag == "success" {
results[i] = item.Success
resp.entry = item.Success
} else {
errorTag := item.Tag
errorCount++
errorTag = item.Tag
if item.Failure != nil {
errorTag = item.Failure.Tag
if item.Failure.LookupFailed != nil {
@@ -70,9 +191,112 @@ func (f *Fs) commitBatch(ctx context.Context, items []*files.UploadSessionFinish
errorTag += "/" + item.Failure.PropertiesError.Tag
}
}
errors[i] = fmt.Errorf("upload failed: %s", errorTag)
resp.err = fmt.Errorf("batch upload failed: %s", errorTag)
}
if !b.async {
results[i] <- resp
}
}
// Results have been delivered, so the deferred handler must not signal again
signalled = true
// Report an error if any failed in the batch
if errorTag != "" {
return fmt.Errorf("batch had %d errors: last error: %s", errorCount, errorTag)
}
fs.Debugf(b.f, "Committed %s", desc)
return nil
}
// commitLoop runs the commit engine in the background
func (b *batcher) commitLoop(ctx context.Context) {
var (
items []*files.UploadSessionFinishArg // current batch of uncommitted files
results []chan<- batcherResponse // current batch of clients awaiting results
idleTimer = time.NewTimer(b.timeout)
commit = func() {
err := b.commitBatch(ctx, items, results)
if err != nil {
fs.Errorf(b.f, "%s batch commit: failed to commit batch length %d: %v", b.mode, len(items), err)
}
items, results = nil, nil
}
)
defer b.wg.Done()
defer idleTimer.Stop()
idleTimer.Stop()
outer:
for {
select {
case req := <-b.in:
if req.isQuit() {
break outer
}
items = append(items, req.commitInfo)
results = append(results, req.result)
idleTimer.Stop()
if len(items) >= b.size {
commit()
} else {
idleTimer.Reset(b.timeout)
}
case <-idleTimer.C:
if len(items) > 0 {
fs.Debugf(b.f, "Batch idle for %v so committing", b.timeout)
commit()
}
}
}
// commit any remaining items
if len(items) > 0 {
commit()
}
}
// Shutdown finishes any pending batches then shuts everything down
//
// Can be called from atexit handler
func (b *batcher) Shutdown() {
if !b.Batching() {
return
}
b.shutOnce.Do(func() {
atexit.Unregister(b.atexit)
fs.Infof(b.f, "Committing uploads - please wait...")
// show that batcher is shutting down
close(b.closed)
// quit the commitLoop by sending a quitRequest message
//
// Note that we don't close b.in because that will
// cause write to closed channel in Commit when we are
// exiting due to a signal.
b.in <- quitRequest
b.wg.Wait()
})
}
// Commit commits the file using a batch call, first adding it to the
// batch and then waiting for the batch to complete in a synchronous
// way if async is not set.
func (b *batcher) Commit(ctx context.Context, commitInfo *files.UploadSessionFinishArg) (entry *files.FileMetadata, err error) {
select {
case <-b.closed:
return nil, fserrors.FatalError(errors.New("batcher is shutting down"))
default:
}
fs.Debugf(b.f, "Adding %q to batch", commitInfo.Commit.Path)
resp := make(chan batcherResponse, 1)
b.in <- batcherRequest{
commitInfo: commitInfo,
result: resp,
}
// If running async then don't wait for the result
if b.async {
return nil, nil
}
result := <-resp
return result.entry, result.err
}
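The intended call pattern, sketched — construction of the files.UploadSessionFinishArg is elided and error handling trimmed:

// One batcher per Fs, created at NewFs time:
b, err := newBatcher(ctx, f, "sync", 0, 0) // size/timeout 0 => per-mode defaults
if err != nil {
	return err
}
// Each upload commits through the batcher; in sync mode this blocks
// until the whole batch lands and returns this file's metadata.
entry, err := b.Commit(ctx, commitInfo)
// On exit, flush anything still queued.
b.Shutdown()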
View File
@@ -47,7 +47,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/batcher"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
@@ -122,14 +121,6 @@ var (
// Errors
errNotSupportedInSharedMode = fserrors.NoRetryError(errors.New("not supported in shared files mode"))
// Configure the batcher
defaultBatcherOptions = batcher.Options{
MaxBatchSize: 1000,
DefaultTimeoutSync: 500 * time.Millisecond,
DefaultTimeoutAsync: 10 * time.Second,
DefaultBatchSizeAsync: 100,
}
)
// Gets an oauth config with the right scopes
@@ -161,7 +152,7 @@ func init() {
},
})
},
Options: append(append(oauthutil.SharedOptions, []fs.Option{{
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "chunk_size",
Help: fmt.Sprintf(`Upload chunk size (< %v).
@@ -219,6 +210,68 @@ Note that we don't unmount the shared folder afterwards so the
shared folder.`,
Default: false,
Advanced: true,
}, {
Name: "batch_mode",
Help: `Upload file batching sync|async|off.
This sets the batch mode used by rclone.
For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)
This has 3 possible values
- off - no batching
- sync - batch uploads and check completion (default)
- async - batch upload and don't check completion
Rclone will close any outstanding batches when it exits which may make
a delay on quit.
`,
Default: "sync",
Advanced: true,
}, {
Name: "batch_size",
Help: `Max number of files in upload batch.
This sets the batch size of files to upload. It has to be less than 1000.
By default this is 0 which means rclone will calculate the batch size
depending on the setting of batch_mode.
- batch_mode: async - default batch_size is 100
- batch_mode: sync - default batch_size is the same as --transfers
- batch_mode: off - not in use
Rclone will close any outstanding batches when it exits which may make
a delay on quit.
Setting this is a great idea if you are uploading lots of small files
as it will make them a lot quicker. You can use --transfers 32 to
maximise throughput.
`,
Default: 0,
Advanced: true,
}, {
Name: "batch_timeout",
Help: `Max time to allow an idle upload batch before uploading.
If an upload batch is idle for more than this long then it will be
uploaded.
The default for this is 0 which means rclone will choose a sensible
default based on the batch_mode in use.
- batch_mode: async - default batch_timeout is 10s
- batch_mode: sync - default batch_timeout is 500ms
- batch_mode: off - not in use
`,
Default: fs.Duration(0),
Advanced: true,
}, {
Name: "batch_commit_timeout",
Help: `Max time to wait for a batch to finish committing`,
Default: fs.Duration(10 * time.Minute),
Advanced: true,
}, {
Name: "pacer_min_sleep",
Default: defaultMinSleep,
@@ -237,22 +290,23 @@ shared folder.`,
encoder.EncodeDel |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8,
}}...), defaultBatcherOptions.FsOptions("For full info see [the main docs](https://rclone.org/dropbox/#batch-mode)\n\n")...),
}}...),
})
}
// Options defines the configuration for this backend
type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
AsyncBatch bool `config:"async_batch"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
Enc encoder.MultiEncoder `config:"encoding"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Impersonate string `config:"impersonate"`
SharedFiles bool `config:"shared_files"`
SharedFolders bool `config:"shared_folders"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
BatchCommitTimeout fs.Duration `config:"batch_commit_timeout"`
AsyncBatch bool `config:"async_batch"`
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote dropbox server
@@ -271,7 +325,7 @@ type Fs struct {
slashRootSlash string // root with "/" prefix and postfix, lowercase
pacer *fs.Pacer // To pace the API calls
ns string // The namespace we are using or "" for none
batcher *batcher.Batcher[*files.UploadSessionFinishArg, *files.FileMetadata]
batcher *batcher // batch builder
}
// Object describes a dropbox object
@@ -397,11 +451,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ci: ci,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(opt.PacerMinSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
batcherOptions := defaultBatcherOptions
batcherOptions.Mode = f.opt.BatchMode
batcherOptions.Size = f.opt.BatchSize
batcherOptions.Timeout = time.Duration(f.opt.BatchTimeout)
f.batcher, err = batcher.New(ctx, f, f.commitBatch, batcherOptions)
f.batcher, err = newBatcher(ctx, f, f.opt.BatchMode, f.opt.BatchSize, time.Duration(f.opt.BatchTimeout))
if err != nil {
return nil, err
}
@@ -1672,7 +1722,7 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
// If we are batching then we should have written all the data now
// store the commit info now for a batch commit
if o.fs.batcher.Batching() {
return o.fs.batcher.Commit(ctx, o.remote, args)
return o.fs.batcher.Commit(ctx, args)
}
err = o.fs.pacer.Call(func() (bool, error) {

View File

@@ -28,14 +28,14 @@ var retryErrorCodes = []int{
509, // Bandwidth Limit Exceeded
}
var errorRegex = regexp.MustCompile(`#(\d{1,3})`)
var errorRegex = regexp.MustCompile(`#\d{1,3}`)
func parseFichierError(err error) int {
matches := errorRegex.FindStringSubmatch(err.Error())
if len(matches) == 0 {
return 0
}
code, err := strconv.Atoi(matches[1])
code, err := strconv.Atoi(matches[0])
if err != nil {
fs.Debugf(nil, "failed parsing fichier error: %v", err)
return 0
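To see why the capture group matters, a quick stand-alone check (the error string is illustrative): matches[1] holds just the digits, whereas matches[0] of the groupless pattern keeps the leading '#' and fails strconv.Atoi.

package main

import (
    "fmt"
    "regexp"
    "strconv"
)

func main() {
    errorRegex := regexp.MustCompile(`#(\d{1,3})`)
    matches := errorRegex.FindStringSubmatch("Flood detected #446 (you must wait)")
    if len(matches) > 1 {
        code, err := strconv.Atoi(matches[1])
        fmt.Println(code, err) // 446 <nil>
    }
}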

View File

@@ -158,9 +158,9 @@ type Fs struct {
tokenMu sync.Mutex // hold when reading the token
token string // current access token
tokenExpiry time.Time // time the current token expires
tokenExpired atomic.Int32
canCopyWithName bool // set if detected that can use fi_name in copy
precision time.Duration // precision reported
tokenExpired int32 // read and written with atomic
canCopyWithName bool // set if detected that can use fi_name in copy
precision time.Duration // precision reported
}
// Object describes a filefabric object
@@ -243,7 +243,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, st
err = status // return the error from the RPC
code := status.GetCode()
if code == "login_token_expired" {
f.tokenExpired.Add(1)
atomic.AddInt32(&f.tokenExpired, 1)
} else {
for _, retryCode := range retryStatusCodes {
if code == retryCode.code {
@@ -323,12 +323,12 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
var refreshed = false
defer func() {
if refreshed {
f.tokenExpired.Store(0)
atomic.StoreInt32(&f.tokenExpired, 0)
}
f.tokenMu.Unlock()
}()
expired := f.tokenExpired.Load() != 0
expired := atomic.LoadInt32(&f.tokenExpired) != 0
if expired {
fs.Debugf(f, "Token invalid - refreshing")
}
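The two forms in this hunk are equivalent: Go 1.19's atomic.Int32 type wraps the older sync/atomic free functions. A stand-alone comparison:

package main

import (
    "fmt"
    "sync/atomic"
)

func main() {
    // older style: a plain int32 accessed via the free functions
    var legacy int32
    atomic.AddInt32(&legacy, 1)
    fmt.Println(atomic.LoadInt32(&legacy) != 0) // true
    atomic.StoreInt32(&legacy, 0)

    // newer style: the typed wrapper with methods
    var modern atomic.Int32
    modern.Add(1)
    fmt.Println(modern.Load() != 0) // true
    modern.Store(0)
}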

View File

@@ -15,7 +15,7 @@ import (
"sync"
"time"
"github.com/jlaffaye/ftp"
"github.com/rclone/ftp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
@@ -249,6 +249,7 @@ type Fs struct {
pool []*ftp.ServerConn
drain *time.Timer // used to drain the pool when we stop using the connections
tokens *pacer.TokenDispenser
tlsConf *tls.Config
pacer *fs.Pacer // pacer for FTP connections
fGetTime bool // true if the ftp library accepts GetTime
fSetTime bool // true if the ftp library accepts SetTime
@@ -361,36 +362,10 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
return fserrors.ShouldRetry(err), err
}
// Get a TLS config with a unique session cache.
//
// We can't share session caches between connections.
//
// See: https://github.com/rclone/rclone/issues/7234
func (f *Fs) tlsConfig() *tls.Config {
var tlsConfig *tls.Config
if f.opt.TLS || f.opt.ExplicitTLS {
tlsConfig = &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
if f.opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize)
}
if f.opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
}
return tlsConfig
}
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
fs.Debugf(f, "Connecting to FTP server")
// tls.Config for this connection only. Will be used for data
// and control connections.
tlsConfig := f.tlsConfig()
// Make ftp library dial with fshttp dialer optionally using TLS
initialConnection := true
dial := func(network, address string) (conn net.Conn, err error) {
@@ -408,7 +383,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
return nil, err
}
// Connect using cleartext only for non TLS
if tlsConfig == nil {
if f.tlsConf == nil {
return conn, nil
}
// Initial connection only needs to be cleartext for explicit TLS
@@ -417,7 +392,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
return conn, nil
}
// Upgrade connection to TLS
tlsConn := tls.Client(conn, tlsConfig)
tlsConn := tls.Client(conn, f.tlsConf)
// Do the initial handshake - tls.Client doesn't do it for us
// If we do this then connections to proftpd/pureftpd lock up
// See: https://github.com/rclone/rclone/issues/6426
@@ -439,9 +414,9 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.TLS {
// Our dialer takes care of TLS but ftp library also needs tlsConf
// as a trigger for sending PBSZ and PROT options to server.
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
} else if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(tlsConfig))
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
}
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
@@ -596,6 +571,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
if opt.TLS && opt.ExplicitTLS {
return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config")
}
var tlsConfig *tls.Config
if opt.TLS || opt.ExplicitTLS {
tlsConfig = &tls.Config{
ServerName: opt.Host,
InsecureSkipVerify: opt.SkipVerifyTLSCert,
}
if opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(opt.TLSCacheSize)
}
if opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
}
u := protocol + path.Join(dialAddr+"/", root)
ci := fs.GetConfig(ctx)
f := &Fs{
@@ -608,6 +596,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
pass: pass,
dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency),
tlsConf: tlsConfig,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{
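A minimal stand-alone sketch of the per-connection tls.Config pattern on the removed side of this hunk (issue #7234): each connection gets its own ClientSessionCache so TLS sessions are never shared between connections. Names and parameters are illustrative.

package main

import (
    "crypto/tls"
    "fmt"
)

// newTLSConfig builds a fresh config, and therefore a fresh session
// cache, for every connection.
func newTLSConfig(host string, skipVerify bool, cacheSize int) *tls.Config {
    c := &tls.Config{
        ServerName:         host,
        InsecureSkipVerify: skipVerify,
    }
    if cacheSize > 0 {
        c.ClientSessionCache = tls.NewLRUClientSessionCache(cacheSize)
    }
    return c
}

func main() {
    a := newTLSConfig("ftp.example.com", false, 32)
    b := newTLSConfig("ftp.example.com", false, 32)
    fmt.Println(a.ClientSessionCache != b.ClientSessionCache) // true: caches are distinct
}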

View File

@@ -29,7 +29,6 @@ import (
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/lib/batcher"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
@@ -72,14 +71,6 @@ var (
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
}
// Configure the batcher
defaultBatcherOptions = batcher.Options{
MaxBatchSize: 50,
DefaultTimeoutSync: 1000 * time.Millisecond,
DefaultTimeoutAsync: 10 * time.Second,
DefaultBatchSizeAsync: 50,
}
)
// Register with Fs
@@ -120,7 +111,7 @@ will count towards storage in your Google Account.`)
}
return nil, fmt.Errorf("unknown state %q", config.State)
},
Options: append(append(oauthutil.SharedOptions, []fs.Option{{
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "read_only",
Default: false,
Help: `Set to make the Google Photos backend read only.
@@ -167,7 +158,7 @@ listings and won't be transferred.`,
Default: (encoder.Base |
encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8),
}}...), defaultBatcherOptions.FsOptions("")...),
}}...),
})
}
@@ -178,9 +169,6 @@ type Options struct {
StartYear int `config:"start_year"`
IncludeArchived bool `config:"include_archived"`
Enc encoder.MultiEncoder `config:"encoding"`
BatchMode string `config:"batch_mode"`
BatchSize int `config:"batch_size"`
BatchTimeout fs.Duration `config:"batch_timeout"`
}
// Fs represents a remote storage server
@@ -199,7 +187,6 @@ type Fs struct {
uploadedMu sync.Mutex // to protect the below
uploaded dirtree.DirTree // record of uploaded items
createMu sync.Mutex // held when creating albums to prevent dupes
batcher *batcher.Batcher[uploadedItem, *api.MediaItem]
}
// Object describes a storage object
@@ -325,14 +312,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
albums: map[bool]*albums{},
uploaded: dirtree.New(),
}
batcherOptions := defaultBatcherOptions
batcherOptions.Mode = f.opt.BatchMode
batcherOptions.Size = f.opt.BatchSize
batcherOptions.Timeout = time.Duration(f.opt.BatchTimeout)
f.batcher, err = batcher.New(ctx, f, f.commitBatch, batcherOptions)
if err != nil {
return nil, err
}
f.features = (&fs.Features{
ReadMimeType: true,
}).Fill(ctx, f)
@@ -802,13 +781,6 @@ func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.None)
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
f.batcher.Shutdown()
return nil
}
// ------------------------------------------------------------
// Fs returns the parent Fs
@@ -989,82 +961,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return resp.Body, err
}
// input to the batcher
type uploadedItem struct {
AlbumID string // desired album
UploadToken string // upload ID
}
// Commit a batch of items to albumID returning the errors in errors
func (f *Fs) commitBatchAlbumID(ctx context.Context, items []uploadedItem, results []*api.MediaItem, errors []error, albumID string) {
// Create the media item from an UploadToken, optionally adding to an album
opts := rest.Opts{
Method: "POST",
Path: "/mediaItems:batchCreate",
}
var request = api.BatchCreateRequest{
AlbumID: albumID,
}
itemsInBatch := 0
for i := range items {
if items[i].AlbumID == albumID {
request.NewMediaItems = append(request.NewMediaItems, api.NewMediaItem{
SimpleMediaItem: api.SimpleMediaItem{
UploadToken: items[i].UploadToken,
},
})
itemsInBatch++
}
}
var result api.BatchCreateResponse
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, request, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
err = fmt.Errorf("failed to create media item: %w", err)
}
if err == nil && len(result.NewMediaItemResults) != itemsInBatch {
err = fmt.Errorf("bad response to BatchCreate expecting %d items but got %d", itemsInBatch, len(result.NewMediaItemResults))
}
j := 0
for i := range items {
if items[i].AlbumID == albumID {
if err == nil {
media := &result.NewMediaItemResults[j]
if media.Status.Code != 0 {
errors[i] = fmt.Errorf("upload failed: %s (%d)", media.Status.Message, media.Status.Code)
} else {
results[i] = &media.MediaItem
}
} else {
errors[i] = err
}
j++
}
}
}
// Called by the batcher to commit a batch
func (f *Fs) commitBatch(ctx context.Context, items []uploadedItem, results []*api.MediaItem, errors []error) (err error) {
// Discover all the AlbumIDs as we have to upload these separately
//
// Should maybe have one batcher per AlbumID
albumIDs := map[string]struct{}{}
for i := range items {
albumIDs[items[i].AlbumID] = struct{}{}
}
// batch the albums
for albumID := range albumIDs {
// errors returned in errors
f.commitBatchAlbumID(ctx, items, results, errors, albumID)
}
return nil
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
@@ -1125,26 +1021,37 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errors.New("empty upload token")
}
uploaded := uploadedItem{
AlbumID: albumID,
UploadToken: uploadToken,
// Create the media item from an UploadToken, optionally adding to an album
opts = rest.Opts{
Method: "POST",
Path: "/mediaItems:batchCreate",
}
// Save the upload into an album
var info *api.MediaItem
if o.fs.batcher.Batching() {
info, err = o.fs.batcher.Commit(ctx, o.remote, uploaded)
} else {
errors := make([]error, 1)
results := make([]*api.MediaItem, 1)
err = o.fs.commitBatch(ctx, []uploadedItem{uploaded}, results, errors)
if err == nil {
err = errors[0]
info = results[0]
}
var request = api.BatchCreateRequest{
AlbumID: albumID,
NewMediaItems: []api.NewMediaItem{
{
SimpleMediaItem: api.SimpleMediaItem{
UploadToken: uploadToken,
},
},
},
}
o.setMetaData(info)
var result api.BatchCreateResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, request, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return fmt.Errorf("failed to create media item: %w", err)
}
if len(result.NewMediaItemResults) != 1 {
return errors.New("bad response to BatchCreate wrong number of items")
}
mediaItemResult := result.NewMediaItemResults[0]
if mediaItemResult.Status.Code != 0 {
return fmt.Errorf("upload failed: %s (%d)", mediaItemResult.Status.Message, mediaItemResult.Status.Code)
}
o.setMetaData(&mediaItemResult.MediaItem)
// Add upload to internal storage
if pattern.isUpload {
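The commitBatch contract above uses parallel slices: the outcome for items[i] lands in results[i] or errors[i]. A compressed sketch of the single-item fallback path, reusing the hunk's names (illustrative only):

errs := make([]error, 1)
results := make([]*api.MediaItem, 1)
// a single upload is just a batch of one
err = o.fs.commitBatch(ctx, []uploadedItem{uploaded}, results, errs)
if err == nil {
    err = errs[0]     // per-item error for our one item
    info = results[0] // per-item result, may be nil on error
}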

View File

@@ -23,7 +23,6 @@ func TestIntegration(t *testing.T) {
NilObject: (*hasher.Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"OpenChunkWriter",
},
UnimplementableObjectMethods: []string{},
}

View File

@@ -21,7 +21,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/pacer"
)
// Fs represents a HDFS server
@@ -32,15 +31,8 @@ type Fs struct {
opt Options // options for this backend
ci *fs.ConfigInfo // global config
client *hdfs.Client
pacer *fs.Pacer // pacer for API calls
}
const (
minSleep = 20 * time.Millisecond
maxSleep = 10 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
// copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go
func getKerberosClient() (*krb.Client, error) {
configPath := os.Getenv("KRB5_CONFIG")
@@ -122,7 +114,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
opt: *opt,
ci: fs.GetConfig(ctx),
client: client,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
}
f.features = (&fs.Features{

View File

@@ -5,12 +5,10 @@ package hdfs
import (
"context"
"errors"
"io"
"path"
"time"
"github.com/colinmarc/hdfs/v2"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/readers"
@@ -108,7 +106,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update object
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
realpath := o.fs.realpath(o.remote)
realpath := o.fs.realpath(src.Remote())
dirname := path.Dir(realpath)
fs.Debugf(o.fs, "update [%s]", realpath)
@@ -143,23 +141,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
// If the datanodes have acknowledged all writes but not yet
// to the namenode, FileWriter.Close can return ErrReplicating
// (wrapped in an os.PathError). This indicates that all data
// has been written, but the lease is still open for the file.
//
// It is safe in this case to either ignore the error (and let
// the lease expire on its own) or to call Close multiple
// times until it completes without an error. The Java client,
// for context, always chooses to retry, with exponential
// backoff.
err = o.fs.pacer.Call(func() (bool, error) {
err := out.Close()
if err == nil {
return false, nil
}
return errors.Is(err, hdfs.ErrReplicating), err
})
err = out.Close()
if err != nil {
cleanup()
return err
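A stand-alone variant of the retried Close on the removed side of this hunk, spelling out the exponential backoff that the comment attributes to the Java client; it assumes github.com/colinmarc/hdfs/v2's ErrReplicating as used above, and the delays are illustrative.

import (
    "errors"
    "io"
    "time"

    "github.com/colinmarc/hdfs/v2"
)

// closeWithBackoff retries Close while the namenode still reports the
// file as replicating; all data has been written at that point, so
// retrying (or letting the lease expire) is safe.
func closeWithBackoff(out io.Closer) error {
    delay := 100 * time.Millisecond
    var err error
    for i := 0; i < 10; i++ {
        err = out.Close()
        if err == nil || !errors.Is(err, hdfs.ErrReplicating) {
            return err
        }
        time.Sleep(delay)
        delay *= 2
    }
    return err
}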

View File

@@ -395,7 +395,7 @@ type JottaFile struct {
State string `xml:"currentRevision>state"`
CreatedAt JottaTime `xml:"currentRevision>created"`
ModifiedAt JottaTime `xml:"currentRevision>modified"`
UpdatedAt JottaTime `xml:"currentRevision>updated"`
Updated JottaTime `xml:"currentRevision>updated"`
Size int64 `xml:"currentRevision>size"`
MimeType string `xml:"currentRevision>mime"`
MD5 string `xml:"currentRevision>md5"`

View File

@@ -67,13 +67,9 @@ const (
legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
legacyConfigVersion = 0
teliaseCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaseCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaseCloudClientID = "desktop"
telianoCloudTokenURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/token"
telianoCloudAuthURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/auth"
telianoCloudClientID = "desktop"
teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaCloudClientID = "desktop"
tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
@@ -92,33 +88,6 @@ func init() {
Description: "Jottacloud",
NewFs: NewFs,
Config: Config,
MetadataInfo: &fs.MetadataInfo{
Help: `Jottacloud has limited support for metadata, currently an extended set of timestamps.`,
System: map[string]fs.MetadataHelp{
"btime": {
Help: "Time of file birth (creation), read from rclone metadata",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z07:00",
},
"mtime": {
Help: "Time of last modification, read from rclone metadata",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z07:00",
},
"utime": {
Help: "Time of last upload, when current revision was created, generated by backend",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z07:00",
ReadOnly: true,
},
"content-type": {
Help: "MIME type, also known as media type",
Type: "string",
Example: "text/plain",
ReadOnly: true,
},
},
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "md5_memory_limit",
Help: "Files bigger than this will be cached on disk to calculate the MD5 if required.",
@@ -169,11 +138,8 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
Value: "legacy",
Help: "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
}, {
Value: "telia_se",
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud (Sweden).",
}, {
Value: "telia_no",
Help: "Telia Sky authentication.\nUse this if you are using Telia Sky (Norway).",
Value: "telia",
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud.",
}, {
Value: "tele2",
Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
@@ -272,32 +238,17 @@ machines.`)
return nil, fmt.Errorf("error while saving token: %w", err)
}
return fs.ConfigGoto("choose_device")
case "telia_se": // telia_se cloud config
case "telia": // telia cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, teliaseCloudClientID)
m.Set(configTokenURL, teliaseCloudTokenURL)
m.Set(configClientID, teliaCloudClientID)
m.Set(configTokenURL, teliaCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: teliaseCloudAuthURL,
TokenURL: teliaseCloudTokenURL,
AuthURL: teliaCloudAuthURL,
TokenURL: teliaCloudTokenURL,
},
ClientID: teliaseCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "telia_no": // telia_no cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, telianoCloudClientID)
m.Set(configTokenURL, telianoCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: telianoCloudAuthURL,
TokenURL: telianoCloudTokenURL,
},
ClientID: telianoCloudClientID,
ClientID: teliaCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
@@ -524,9 +475,7 @@ type Object struct {
remote string
hasMetaData bool
size int64
createTime time.Time
modTime time.Time
updateTime time.Time
md5 string
mimeType string
}
@@ -1001,9 +950,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
CanHaveEmptyDirectories: true,
ReadMimeType: true,
WriteMimeType: false,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: false,
}).Fill(ctx, f)
f.jfsSrv.SetErrorHandler(errorHandler)
if opt.TrashedOnly { // we cannot support showing Trashed Files when using ListR right now
@@ -1190,7 +1136,6 @@ func parseListRStream(ctx context.Context, r io.Reader, filesystem *Fs, callback
remote: filesystem.opt.Enc.ToStandardPath(path.Join(f.Path, f.Name)),
size: f.Size,
md5: f.Checksum,
createTime: time.Time(f.Created),
modTime: time.Time(f.Modified),
})
}
@@ -1420,7 +1365,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
// is currently in trash, but can be made to match, it will be
// restored. Returns ErrorObjectNotFound if upload will be necessary
// to get a matching remote file.
func (f *Fs) createOrUpdate(ctx context.Context, file string, createTime time.Time, modTime time.Time, size int64, md5 string) (info *api.JottaFile, err error) {
func (f *Fs) createOrUpdate(ctx context.Context, file string, modTime time.Time, size int64, md5 string) (info *api.JottaFile, err error) {
opts := rest.Opts{
Method: "POST",
Path: f.filePath(file),
@@ -1430,10 +1375,11 @@ func (f *Fs) createOrUpdate(ctx context.Context, file string, createTime time.Ti
opts.Parameters.Set("cphash", "true")
fileDate := api.JottaTime(modTime).String()
opts.ExtraHeaders["JSize"] = strconv.FormatInt(size, 10)
opts.ExtraHeaders["JMd5"] = md5
opts.ExtraHeaders["JCreated"] = api.JottaTime(createTime).String()
opts.ExtraHeaders["JModified"] = api.JottaTime(modTime).String()
opts.ExtraHeaders["JCreated"] = fileDate
opts.ExtraHeaders["JModified"] = fileDate
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
@@ -1496,7 +1442,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?)
if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
fs.Debugf(src, "Server-side copied to trashed destination, restoring")
info, err = f.createOrUpdate(ctx, remote, srcObj.createTime, srcObj.modTime, srcObj.size, srcObj.md5)
info, err = f.createOrUpdate(ctx, remote, srcObj.modTime, srcObj.size, srcObj.md5)
}
if err != nil {
@@ -1759,9 +1705,7 @@ func (o *Object) setMetaData(info *api.JottaFile) (err error) {
o.size = info.Size
o.md5 = info.MD5
o.mimeType = info.MimeType
o.createTime = time.Time(info.CreatedAt)
o.modTime = time.Time(info.ModifiedAt)
o.updateTime = time.Time(info.UpdatedAt)
return nil
}
@@ -1806,7 +1750,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
// (note that if size/md5 does not match, the file content will
// also be modified if deduplication is possible, i.e. it is
// important to use correct/latest values)
_, err = o.fs.createOrUpdate(ctx, o.remote, o.createTime, modTime, o.size, o.md5)
_, err = o.fs.createOrUpdate(ctx, o.remote, modTime, o.size, o.md5)
if err != nil {
if err == fs.ErrorObjectNotFound {
// file was modified (size/md5 changed) between readMetaData and createOrUpdate?
@@ -1943,37 +1887,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Wrap the accounting back onto the stream
in = wrap(in)
}
// Fetch metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, src, options)
if err != nil {
return fmt.Errorf("failed to read metadata from source object: %w", err)
}
var createdTime string
var modTime string
if meta != nil {
if v, ok := meta["btime"]; ok {
t, err := time.Parse(time.RFC3339Nano, v) // metadata stores RFC3339Nano timestamps
if err != nil {
fs.Debugf(o, "failed to parse metadata btime: %q: %v", v, err)
} else {
createdTime = api.Rfc3339Time(t).String() // jottacloud api wants RFC3339 timestamps
}
}
if v, ok := meta["mtime"]; ok {
t, err := time.Parse(time.RFC3339Nano, v)
if err != nil {
fs.Debugf(o, "failed to parse metadata mtime: %q: %v", v, err)
} else {
modTime = api.Rfc3339Time(t).String()
}
}
}
if modTime == "" { // prefer mtime in meta as Modified time, fallback to source ModTime
modTime = api.Rfc3339Time(src.ModTime(ctx)).String()
}
if createdTime == "" { // if no Created time set same as Modified
createdTime = modTime
}
// use the api to allocate the file first and get resume / deduplication info
var resp *http.Response
@@ -1983,12 +1896,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
Options: options,
ExtraHeaders: make(map[string]string),
}
fileDate := api.Rfc3339Time(src.ModTime(ctx)).String()
// the allocate request
var request = api.AllocateFileRequest{
Bytes: size,
Created: createdTime,
Modified: modTime,
Created: fileDate,
Modified: fileDate,
Md5: md5String,
Path: o.fs.allocatePathRaw(o.remote, true),
}
@@ -2003,10 +1917,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
// If the file state is INCOMPLETE and CORRUPT, we must upload it.
// Else, if the file state is COMPLETE, we don't need to upload it because
// the content is already there, possibly it was created with deduplication,
// and also any metadata changes are already performed by the allocate request.
// If the file state is INCOMPLETE or CORRUPT, try to upload it
if response.State != "COMPLETED" {
// how much do we still have to upload?
remainingBytes := size - response.ResumePos
@@ -2030,18 +1941,22 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// send the remaining bytes
_, err = o.fs.apiSrv.CallJSON(ctx, &opts, nil, &result)
resp, err = o.fs.apiSrv.CallJSON(ctx, &opts, nil, &result)
if err != nil {
return err
}
// Upload response contains main metadata properties (size, md5 and modTime)
// which could be set back to the object, but it does not contain the
// necessary information to set the createTime and updateTime properties,
// so we must perform a read instead.
// finally update the meta data
o.hasMetaData = true
o.size = result.Bytes
o.md5 = result.Md5
o.modTime = time.Unix(result.Modified/1000, 0)
} else {
// If the file state is COMPLETE we don't need to upload it because the file was already found, but we still need to update our metadata
return o.readMetaData(ctx, true)
}
// in any case we must update the object meta data
return o.readMetaData(ctx, true)
return nil
}
func (o *Object) remove(ctx context.Context, hard bool) error {
@@ -2076,22 +1991,6 @@ func (o *Object) Remove(ctx context.Context) error {
return o.remove(ctx, o.fs.opt.HardDelete)
}
// Metadata returns metadata for an object
//
// It should return nil if there is no Metadata
func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
err = o.readMetaData(ctx, false)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return nil, err
}
metadata.Set("btime", o.createTime.Format(time.RFC3339Nano)) // metadata timestamps should be RFC3339Nano
metadata.Set("mtime", o.modTime.Format(time.RFC3339Nano))
metadata.Set("utime", o.updateTime.Format(time.RFC3339Nano))
metadata.Set("content-type", o.mimeType)
return metadata, nil
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
@@ -2106,5 +2005,4 @@ var (
_ fs.CleanUpper = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
_ fs.Metadataer = (*Object)(nil)
)
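On the metadata timestamps above: rclone metadata carries RFC3339Nano strings while the Jottacloud API wants RFC3339, so the Update path converts between them. A compressed sketch of that conversion, reusing the hunk's names (illustrative only):

var createdTime, modTime string
if v, ok := meta["btime"]; ok {
    if t, err := time.Parse(time.RFC3339Nano, v); err == nil {
        createdTime = api.Rfc3339Time(t).String() // API format
    }
}
if v, ok := meta["mtime"]; ok {
    if t, err := time.Parse(time.RFC3339Nano, v); err == nil {
        modTime = api.Rfc3339Time(t).String()
    }
}
if modTime == "" { // fall back to the source ModTime
    modTime = api.Rfc3339Time(src.ModTime(ctx)).String()
}
if createdTime == "" { // if no Created time, use Modified
    createdTime = modTime
}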

View File

@@ -1,17 +1,11 @@
package jottacloud
import (
"context"
"crypto/md5"
"fmt"
"io"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -46,48 +40,3 @@ func TestReadMD5(t *testing.T) {
})
}
}
func (f *Fs) InternalTestMetadata(t *testing.T) {
ctx := context.Background()
contents := random.String(1000)
item := fstest.NewItem("test-metadata", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
utime := time.Now()
metadata := fs.Metadata{
"btime": "2009-05-06T04:05:06.499999999Z",
"mtime": "2010-06-07T08:09:07.599999999Z",
//"utime" - read-only
//"content-type" - read-only
}
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, contents, true, "text/html", metadata)
defer func() {
assert.NoError(t, obj.Remove(ctx))
}()
o := obj.(*Object)
gotMetadata, err := o.Metadata(ctx)
require.NoError(t, err)
for k, v := range metadata {
got := gotMetadata[k]
switch k {
case "btime":
assert.True(t, fstest.Time(v).Truncate(f.Precision()).Equal(fstest.Time(got)), fmt.Sprintf("btime not equal want %v got %v", v, got))
case "mtime":
assert.True(t, fstest.Time(v).Truncate(f.Precision()).Equal(fstest.Time(got)), fmt.Sprintf("mtime not equal want %v got %v", v, got))
case "utime":
gotUtime := fstest.Time(got)
dt := gotUtime.Sub(utime)
assert.True(t, dt < time.Minute && dt > -time.Minute, fmt.Sprintf("utime more than 1 minute out want %v got %v delta %v", utime, gotUtime, dt))
assert.True(t, fstest.Time(v).Equal(fstest.Time(got)))
case "content-type":
assert.True(t, o.MimeType(ctx) == got)
default:
assert.Equal(t, v, got, k)
}
}
}
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Metadata", f.InternalTestMetadata)
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -13,7 +13,6 @@ import (
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
"unicode/utf8"
@@ -244,7 +243,7 @@ type Fs struct {
precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
xattrSupported atomic.Int32 // whether xattrs are supported
xattrSupported int32 // whether xattrs are supported (atomic access)
// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
@@ -292,7 +291,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
lstat: os.Lstat,
}
if xattrSupported {
f.xattrSupported.Store(1)
f.xattrSupported = 1
}
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
f.features = (&fs.Features{
@@ -642,13 +641,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
//
// If it isn't empty it will return an error
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
localPath := f.localPath(dir)
if fi, err := os.Stat(localPath); err != nil {
return err
} else if !fi.IsDir() {
return fs.ErrorIsFile
}
return os.Remove(localPath)
return os.Remove(f.localPath(dir))
}
// Precision of the file system

View File

@@ -6,6 +6,7 @@ package local
import (
"fmt"
"strings"
"sync/atomic"
"syscall"
"github.com/pkg/xattr"
@@ -27,7 +28,7 @@ func (f *Fs) xattrIsNotSupported(err error) bool {
// Xattrs not supported can be ENOTSUP or ENOATTR or EINVAL (on Solaris)
if xattrErr.Err == syscall.EINVAL || xattrErr.Err == syscall.ENOTSUP || xattrErr.Err == xattr.ENOATTR {
// Show xattrs not supported
if f.xattrSupported.CompareAndSwap(1, 0) {
if atomic.CompareAndSwapInt32(&f.xattrSupported, 1, 0) {
fs.Errorf(f, "xattrs not supported - disabling: %v", err)
}
return true
@@ -40,7 +41,7 @@ func (f *Fs) xattrIsNotSupported(err error) bool {
// It doesn't return any attributes owned by this backend in
// metadataKeys
func (o *Object) getXattr() (metadata fs.Metadata, err error) {
if !xattrSupported || o.fs.xattrSupported.Load() == 0 {
if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
return nil, nil
}
var list []string
@@ -89,7 +90,7 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
//
// It doesn't set any attributes owned by this backend in metadataKeys
func (o *Object) setXattr(metadata fs.Metadata) (err error) {
if !xattrSupported || o.fs.xattrSupported.Load() == 0 {
if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
return nil
}
for k, value := range metadata {

View File

@@ -206,7 +206,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
ci := fs.GetConfig(ctx)
// cache *mega.Mega on username so we can reuse and share
// cache *mega.Mega on username so we can re-use and share
// them between remotes. They are expensive to make as they
// contain all the objects and sharing the objects makes the
// move code easier as we don't have to worry about mixing

View File

@@ -99,16 +99,6 @@ type ItemReference struct {
DriveType string `json:"driveType"` // Type of the drive, Read-Only
}
// GetID returns a normalized ID of the item
// If DriveID is known it will be prefixed to the ID with # separator
// Can be parsed using onedrive.parseNormalizedID(normalizedID)
func (i *ItemReference) GetID() string {
if !strings.Contains(i.ID, "#") {
return i.DriveID + "#" + i.ID
}
return i.ID
}
// RemoteItemFacet groups data needed to reference a OneDrive remote item
type RemoteItemFacet struct {
ID string `json:"id"` // The unique identifier of the item within the remote Drive. Read-only.
@@ -195,8 +185,8 @@ type Item struct {
Deleted *DeletedFacet `json:"deleted"` // Information about the deleted state of the item. Read-only.
}
// DeltaResponse is the response to the view delta method
type DeltaResponse struct {
// ViewDeltaResponse is the response to the view delta method
type ViewDeltaResponse struct {
Value []Item `json:"value"` // An array of Item objects which have been created, modified, or deleted.
NextLink string `json:"@odata.nextLink"` // A URL to retrieve the next available page of changes.
DeltaLink string `json:"@odata.deltaLink"` // A URL returned instead of @odata.nextLink after all current changes have been returned. Used to read the next set of changes in the future.
@@ -448,27 +438,3 @@ type Version struct {
type VersionsResponse struct {
Versions []Version `json:"value"`
}
// DriveResource is returned from /me/drive
type DriveResource struct {
DriveID string `json:"id"`
DriveName string `json:"name"`
DriveType string `json:"driveType"`
}
// DrivesResponse is returned from /sites/{siteID}/drives",
type DrivesResponse struct {
Drives []DriveResource `json:"value"`
}
// SiteResource is part of the response from from "/sites/root:"
type SiteResource struct {
SiteID string `json:"id"`
SiteName string `json:"displayName"`
SiteURL string `json:"webUrl"`
}
// SiteResponse is returned from "/sites/root:"
type SiteResponse struct {
Sites []SiteResource `json:"value"`
}
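The normalized-ID scheme described above implies a symmetric parse step; a minimal stand-alone sketch of both directions (the helper names here are hypothetical, modeled on the parseNormalizedID mentioned in the comment):

package main

import (
    "fmt"
    "strings"
)

// normalize prefixes the drive ID with '#' unless the item ID
// already carries one, mirroring GetID above.
func normalize(driveID, id string) string {
    if strings.Contains(id, "#") {
        return id
    }
    return driveID + "#" + id
}

// parseNormalized splits a normalized ID back into its parts.
func parseNormalized(norm string) (driveID, id string) {
    if i := strings.IndexByte(norm, '#'); i >= 0 {
        return norm[:i], norm[i+1:]
    }
    return "", norm
}

func main() {
    n := normalize("b!drive", "item123")
    fmt.Println(n)                  // b!drive#item123
    fmt.Println(parseNormalized(n)) // b!drive item123
}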

View File

@@ -371,6 +371,28 @@ file.
})
}
type driveResource struct {
DriveID string `json:"id"`
DriveName string `json:"name"`
DriveType string `json:"driveType"`
}
type drivesResponse struct {
Drives []driveResource `json:"value"`
}
type siteResource struct {
SiteID string `json:"id"`
SiteName string `json:"displayName"`
SiteURL string `json:"webUrl"`
}
type siteResponse struct {
Sites []siteResource `json:"value"`
}
type deltaResponse struct {
DeltaLink string `json:"@odata.deltaLink"`
Value []api.Item `json:"value"`
}
// Get the region and graphURL from the config
func getRegionURL(m configmap.Mapper) (region, graphURL string) {
region, _ = m.Get("region")
@@ -397,7 +419,7 @@ func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest
RootURL: graphURL,
Path: "/sites/root:" + opt.relativePath,
}
site := api.SiteResource{}
site := siteResource{}
_, err := srv.CallJSON(ctx, &opt.opts, nil, &site)
if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available site by relative path: %v", err))
@@ -414,7 +436,7 @@ func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest
}
}
drives := api.DrivesResponse{}
drives := drivesResponse{}
// We don't have the final ID yet?
// query Microsoft Graph
@@ -427,7 +449,7 @@ func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest
// Also call /me/drive as sometimes /me/drives doesn't return it #4068
if opt.opts.Path == "/me/drives" {
opt.opts.Path = "/me/drive"
meDrive := api.DriveResource{}
meDrive := driveResource{}
_, err := srv.CallJSON(ctx, &opt.opts, nil, &meDrive)
if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available drives: %v", err))
@@ -446,7 +468,7 @@ func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest
}
}
} else {
drives.Drives = append(drives.Drives, api.DriveResource{
drives.Drives = append(drives.Drives, driveResource{
DriveID: opt.finalDriveID,
DriveName: "Chosen Drive ID",
DriveType: "drive",
@@ -550,18 +572,15 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
case "url":
return fs.ConfigInput("url_end", "config_site_url", `Site URL
Examples:
- "mysite"
- "https://XXX.sharepoint.com/sites/mysite"
- "https://XXX.sharepoint.com/teams/ID"
Example: "https://contoso.sharepoint.com/sites/mysite" or "mysite"
`)
case "url_end":
siteURL := config.Result
re := regexp.MustCompile(`https://.*\.sharepoint\.com(/.*)`)
re := regexp.MustCompile(`https://.*\.sharepoint\.com/sites/(.*)`)
match := re.FindStringSubmatch(siteURL)
if len(match) == 2 {
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
relativePath: match[1],
relativePath: "/sites/" + match[1],
})
}
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
@@ -583,7 +602,7 @@ Examples:
Path: "/sites?search=" + searchTerm,
}
sites := api.SiteResponse{}
sites := siteResponse{}
_, err := srv.CallJSON(ctx, &opts, nil, &sites)
if err != nil {
return fs.ConfigError("choose_type", fmt.Sprintf("Failed to query available sites: %v", err))
@@ -1095,29 +1114,32 @@ func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, e
// If directories is set it only sends directories
// User function to process a File item from listAll
//
// If an error is returned then processing stops
type listAllFn func(*api.Item) error
// Should return true to finish processing
type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
//
// This listing function works on both normal listings and delta listings
func (f *Fs) _listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn, opts *rest.Opts, result any, pValue *[]api.Item, pNextLink *string) (err error) {
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
// Top parameter asks for bigger pages of data
// https://dev.onedrive.com/odata/optional-query-parameters.htm
opts := f.newOptsCall(dirID, "GET", fmt.Sprintf("/children?$top=%d", f.opt.ListChunk))
OUTER:
for {
var result api.ListChildrenResponse
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, opts, nil, result)
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return fmt.Errorf("couldn't list files: %w", err)
return found, fmt.Errorf("couldn't list files: %w", err)
}
if len(*pValue) == 0 {
if len(result.Value) == 0 {
break
}
for i := range *pValue {
item := &(*pValue)[i]
for i := range result.Value {
item := &result.Value[i]
isFolder := item.GetFolder() != nil
if isFolder {
if filesOnly {
@@ -1132,60 +1154,18 @@ func (f *Fs) _listAll(ctx context.Context, dirID string, directoriesOnly bool, f
continue
}
item.Name = f.opt.Enc.ToStandardName(item.GetName())
err = fn(item)
if err != nil {
return err
if fn(item) {
found = true
break OUTER
}
}
if *pNextLink == "" {
if result.NextLink == "" {
break
}
opts.Path = ""
opts.Parameters = nil
opts.RootURL = *pNextLink
// reset results
*pNextLink = ""
*pValue = nil
opts.RootURL = result.NextLink
}
return nil
}
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (err error) {
// Top parameter asks for bigger pages of data
// https://dev.onedrive.com/odata/optional-query-parameters.htm
opts := f.newOptsCall(dirID, "GET", fmt.Sprintf("/children?$top=%d", f.opt.ListChunk))
var result api.ListChildrenResponse
return f._listAll(ctx, dirID, directoriesOnly, filesOnly, fn, &opts, &result, &result.Value, &result.NextLink)
}
// Convert a list item into a DirEntry
//
// Can return nil for an item which should be skipped
func (f *Fs) itemToDirEntry(ctx context.Context, dir string, info *api.Item) (entry fs.DirEntry, err error) {
if !f.opt.ExposeOneNoteFiles && info.GetPackageType() == api.PackageTypeOneNote {
fs.Debugf(info.Name, "OneNote file not shown in directory listing")
return nil, nil
}
remote := path.Join(dir, info.GetName())
folder := info.GetFolder()
if folder != nil {
// cache the directory ID for later lookups
id := info.GetID()
f.dirCache.Put(remote, id)
d := fs.NewDir(remote, time.Time(info.GetLastModifiedDateTime())).SetID(id)
d.SetItems(folder.ChildCount)
entry = d
} else {
o, err := f.newObjectWithInfo(ctx, remote, info)
if err != nil {
return nil, err
}
entry = o
}
return entry, nil
return
}
// List the objects and directories in dir into entries. The
@@ -1202,137 +1182,41 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if err != nil {
return nil, err
}
err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) error {
entry, err := f.itemToDirEntry(ctx, dir, info)
if err == nil {
entries = append(entries, entry)
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
if !f.opt.ExposeOneNoteFiles && info.GetPackageType() == api.PackageTypeOneNote {
fs.Debugf(info.Name, "OneNote file not shown in directory listing")
return false
}
return err
remote := path.Join(dir, info.GetName())
folder := info.GetFolder()
if folder != nil {
// cache the directory ID for later lookups
id := info.GetID()
f.dirCache.Put(remote, id)
d := fs.NewDir(remote, time.Time(info.GetLastModifiedDateTime())).SetID(id)
d.SetItems(folder.ChildCount)
entries = append(entries, d)
} else {
o, err := f.newObjectWithInfo(ctx, remote, info)
if err != nil {
iErr = err
return true
}
entries = append(entries, o)
}
return false
})
if err != nil {
return nil, err
}
if iErr != nil {
return nil, iErr
}
return entries, nil
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
// Make sure this ID is in the directory cache
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
}
// ListR only works at the root of a onedrive, not on a folder
// So we have to filter things outside of the root which is
// inefficient.
list := walk.NewListRHelper(callback)
// list a folder conventionally - used for shared folders
var listFolder func(dir string) error
listFolder = func(dir string) error {
entries, err := f.List(ctx, dir)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
if _, isDir := entry.(fs.Directory); isDir {
err = listFolder(entry.Remote())
if err != nil {
return err
}
}
}
return nil
}
// This code relies on the fact that directories are sent before their children. This isn't
// mentioned in the docs though, so maybe it shouldn't be relied on.
seen := map[string]struct{}{}
fn := func(info *api.Item) error {
var parentPath string
var ok bool
id := info.GetID()
// The API can produce duplicates, so skip them
if _, found := seen[id]; found {
return nil
}
seen[id] = struct{}{}
// Skip the root directory
if id == directoryID {
return nil
}
// Skip deleted items
if info.Deleted != nil {
return nil
}
dirID := info.GetParentReference().GetID()
// Skip files that don't have their parent directory
// cached as they are outside the root.
parentPath, ok = f.dirCache.GetInv(dirID)
if !ok {
return nil
}
// Skip files not under the root directory
remote := path.Join(parentPath, info.GetName())
if dir != "" && !strings.HasPrefix(remote, dir+"/") {
return nil
}
entry, err := f.itemToDirEntry(ctx, parentPath, info)
if err != nil {
return err
}
err = list.Add(entry)
if err != nil {
return err
}
// If this is a shared folder, we'll need list it too
if info.RemoteItem != nil && info.RemoteItem.Folder != nil {
fs.Debugf(remote, "Listing shared directory")
return listFolder(remote)
}
return nil
}
opts := rest.Opts{
Method: "GET",
Path: "/root/delta",
Parameters: map[string][]string{
// "token": {token},
"$top": {fmt.Sprintf("%d", f.opt.ListChunk)},
},
}
var result api.DeltaResponse
err = f._listAll(ctx, "", false, false, fn, &opts, &result, &result.Value, &result.NextLink)
if err != nil {
return err
}
return list.Flush()
}
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
@@ -1401,12 +1285,15 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
}
if check {
// check to see if there are any items
err := f.listAll(ctx, rootID, false, false, func(item *api.Item) error {
return fs.ErrorDirectoryNotEmpty
found, err := f.listAll(ctx, rootID, false, false, func(item *api.Item) bool {
return true
})
if err != nil {
return err
}
if found {
return fs.ErrorDirectoryNotEmpty
}
}
err = f.deleteObject(ctx, rootID)
if err != nil {
@@ -2591,7 +2478,7 @@ func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (nextDeltaToken str
return
}
func (f *Fs) changeNotifyNextChange(ctx context.Context, token string) (delta api.DeltaResponse, err error) {
func (f *Fs) changeNotifyNextChange(ctx context.Context, token string) (delta deltaResponse, err error) {
opts := f.buildDriveDeltaOpts(token)
_, err = f.srv.CallJSON(ctx, &opts, nil, &delta)
@@ -2710,7 +2597,6 @@ var (
_ fs.Abouter = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = &Object{}
_ fs.IDer = &Object{}
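On the site URL regex in the config hunk above: the broader removed-side pattern accepts /sites/, /teams/ and other relative paths by capturing everything after the host. A quick stand-alone check (the example URLs are illustrative):

package main

import (
    "fmt"
    "regexp"
)

func main() {
    re := regexp.MustCompile(`https://.*\.sharepoint\.com(/.*)`)
    for _, u := range []string{
        "https://contoso.sharepoint.com/sites/mysite",
        "https://contoso.sharepoint.com/teams/ID",
    } {
        fmt.Println(re.FindStringSubmatch(u)[1])
    }
    // Output:
    // /sites/mysite
    // /teams/ID
}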

View File

@@ -767,17 +767,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
if apiError, ok := err.(*Error); ok {
// Work around a bug maybe in opendrive or maybe in rclone.
//
// We should know whether the folder exists or not by the call to
// FindDir above, so exactly why it is not found here is a mystery.
//
// This manifests as a failure in fs/sync TestSyncOverlapWithFilter
if apiError.Info.Message == "Folder is already deleted" {
return fs.DirEntries{}, nil
}
}
return nil, fmt.Errorf("failed to get folder list: %w", err)
}

View File

@@ -13,6 +13,7 @@ import (
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
)
const (
@@ -127,3 +128,18 @@ func useBYOKCopyObject(fs *Fs, request *objectstorage.CopyObjectRequest) {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}
func useBYOKUpload(fs *Fs, request *transfer.UploadRequest) {
if fs.opt.SSEKMSKeyID != "" {
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
}
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}

View File

@@ -6,7 +6,6 @@ package oracleobjectstorage
import (
"context"
"fmt"
"sort"
"strings"
"time"
@@ -197,32 +196,6 @@ func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string
// for "dir" and it returns "dirKey"
func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory string) (
uploads []*objectstorage.MultipartUpload, err error) {
return f.listMultipartUploadsObject(ctx, bucketName, directory, false)
}
// findLatestMultipartUpload finds the latest outstanding multipart upload for (bucket, key)
//
// Note that rather lazily we treat key as a prefix, so it matches
// directories and objects. This could surprise the user if they ask
// for "dir" and it returns "dirKey"
func (f *Fs) findLatestMultipartUpload(ctx context.Context, bucketName, directory string) (
uploads []*objectstorage.MultipartUpload, err error) {
pastUploads, err := f.listMultipartUploadsObject(ctx, bucketName, directory, true)
if err != nil {
return nil, err
}
if len(pastUploads) > 0 {
sort.Slice(pastUploads, func(i, j int) bool {
return pastUploads[i].TimeCreated.After(pastUploads[j].TimeCreated.Time)
})
return pastUploads[:1], nil
}
return nil, err
}
func (f *Fs) listMultipartUploadsObject(ctx context.Context, bucketName, directory string, exact bool) (
uploads []*objectstorage.MultipartUpload, err error) {
uploads = []*objectstorage.MultipartUpload{}
req := objectstorage.ListMultipartUploadsRequest{
@@ -244,13 +217,7 @@ func (f *Fs) listMultipartUploadsObject(ctx context.Context, bucketName, directo
if directory != "" && item.Object != nil && !strings.HasPrefix(*item.Object, directory) {
continue
}
if exact {
if *item.Object == directory {
uploads = append(uploads, &response.Items[index])
}
} else {
uploads = append(uploads, &response.Items[index])
}
uploads = append(uploads, &response.Items[index])
}
if response.OpcNextPage == nil {
break
@@ -259,34 +226,3 @@ func (f *Fs) listMultipartUploadsObject(ctx context.Context, bucketName, directo
}
return uploads, nil
}
func (f *Fs) listMultipartUploadParts(ctx context.Context, bucketName, bucketPath string, uploadID string) (
uploadedParts map[int]objectstorage.MultipartUploadPartSummary, err error) {
uploadedParts = make(map[int]objectstorage.MultipartUploadPartSummary)
req := objectstorage.ListMultipartUploadPartsRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
UploadId: common.String(uploadID),
Limit: common.Int(1000),
}
var response objectstorage.ListMultipartUploadPartsResponse
for {
err = f.pacer.Call(func() (bool, error) {
response, err = f.srv.ListMultipartUploadParts(ctx, req)
return shouldRetry(ctx, response.HTTPResponse(), err)
})
if err != nil {
return uploadedParts, err
}
for _, item := range response.Items {
uploadedParts[*item.PartNumber] = item
}
if response.OpcNextPage == nil {
break
}
req.Page = response.OpcNextPage
}
return uploadedParts, nil
}

View File

@@ -1,441 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"strings"
"sync"
"time"
"github.com/ncw/swift/v2"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pool"
"golang.org/x/net/http/httpguts"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/hash"
)
var warnStreamUpload sync.Once
// Info needed for an upload
type uploadInfo struct {
req *objectstorage.PutObjectRequest
md5sumHex string
}
type objectChunkWriter struct {
chunkSize int64
size int64
f *Fs
bucket *string
key *string
uploadID *string
partsToCommit []objectstorage.CommitMultipartUploadPartDetails
partsToCommitMu sync.Mutex
existingParts map[int]objectstorage.MultipartUploadPartSummary
eTag string
md5sMu sync.Mutex
md5s []byte
ui uploadInfo
o *Object
}
func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.Reader, options ...fs.OpenOption) error {
_, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
Open: o.fs,
OpenOptions: options,
})
return err
}
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(
ctx context.Context,
remote string,
src fs.ObjectInfo,
options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: remote,
}
ui, err := o.prepareUpload(ctx, src, options)
if err != nil {
return info, nil, fmt.Errorf("failed to prepare upload: %w", err)
}
uploadParts := f.opt.MaxUploadParts
if uploadParts < 1 {
uploadParts = 1
} else if uploadParts > maxUploadParts {
uploadParts = maxUploadParts
}
size := src.Size()
// calculate size of parts
chunkSize := f.opt.ChunkSize
// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
// buffers here (default 5 MiB). With a maximum number of parts (10,000) this will be a file of
// 48 GiB which seems like a not too unreasonable limit.
if size == -1 {
warnStreamUpload.Do(func() {
fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
f.opt.ChunkSize, fs.SizeSuffix(int64(chunkSize)*int64(uploadParts)))
})
} else {
chunkSize = chunksize.Calculator(src, size, uploadParts, chunkSize)
}
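// Worked example of the streaming limit mentioned above: with the
// default 5 MiB chunk size and the 10,000-part cap, a stream tops out
// at 5 MiB * 10,000 = 50,000 MiB, i.e. roughly 48.8 GiB.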
uploadID, existingParts, err := o.createMultipartUpload(ctx, ui.req)
if err != nil {
return info, nil, fmt.Errorf("create multipart upload request failed: %w", err)
}
bucketName, bucketPath := o.split()
chunkWriter := &objectChunkWriter{
chunkSize: int64(chunkSize),
size: size,
f: f,
bucket: &bucketName,
key: &bucketPath,
uploadID: &uploadID,
existingParts: existingParts,
ui: ui,
o: o,
}
info = fs.ChunkWriterInfo{
ChunkSize: int64(chunkSize),
Concurrency: o.fs.opt.UploadConcurrency,
LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
fs.Debugf(o, "open chunk writer: started multipart upload: %v", uploadID)
return info, chunkWriter, err
}
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error) {
if chunkNumber < 0 {
err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber)
return -1, err
}
// Only account after the checksum reads have been done
if do, ok := reader.(pool.DelayAccountinger); ok {
// To figure out this number, do a transfer and if the accounted size is 0 or a
// multiple of what it should be, increase or decrease this number.
do.DelayAccounting(2)
}
m := md5.New()
currentChunkSize, err := io.Copy(m, reader)
if err != nil {
return -1, err
}
// If no data read, don't write the chunk
if currentChunkSize == 0 {
return 0, nil
}
md5sumBinary := m.Sum([]byte{})
w.addMd5(&md5sumBinary, int64(chunkNumber))
md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])
// Object storage requires 1 <= PartNumber <= 10000
ossPartNumber := chunkNumber + 1
if existing, ok := w.existingParts[ossPartNumber]; ok {
if md5sum == *existing.Md5 {
fs.Debugf(w.o, "matched uploaded part found, part num %d, skipping part, md5=%v", *existing.PartNumber, md5sum)
w.addCompletedPart(existing.PartNumber, existing.Etag)
return currentChunkSize, nil
}
}
req := objectstorage.UploadPartRequest{
NamespaceName: common.String(w.f.opt.Namespace),
BucketName: w.bucket,
ObjectName: w.key,
UploadId: w.uploadID,
UploadPartNum: common.Int(ossPartNumber),
ContentLength: common.Int64(currentChunkSize),
ContentMD5: common.String(md5sum),
}
w.o.applyPartUploadOptions(w.ui.req, &req)
var resp objectstorage.UploadPartResponse
err = w.f.pacer.Call(func() (bool, error) {
// req.UploadPartBody = io.NopCloser(bytes.NewReader(buf))
// rewind the reader on retry and after reading md5
_, err = reader.Seek(0, io.SeekStart)
if err != nil {
return false, err
}
req.UploadPartBody = io.NopCloser(reader)
resp, err = w.f.srv.UploadPart(ctx, req)
if err != nil {
if ossPartNumber <= 8 {
return shouldRetry(ctx, resp.HTTPResponse(), err)
}
// retry all chunks once we have done the first few
return true, err
}
return false, err
})
if err != nil {
fs.Errorf(w.o, "multipart upload failed to upload part:%d err: %v", ossPartNumber, err)
return -1, fmt.Errorf("multipart upload failed to upload part: %w", err)
}
w.addCompletedPart(&ossPartNumber, resp.ETag)
return currentChunkSize, err
}
// add a part number and etag to the completed parts
func (w *objectChunkWriter) addCompletedPart(partNum *int, eTag *string) {
w.partsToCommitMu.Lock()
defer w.partsToCommitMu.Unlock()
w.partsToCommit = append(w.partsToCommit, objectstorage.CommitMultipartUploadPartDetails{
PartNum: partNum,
Etag: eTag,
})
}
func (w *objectChunkWriter) Close(ctx context.Context) (err error) {
req := objectstorage.CommitMultipartUploadRequest{
NamespaceName: common.String(w.f.opt.Namespace),
BucketName: w.bucket,
ObjectName: w.key,
UploadId: w.uploadID,
}
req.PartsToCommit = w.partsToCommit
var resp objectstorage.CommitMultipartUploadResponse
err = w.f.pacer.Call(func() (bool, error) {
resp, err = w.f.srv.CommitMultipartUpload(ctx, req)
// if multipart is corrupted, we will abort the uploadId
if isMultiPartUploadCorrupted(err) {
fs.Debugf(w.o, "multipart uploadId %v is corrupted, aborting...", *w.uploadID)
_ = w.Abort(ctx)
return false, err
}
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
return err
}
w.eTag = *resp.ETag
hashOfHashes := md5.Sum(w.md5s)
wantMultipartMd5 := fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hashOfHashes[:]), len(w.partsToCommit))
gotMultipartMd5 := *resp.OpcMultipartMd5
if wantMultipartMd5 != gotMultipartMd5 {
fs.Errorf(w.o, "multipart upload corrupted: multipart md5 differ: expecting %s but got %s", wantMultipartMd5, gotMultipartMd5)
return fmt.Errorf("multipart upload corrupted: md5 differ: expecting %s but got %s", wantMultipartMd5, gotMultipartMd5)
}
fs.Debugf(w.o, "multipart upload %v md5 matched: expecting %s and got %s", *w.uploadID, wantMultipartMd5, gotMultipartMd5)
return nil
}
func isMultiPartUploadCorrupted(err error) bool {
if err == nil {
return false
}
// Check if this is an OCI service error, and if so whether it is a
// multipart commit error
if ociError, ok := err.(common.ServiceError); ok {
// InvalidUploadPart means a part is missing or corrupt, so the commit
// cannot succeed and the upload should be aborted
if ociError.GetCode() == "InvalidUploadPart" {
return true
}
}
return false
}
func (w *objectChunkWriter) Abort(ctx context.Context) error {
fs.Debugf(w.o, "Cancelling multipart upload")
err := w.o.fs.abortMultiPartUpload(
ctx,
w.bucket,
w.key,
w.uploadID)
if err != nil {
fs.Debugf(w.o, "Failed to cancel multipart upload: %v", err)
} else {
fs.Debugf(w.o, "canceled and aborted multipart upload: %v", *w.uploadID)
}
return err
}
// addMd5 adds a binary md5 to the md5 calculated so far
func (w *objectChunkWriter) addMd5(md5binary *[]byte, chunkNumber int64) {
w.md5sMu.Lock()
defer w.md5sMu.Unlock()
start := chunkNumber * md5.Size
end := start + md5.Size
if extend := end - int64(len(w.md5s)); extend > 0 {
w.md5s = append(w.md5s, make([]byte, extend)...)
}
copy(w.md5s[start:end], (*md5binary)[:])
}
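Since addMd5 writes each digest at a fixed stride (chunkNumber * md5.Size), chunks may finish in any order and the buffer still ends up as the ordered concatenation of part MD5s. A self-contained sketch of the composite checksum that Close compares against resp.OpcMultipartMd5, assuming all parts are available in memory:
package main
import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)
// compositeMd5 reproduces the multipart checksum verified in Close: the
// MD5 of the concatenated binary part MD5s, base64-encoded, with the
// part count appended after a dash.
func compositeMd5(parts [][]byte) string {
	md5s := make([]byte, 0, len(parts)*md5.Size)
	for _, part := range parts {
		sum := md5.Sum(part)
		md5s = append(md5s, sum[:]...)
	}
	hashOfHashes := md5.Sum(md5s)
	return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hashOfHashes[:]), len(parts))
}
func main() {
	fmt.Println(compositeMd5([][]byte{[]byte("part one"), []byte("part two")}))
}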
func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {
bucket, bucketPath := o.split()
ui.req = &objectstorage.PutObjectRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucket),
ObjectName: common.String(bucketPath),
}
// Set the mtime in the metadata
modTime := src.ModTime(ctx)
// Fetch metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, src, options)
if err != nil {
return ui, fmt.Errorf("failed to read metadata from source object: %w", err)
}
ui.req.OpcMeta = make(map[string]string, len(meta)+2)
// merge metadata into request and user metadata
for k, v := range meta {
pv := common.String(v)
k = strings.ToLower(k)
switch k {
case "cache-control":
ui.req.CacheControl = pv
case "content-disposition":
ui.req.ContentDisposition = pv
case "content-encoding":
ui.req.ContentEncoding = pv
case "content-language":
ui.req.ContentLanguage = pv
case "content-type":
ui.req.ContentType = pv
case "tier":
// ignore
case "mtime":
// mtime in meta overrides source ModTime
metaModTime, err := time.Parse(time.RFC3339Nano, v)
if err != nil {
fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err)
} else {
modTime = metaModTime
}
case "btime":
// write as metadata since we can't set it
ui.req.OpcMeta[k] = v
default:
ui.req.OpcMeta[k] = v
}
}
// Set the mtime in the metadata
ui.req.OpcMeta[metaMtime] = swift.TimeToFloatString(modTime)
// read the md5sum if available
// - for non-multipart
// - so we can add a ContentMD5
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
// - for multipart, provided checksums aren't disabled
// - so we can add the md5sum in the metadata as metaMD5Hash
size := src.Size()
isMultipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
var md5sumBase64 string
if !isMultipart || !o.fs.opt.DisableChecksum {
ui.md5sumHex, err = src.Hash(ctx, hash.MD5)
if err == nil && matchMd5.MatchString(ui.md5sumHex) {
hashBytes, err := hex.DecodeString(ui.md5sumHex)
if err == nil {
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
if isMultipart && !o.fs.opt.DisableChecksum {
// Set the md5sum as metadata on the object if
// - a multipart upload
// - the ETag is not an MD5, e.g. when using SSE/SSE-C
// provided checksums aren't disabled
ui.req.OpcMeta[metaMD5Hash] = md5sumBase64
}
}
}
}
// Set the content type if it isn't set already
if ui.req.ContentType == nil {
ui.req.ContentType = common.String(fs.MimeType(ctx, src))
}
if size >= 0 {
ui.req.ContentLength = common.Int64(size)
}
if md5sumBase64 != "" {
ui.req.ContentMD5 = &md5sumBase64
}
o.applyPutOptions(ui.req, options...)
useBYOKPutObject(o.fs, ui.req)
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return ui, fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
ui.req.StorageTier = storageTier
}
// Check metadata keys and values are valid
for key, value := range ui.req.OpcMeta {
if !httpguts.ValidHeaderFieldName(key) {
fs.Errorf(o, "Dropping invalid metadata key %q", key)
delete(ui.req.OpcMeta, key)
} else if value == "" {
fs.Errorf(o, "Dropping nil metadata value for key %q", key)
delete(ui.req.OpcMeta, key)
} else if !httpguts.ValidHeaderFieldValue(value) {
fs.Errorf(o, "Dropping invalid metadata value %q for key %q", value, key)
delete(ui.req.OpcMeta, key)
}
}
return ui, nil
}
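The mtime convention used above stores the modification time as a fractional Unix timestamp string under the metaMtime key. A minimal round-trip sketch; the parse-back here uses the standard library, since only the encoding helper appears in this file (the swift package provides matching decode helpers as well):
package main
import (
	"fmt"
	"strconv"
	"time"

	"github.com/ncw/swift/v2"
)
func main() {
	// Encode the mtime the way prepareUpload does.
	mtime := time.Unix(1690000000, 500000000)
	s := swift.TimeToFloatString(mtime)
	fmt.Println(s) // fractional Unix seconds, e.g. "1690000000.5"

	// Decode it back into a time.Time.
	f, err := strconv.ParseFloat(s, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(time.Unix(0, int64(f*1e9)).UTC())
}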
func (o *Object) createMultipartUpload(ctx context.Context, putReq *objectstorage.PutObjectRequest) (
uploadID string, existingParts map[int]objectstorage.MultipartUploadPartSummary, err error) {
bucketName, bucketPath := o.split()
f := o.fs
if f.opt.AttemptResumeUpload {
fs.Debugf(o, "attempting to resume upload for %v (if any)", o.remote)
resumeUploads, err := o.fs.findLatestMultipartUpload(ctx, bucketName, bucketPath)
if err == nil && len(resumeUploads) > 0 {
uploadID = *resumeUploads[0].UploadId
existingParts, err = f.listMultipartUploadParts(ctx, bucketName, bucketPath, uploadID)
if err == nil {
fs.Debugf(o, "resuming with existing upload id: %v", uploadID)
return uploadID, existingParts, err
}
}
}
req := objectstorage.CreateMultipartUploadRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
}
req.Object = common.String(bucketPath)
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return "", nil, fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
req.StorageTier = storageTier
}
o.applyMultipartUploadOptions(putReq, &req)
var resp objectstorage.CreateMultipartUploadResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CreateMultipartUpload(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
return "", existingParts, err
}
existingParts = make(map[int]objectstorage.MultipartUploadPartSummary)
uploadID = *resp.UploadId
fs.Debugf(o, "created new upload id: %v", uploadID)
return uploadID, existingParts, err
}


@@ -4,14 +4,12 @@
package oracleobjectstorage
import (
"bytes"
"context"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"net/http"
"os"
"regexp"
"strconv"
"strings"
@@ -20,8 +18,10 @@ import (
"github.com/ncw/swift/v2"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
)
// ------------------------------------------------------------
@@ -367,28 +367,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
return resp.HTTPResponse().Body, nil
}
func isZeroLength(streamReader io.Reader) bool {
switch v := streamReader.(type) {
case *bytes.Buffer:
return v.Len() == 0
case *bytes.Reader:
return v.Len() == 0
case *strings.Reader:
return v.Len() == 0
case *os.File:
fi, err := v.Stat()
if err != nil {
return false
}
return fi.Size() == 0
default:
return false
}
}
// Update an object if it has changed
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
bucketName, _ := o.split()
bucketName, bucketPath := o.split()
err = o.fs.makeBucket(ctx, bucketName)
if err != nil {
return err
@@ -396,24 +377,142 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// determine whether to upload single part or multipart.
size := src.Size()
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
if isZeroLength(in) {
multipart = false
multipart := size >= int64(o.fs.opt.UploadCutoff)
// Set the mtime in the metadata
modTime := src.ModTime(ctx)
metadata := map[string]string{
metaMtime: swift.TimeToFloatString(modTime),
}
// read the md5sum if available
// - for non-multipart
// - so we can add a ContentMD5
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
// - for multipart, provided checksums aren't disabled
// - so we can add the md5sum in the metadata as metaMD5Hash
var md5sumBase64 string
var md5sumHex string
if !multipart || !o.fs.opt.DisableChecksum {
md5sumHex, err = src.Hash(ctx, hash.MD5)
if err == nil && matchMd5.MatchString(md5sumHex) {
hashBytes, err := hex.DecodeString(md5sumHex)
if err == nil {
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
if multipart && !o.fs.opt.DisableChecksum {
// Set the md5sum as metadata on the object if
// - a multipart upload
// - the ETag is not an MD5, e.g. when using SSE/SSE-C
// provided checksums aren't disabled
metadata[metaMD5Hash] = md5sumBase64
}
}
}
}
// Guess the content type
mimeType := fs.MimeType(ctx, src)
if multipart {
err = o.uploadMultipart(ctx, src, in)
chunkSize := int64(o.fs.opt.ChunkSize)
uploadRequest := transfer.UploadRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
ContentType: common.String(mimeType),
PartSize: common.Int64(chunkSize),
AllowMultipartUploads: common.Bool(true),
AllowParrallelUploads: common.Bool(true),
ObjectStorageClient: o.fs.srv,
EnableMultipartChecksumVerification: common.Bool(!o.fs.opt.DisableChecksum),
NumberOfGoroutines: common.Int(o.fs.opt.UploadConcurrency),
Metadata: metadataWithOpcPrefix(metadata),
}
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
uploadRequest.StorageTier = storageTier
}
o.applyMultiPutOptions(&uploadRequest, options...)
useBYOKUpload(o.fs, &uploadRequest)
uploadStreamRequest := transfer.UploadStreamRequest{
UploadRequest: uploadRequest,
StreamReader: in,
}
uploadMgr := transfer.NewUploadManager()
var uploadID = ""
defer atexit.OnError(&err, func() {
if uploadID == "" {
return
}
if o.fs.opt.LeavePartsOnError {
return
}
fs.Debugf(o, "Cancelling multipart upload")
errCancel := o.fs.abortMultiPartUpload(
context.Background(),
bucketName,
bucketPath,
uploadID)
if errCancel != nil {
fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
}
})()
err = o.fs.pacer.Call(func() (bool, error) {
uploadResponse, err := uploadMgr.UploadStream(ctx, uploadStreamRequest)
var httpResponse *http.Response
if err == nil {
if uploadResponse.Type == transfer.MultipartUpload {
if uploadResponse.MultipartUploadResponse != nil {
httpResponse = uploadResponse.MultipartUploadResponse.HTTPResponse()
}
} else {
if uploadResponse.SinglepartUploadResponse != nil {
httpResponse = uploadResponse.SinglepartUploadResponse.HTTPResponse()
}
}
}
if err != nil {
uploadID := ""
if uploadResponse.MultipartUploadResponse != nil && uploadResponse.MultipartUploadResponse.UploadID != nil {
uploadID = *uploadResponse.MultipartUploadResponse.UploadID
fs.Debugf(o, "multipart streaming upload failed, aborting uploadID: %v, may retry", uploadID)
_ = o.fs.abortMultiPartUpload(ctx, bucketName, bucketPath, uploadID)
}
}
return shouldRetry(ctx, httpResponse, err)
})
if err != nil {
fs.Errorf(o, "multipart streaming upload failed %v", err)
return err
}
} else {
ui, err := o.prepareUpload(ctx, src, options)
if err != nil {
return fmt.Errorf("failed to prepare upload: %w", err)
req := objectstorage.PutObjectRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
ContentType: common.String(mimeType),
PutObjectBody: io.NopCloser(in),
OpcMeta: metadata,
}
if size >= 0 {
req.ContentLength = common.Int64(size)
}
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
req.StorageTier = storageTier
}
o.applyPutOptions(&req, options...)
useBYOKPutObject(o.fs, &req)
var resp objectstorage.PutObjectResponse
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
ui.req.PutObjectBody = io.NopCloser(in)
resp, err = o.fs.srv.PutObject(ctx, *ui.req)
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.PutObject(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
@@ -492,24 +591,28 @@ func (o *Object) applyGetObjectOptions(req *objectstorage.GetObjectRequest, opti
}
}
func (o *Object) applyMultipartUploadOptions(putReq *objectstorage.PutObjectRequest, req *objectstorage.CreateMultipartUploadRequest) {
req.ContentType = putReq.ContentType
req.ContentLanguage = putReq.ContentLanguage
req.ContentEncoding = putReq.ContentEncoding
req.ContentDisposition = putReq.ContentDisposition
req.CacheControl = putReq.CacheControl
req.Metadata = metadataWithOpcPrefix(putReq.OpcMeta)
req.OpcSseCustomerAlgorithm = putReq.OpcSseCustomerAlgorithm
req.OpcSseCustomerKey = putReq.OpcSseCustomerKey
req.OpcSseCustomerKeySha256 = putReq.OpcSseCustomerKeySha256
req.OpcSseKmsKeyId = putReq.OpcSseKmsKeyId
}
func (o *Object) applyPartUploadOptions(putReq *objectstorage.PutObjectRequest, req *objectstorage.UploadPartRequest) {
req.OpcSseCustomerAlgorithm = putReq.OpcSseCustomerAlgorithm
req.OpcSseCustomerKey = putReq.OpcSseCustomerKey
req.OpcSseCustomerKeySha256 = putReq.OpcSseCustomerKeySha256
req.OpcSseKmsKeyId = putReq.OpcSseKmsKeyId
func (o *Object) applyMultiPutOptions(req *transfer.UploadRequest, options ...fs.OpenOption) {
// Apply upload options
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "content-encoding":
req.ContentEncoding = common.String(value)
case "content-language":
req.ContentLanguage = common.String(value)
case "content-type":
req.ContentType = common.String(value)
default:
if strings.HasPrefix(lowerKey, ociMetaPrefix) {
req.Metadata[lowerKey] = value
} else {
fs.Errorf(o, "Don't know how to set key %q on upload", key)
}
}
}
}
func metadataWithOpcPrefix(src map[string]string) map[string]string {


@@ -13,10 +13,9 @@ import (
const (
maxSizeForCopy = 4768 * 1024 * 1024
maxUploadParts = 10000
defaultUploadConcurrency = 10
minChunkSize = fs.SizeSuffix(5 * 1024 * 1024)
minChunkSize = fs.SizeSuffix(1024 * 1024 * 5)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
defaultUploadConcurrency = 10
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
minSleep = 10 * time.Millisecond
defaultCopyTimeoutDuration = fs.Duration(time.Minute)
@@ -56,14 +55,12 @@ type Options struct {
ConfigProfile string `config:"config_profile"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
MaxUploadParts int `config:"max_upload_parts"`
UploadConcurrency int `config:"upload_concurrency"`
DisableChecksum bool `config:"disable_checksum"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
CopyTimeout fs.Duration `config:"copy_timeout"`
StorageTier string `config:"storage_tier"`
LeavePartsOnError bool `config:"leave_parts_on_error"`
AttemptResumeUpload bool `config:"attempt_resume_upload"`
NoCheckBucket bool `config:"no_check_bucket"`
SSEKMSKeyID string `config:"sse_kms_key_id"`
SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
@@ -160,8 +157,9 @@ The minimum is 0 and the maximum is 5 GiB.`,
Help: `Chunk size to use for uploading.
When uploading files larger than upload_cutoff or files with unknown
size (e.g. from "rclone rcat" or uploaded with "rclone mount" they will be uploaded
as multipart uploads using this chunk size.
size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google
photos or google docs) they will be uploaded as multipart uploads
using this chunk size.
Note that "upload_concurrency" chunks of this size are buffered
in memory per transfer.
@@ -183,20 +181,6 @@ statistics displayed with "-P" flag.
`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "max_upload_parts",
Help: `Maximum number of parts in a multipart upload.
This option defines the maximum number of multipart chunks to use
when doing a multipart upload.
OCI has max parts limit of 10,000 chunks.
Rclone will automatically increase the chunk size when uploading a
large file of a known size to stay below this number of chunks limit.
`,
Default: maxUploadParts,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
@@ -254,24 +238,12 @@ to start uploading.`,
encoder.EncodeDot,
}, {
Name: "leave_parts_on_error",
Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.
Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.
It should be set to true for resuming uploads across different sessions.
WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add
additional costs if not cleaned up.
`,
Default: false,
Advanced: true,
}, {
Name: "attempt_resume_upload",
Help: `If true attempt to resume previously started multipart upload for the object.
This will be helpful to speed up multipart transfers by resuming uploads from past session.
WARNING: If chunk size differs in resumed session from past incomplete session, then the resumed multipart upload is
aborted and a new multipart upload is started with the new chunk size.
The flag leave_parts_on_error must be true to resume and optimize to skip parts that were already uploaded successfully.
`,
Default: false,
Advanced: true,


@@ -318,6 +318,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
remote := *object.Name
remote = f.opt.Enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
// fs.Debugf(f, "Odd name received %v", object.Name)
continue
}
remote = remote[len(prefix):]
@@ -557,15 +558,15 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
})
}
func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID *string) (err error) {
if uploadID == nil || *uploadID == "" {
func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID string) (err error) {
if uploadID == "" {
return nil
}
request := objectstorage.AbortMultipartUploadRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: bucketName,
ObjectName: bucketPath,
UploadId: uploadID,
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
UploadId: common.String(uploadID),
}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.AbortMultipartUpload(ctx, request)
@@ -588,7 +589,7 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Durat
if operations.SkipDestructive(ctx, what, "remove pending upload") {
continue
}
_ = f.abortMultiPartUpload(ctx, upload.Bucket, upload.Object, upload.UploadId)
_ = f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
}
} else {
fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.")
@@ -683,13 +684,12 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Commander = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.OpenChunkWriter = &Fs{}
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Commander = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}


@@ -1215,7 +1215,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, sha1Str stri
return nil, fmt.Errorf("failed to upload: %w", err)
}
// refresh uploaded file info
// Compared to `newfile.File` this upgrades several fields...
// Compared to `newfile.File` this upgrades several feilds...
// audit, links, modified_time, phase, revision, and web_content_link
return f.getFile(ctx, newfile.File.ID)
}


@@ -1,4 +1,3 @@
// Package protondrive implements the Proton Drive backend
package protondrive
import (
@@ -6,6 +5,7 @@ import (
"errors"
"fmt"
"io"
"log"
"path"
"strings"
"time"
@@ -38,15 +38,15 @@ const (
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
clientUIDKey = "client_uid"
clientAccessTokenKey = "client_access_token"
clientRefreshTokenKey = "client_refresh_token"
clientSaltedKeyPassKey = "client_salted_key_pass"
clientUIDKey = "clientUID"
clientAccessTokenKey = "clientAccessToken"
clientRefreshTokenKey = "clientRefreshToken"
clientSaltedKeyPassKey = "clientSaltedKeyPass"
)
var (
errCanNotUploadFileWithUnknownSize = errors.New("proton Drive can't upload files with unknown size")
errCanNotPurgeRootDirectory = errors.New("can't purge root directory")
ErrCanNotUploadFileWithUnknownSize = errors.New("proton Drive can't upload files with unknown size")
ErrCanNotPurgeRootDirectory = errors.New("can't purge root directory")
// for the auth/deauth handler
_mapper configmap.Mapper
@@ -61,60 +61,19 @@ func init() {
NewFs: NewFs,
Options: []fs.Option{{
Name: "username",
Help: `The username of your proton account`,
Help: `The username of your proton drive account`,
Required: true,
}, {
Name: "password",
Help: "The password of your proton account.",
Help: "The password of your proton drive account.",
Required: true,
IsPassword: true,
}, {
Name: "mailbox_password",
Help: `The mailbox password of your two-password proton account.
For more information regarding the mailbox password, please check the
following official knowledge base article:
https://proton.me/support/the-difference-between-the-mailbox-password-and-login-password
`,
IsPassword: true,
Advanced: true,
}, {
Name: "2fa",
Help: `The 2FA code
The value can also be provided with --protondrive-2fa=000000
The 2FA code of your proton drive account if the account is set up with
two-factor authentication`,
Required: false,
}, {
Name: clientUIDKey,
Help: "Client uid key (internal use only)",
Required: false,
Advanced: true,
Sensitive: true,
Hide: fs.OptionHideBoth,
}, {
Name: clientAccessTokenKey,
Help: "Client access token key (internal use only)",
Required: false,
Advanced: true,
Sensitive: true,
Hide: fs.OptionHideBoth,
}, {
Name: clientRefreshTokenKey,
Help: "Client refresh token key (internal use only)",
Required: false,
Advanced: true,
Sensitive: true,
Hide: fs.OptionHideBoth,
}, {
Name: clientSaltedKeyPassKey,
Help: "Client salted key pass key (internal use only)",
Required: false,
Advanced: true,
Sensitive: true,
Hide: fs.OptionHideBoth,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -151,8 +110,6 @@ When a file upload is cancelled or failed before completion, a draft will be
created and the subsequent upload of the same file to the same location will be
reported as a conflict.
The value can also be set by --protondrive-replace-existing-draft=true
If the option is set to true, the draft will be replaced and then the upload
operation will restart. If there are other clients also uploading at the same
file location at the same time, the behavior is currently unknown. Need to set
@@ -166,10 +123,6 @@ will be returned, and no upload will happen.`,
Name: "enable_caching",
Help: `Caches the files and folders metadata to reduce API calls
Notice: If you are mounting ProtonDrive as a VFS, please disable this feature,
as the current implementation doesn't update or clear the cache when there are
external changes.
The files and folders on ProtonDrive are represented as links with keyrings,
which can be cached to improve performance and be friendly to the API server.
@@ -181,23 +134,36 @@ cache. Thus, if there are concurrent clients accessing the same mount point,
then we might have a problem with caching the stale data.`,
Advanced: true,
Default: true,
}, {
Name: "skip_signature_verifications",
Help: `Skip signature verifications during decryption operations
During the development and beta testing phases, signature verification is a big
source of problems which affects normal usage.
Before a proper signature-related failure report-only mode is
implemented, we currently offer this feature flag to disable verifying
signatures during decryption operations.
`,
Advanced: true,
Default: false,
}},
})
}
// Options defines the configuration for this backend
type Options struct {
Username string `config:"username"`
Password string `config:"password"`
MailboxPassword string `config:"mailbox_password"`
TwoFA string `config:"2fa"`
Username string `config:"username"`
Password string `config:"password"`
TwoFA string `config:"2fa"`
// advanced
Enc encoder.MultiEncoder `config:"encoding"`
ReportOriginalSize bool `config:"original_file_size"`
AppVersion string `config:"app_version"`
ReplaceExistingDraft bool `config:"replace_existing_draft"`
EnableCaching bool `config:"enable_caching"`
Enc encoder.MultiEncoder `config:"encoding"`
ReportOriginalSize bool `config:"original_file_size"`
AppVersion string `config:"app_version"`
ReplaceExistingDraft bool `config:"replace_existing_draft"`
EnableCaching bool `config:"enable_caching"`
SkipSignatureVerifications bool `config:"skip_signature_verifications"`
}
// Fs represents a remote proton drive
@@ -302,12 +268,12 @@ func clearConfigMap(m configmap.Mapper) {
}
func authHandler(auth proton.Auth) {
// fs.Debugf("authHandler called")
// log.Println("authHandler called")
setConfigMap(_mapper, auth.UID, auth.AccessToken, auth.RefreshToken, _saltedKeyPass)
}
func deAuthHandler() {
// fs.Debugf("deAuthHandler called")
// log.Println("deAuthHandler called")
clearConfigMap(_mapper)
}
@@ -318,6 +284,7 @@ func newProtonDrive(ctx context.Context, f *Fs, opt *Options, m configmap.Mapper
config.ReplaceExistingDraft = opt.ReplaceExistingDraft
config.EnableCaching = opt.EnableCaching
config.SkipSignatureVerifications = opt.SkipSignatureVerifications
// let's see if we have the cached access credential
uid, accessToken, refreshToken, saltedKeyPass, hasUseReusableLoginCredentials := getConfigMap(m)
@@ -352,7 +319,6 @@ func newProtonDrive(ctx context.Context, f *Fs, opt *Options, m configmap.Mapper
config.UseReusableLogin = false
config.FirstLoginCredential.Username = opt.Username
config.FirstLoginCredential.Password = opt.Password
config.FirstLoginCredential.MailboxPassword = opt.MailboxPassword
config.FirstLoginCredential.TwoFA = opt.TwoFA
protonDrive, auth, err := protonDriveAPI.NewProtonDrive(ctx, config, authHandler, deAuthHandler)
if err != nil {
@@ -384,14 +350,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
}
if opt.MailboxPassword != "" {
var err error
opt.MailboxPassword, err = obscure.Reveal(opt.MailboxPassword)
if err != nil {
return nil, fmt.Errorf("couldn't decrypt mailbox password: %w", err)
}
}
ci := fs.GetConfig(ctx)
root = strings.Trim(root, "/")
@@ -622,10 +580,12 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
return entries, nil
}
// FindLeaf finds a directory of name leaf in the folder with ID pathID
// DirCacher describes an interface for doing the low level directory work
//
// This should be implemented by the backend and will be called by the
// dircache package when appropriate.
//
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (string, bool, error) {
/* f.opt.Enc.FromStandardName(leaf) not required since the DirCache only process sanitized path */
@@ -644,10 +604,12 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (string, bool, e
return link.LinkID, true, nil
}
// CreateDir makes a directory with pathID as parent and name leaf
// DirCacher describes an interface for doing the low level directory work
//
// This should be implemented by the backend and will be called by the
// dircache package when appropriate.
//
// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (string, error) {
/* f.opt.Enc.FromStandardName(leaf) not required since the DirCache only process sanitized path */
@@ -675,7 +637,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (string, error)
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
size := src.Size()
if size < 0 {
return nil, errCanNotUploadFileWithUnknownSize
return nil, ErrCanNotUploadFileWithUnknownSize
}
existingObj, err := f.NewObject(ctx, src.Remote())
@@ -774,7 +736,7 @@ func (f *Fs) DirCacheFlush() {
f.protonDrive.ClearCache()
}
// Hashes returns the supported hash types of the filesystem
// Returns the supported hash types of the filesystem
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.SHA1)
}
@@ -833,7 +795,9 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
return *o.digests, nil
}
// sha1 not cached: we fetch and try to obtain the sha1 of the link
// sha1 not cached
log.Println("sha1 not cached")
// we fetch and try to obtain the sha1 of the link
fileSystemAttrs, err := o.fs.protonDrive.GetActiveRevisionAttrsByID(ctx, o.ID())
if err != nil {
return "", err
@@ -855,7 +819,7 @@ func (o *Object) Size() int64 {
return *o.originalSize
}
fs.Debugf(o, "Original file size missing")
fs.Errorf(o, "Original size should exist")
}
return o.size
}
@@ -934,7 +898,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
size := src.Size()
if size < 0 {
return errCanNotUploadFileWithUnknownSize
return ErrCanNotUploadFileWithUnknownSize
}
remote := o.Remote()
@@ -992,7 +956,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
root := path.Join(f.root, dir)
if root == "" {
// we can't remove the root directory, but we can list the directory and delete every folder and file in here
return errCanNotPurgeRootDirectory
return ErrCanNotPurgeRootDirectory
}
folderLinkID, err := f.dirCache.FindDir(ctx, f.sanitizePath(dir), false)


@@ -1,182 +0,0 @@
// Package api provides types used by the Quatrix API.
package api
import (
"strconv"
"time"
)
// OverwriteOnCopyMode is a conflict resolve mode during copy. Files with conflicting names will be overwritten
const OverwriteOnCopyMode = "overwrite"
// ProfileInfo is a profile info about quota
type ProfileInfo struct {
UserUsed int64 `json:"user_used"`
UserLimit int64 `json:"user_limit"`
AccUsed int64 `json:"acc_used"`
AccLimit int64 `json:"acc_limit"`
}
// IDList is a general object that contains list of ids
type IDList struct {
IDs []string `json:"ids"`
}
// DeleteParams is the request to delete object
type DeleteParams struct {
IDs []string `json:"ids"`
DeletePermanently bool `json:"delete_permanently"`
}
// FileInfoParams is the request to get object's (file or directory) info
type FileInfoParams struct {
ParentID string `json:"parent_id,omitempty"`
Path string `json:"path"`
}
// FileInfo is the response to get object's (file or directory) info
type FileInfo struct {
FileID string `json:"file_id"`
ParentID string `json:"parent_id"`
Src string `json:"src"`
Type string `json:"type"`
}
// IsFile returns true if object is a file
// false otherwise
func (fi *FileInfo) IsFile() bool {
if fi == nil {
return false
}
return fi.Type == "F"
}
// IsDir returns true if object is a directory
// false otherwise
func (fi *FileInfo) IsDir() bool {
if fi == nil {
return false
}
return fi.Type == "D" || fi.Type == "S" || fi.Type == "T"
}
// CreateDirParams is the request to create a directory
type CreateDirParams struct {
Target string `json:"target,omitempty"`
Name string `json:"name"`
Resolve bool `json:"resolve"`
}
// File represent metadata about object in Quatrix (file or directory)
type File struct {
ID string `json:"id"`
Created JSONTime `json:"created"`
Modified JSONTime `json:"modified"`
Name string `json:"name"`
ParentID string `json:"parent_id"`
Size int64 `json:"size"`
ModifiedMS JSONTime `json:"modified_ms"`
Type string `json:"type"`
Operations int `json:"operations"`
SubType string `json:"sub_type"`
Content []File `json:"content"`
}
// IsFile returns true if object is a file
// false otherwise
func (f *File) IsFile() bool {
if f == nil {
return false
}
return f.Type == "F"
}
// IsDir returns true if object is a directory
// false otherwise
func (f *File) IsDir() bool {
if f == nil {
return false
}
return f.Type == "D" || f.Type == "S" || f.Type == "T"
}
// SetMTimeParams is the request to set modification time for object
type SetMTimeParams struct {
ID string `json:"id,omitempty"`
MTime JSONTime `json:"mtime"`
}
// JSONTime provides methods to marshal/unmarshal time.Time as Unix time
type JSONTime time.Time
// MarshalJSON returns time representation in Unix time
func (u JSONTime) MarshalJSON() ([]byte, error) {
return []byte(strconv.FormatFloat(float64(time.Time(u).UTC().UnixNano())/1e9, 'f', 6, 64)), nil
}
// UnmarshalJSON sets time from Unix time representation
func (u *JSONTime) UnmarshalJSON(data []byte) error {
f, err := strconv.ParseFloat(string(data), 64)
if err != nil {
return err
}
t := JSONTime(time.Unix(0, int64(f*1e9)))
*u = t
return nil
}
// String returns Unix time representation of time as string
func (u JSONTime) String() string {
return strconv.FormatInt(time.Time(u).UTC().Unix(), 10)
}
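A self-contained round-trip sketch of this fractional-Unix-seconds encoding, duplicating the type above so it runs standalone; the final Round guards against float64 precision loss at nanosecond scale:
package main
import (
	"encoding/json"
	"fmt"
	"strconv"
	"time"
)
// JSONTime mirrors the type above: time serialized as Unix seconds with a
// six-digit fraction.
type JSONTime time.Time
func (u JSONTime) MarshalJSON() ([]byte, error) {
	return []byte(strconv.FormatFloat(float64(time.Time(u).UTC().UnixNano())/1e9, 'f', 6, 64)), nil
}
func (u *JSONTime) UnmarshalJSON(data []byte) error {
	f, err := strconv.ParseFloat(string(data), 64)
	if err != nil {
		return err
	}
	*u = JSONTime(time.Unix(0, int64(f*1e9)))
	return nil
}
func main() {
	type obj struct {
		MTime JSONTime `json:"mtime"`
	}
	b, _ := json.Marshal(obj{MTime: JSONTime(time.Unix(1690000000, 250000000))})
	fmt.Println(string(b)) // {"mtime":1690000000.250000}

	var o obj
	_ = json.Unmarshal(b, &o)
	fmt.Println(time.Time(o.MTime).UTC().Round(time.Millisecond))
}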
// DownloadLinkResponse is the response to download-link request
type DownloadLinkResponse struct {
ID string `json:"id"`
}
// UploadLinkParams is the request to get upload-link
type UploadLinkParams struct {
Name string `json:"name"`
ParentID string `json:"parent_id"`
Resolve bool `json:"resolve"`
}
// UploadLinkResponse is the response to upload-link request
type UploadLinkResponse struct {
Name string `json:"name"`
FileID string `json:"file_id"`
ParentID string `json:"parent_id"`
UploadKey string `json:"upload_key"`
}
// UploadFinalizeResponse is the response to finalize file method
type UploadFinalizeResponse struct {
FileID string `json:"id"`
ParentID string `json:"parent_id"`
Modified int64 `json:"modified"`
FileSize int64 `json:"size"`
}
// FileModifyParams is the request to get modify file link
type FileModifyParams struct {
ID string `json:"id"`
Truncate int64 `json:"truncate"`
}
// FileCopyMoveOneParams is the request to do server-side copy and move
// can be used for file or directory
type FileCopyMoveOneParams struct {
ID string `json:"file_id"`
Target string `json:"target_id"`
Name string `json:"name"`
MTime JSONTime `json:"mtime"`
Resolve bool `json:"resolve"`
ResolveMode string `json:"resolve_mode"`
}

File diff suppressed because it is too large.


@@ -1,17 +0,0 @@
// Test Quatrix filesystem interface
package quatrix_test
import (
"testing"
"github.com/rclone/rclone/backend/quatrix"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestQuatrix:",
NilObject: (*quatrix.Object)(nil),
})
}


@@ -1,108 +0,0 @@
package quatrix
import (
"sync"
"time"
"github.com/rclone/rclone/fs"
)
// UploadMemoryManager dynamically calculates every chunk size for the transfer and increases or decreases it
// depending on the upload speed. This reduces overall upload time, because
// faster transfers do not have to wait for the slower ones to finish uploading.
type UploadMemoryManager struct {
m sync.Mutex
useDynamicSize bool
shared int64
reserved int64
effectiveTime time.Duration
fileUsage map[string]int64
}
// NewUploadMemoryManager is a constructor for UploadMemoryManager
func NewUploadMemoryManager(ci *fs.ConfigInfo, opt *Options) *UploadMemoryManager {
useDynamicSize := true
sharedMemory := int64(opt.MaximalSummaryChunkSize) - int64(opt.MinimalChunkSize)*int64(ci.Transfers)
if sharedMemory <= 0 {
sharedMemory = 0
useDynamicSize = false
}
return &UploadMemoryManager{
useDynamicSize: useDynamicSize,
shared: sharedMemory,
reserved: int64(opt.MinimalChunkSize),
effectiveTime: time.Duration(opt.EffectiveUploadTime),
fileUsage: map[string]int64{},
}
}
// Consume decides the amount of memory to consume for the next chunk
func (u *UploadMemoryManager) Consume(fileID string, neededMemory int64, speed float64) int64 {
if !u.useDynamicSize {
if neededMemory < u.reserved {
return neededMemory
}
return u.reserved
}
u.m.Lock()
defer u.m.Unlock()
borrowed, found := u.fileUsage[fileID]
if found {
u.shared += borrowed
borrowed = 0
}
defer func() { u.fileUsage[fileID] = borrowed }()
effectiveChunkSize := int64(speed * u.effectiveTime.Seconds())
if effectiveChunkSize < u.reserved {
effectiveChunkSize = u.reserved
}
if neededMemory < effectiveChunkSize {
effectiveChunkSize = neededMemory
}
if effectiveChunkSize <= u.reserved {
return effectiveChunkSize
}
toBorrow := effectiveChunkSize - u.reserved
if toBorrow <= u.shared {
u.shared -= toBorrow
borrowed = toBorrow
return effectiveChunkSize
}
borrowed = u.shared
u.shared = 0
return borrowed + u.reserved
}
// Return returns consumed memory for the previous chunk upload to the memory pool
func (u *UploadMemoryManager) Return(fileID string) {
if !u.useDynamicSize {
return
}
u.m.Lock()
defer u.m.Unlock()
borrowed, found := u.fileUsage[fileID]
if !found {
return
}
u.shared += borrowed
delete(u.fileUsage, fileID)
}
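A short usage sketch (the caller is hypothetical; it compiles inside this package): each transfer asks Consume for the next chunk size and calls Return when the file is done, so memory borrowed from the shared pool flows back to other transfers.
// uploadAllChunks sketches how a transfer would drive the manager.
func uploadAllChunks(u *UploadMemoryManager, fileID string, remaining int64, speed float64) {
	for remaining > 0 {
		// speed is the measured upload speed in bytes/s for this file;
		// faster transfers are granted larger chunks from the shared pool.
		chunkSize := u.Consume(fileID, remaining, speed)
		// ... upload chunkSize bytes here ...
		remaining -= chunkSize
	}
	// Hand any borrowed memory back once the file is finished.
	u.Return(fileID)
}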

File diff suppressed because it is too large.


@@ -17,17 +17,17 @@ func TestShouldAllowShutdownTwice(t *testing.T) {
}
func TestRenewalInTimeLimit(t *testing.T) {
var count atomic.Int64
var count int64
renew := NewRenew(100*time.Millisecond, func() error {
count.Add(1)
atomic.AddInt64(&count, 1)
return nil
})
time.Sleep(time.Second)
renew.Shutdown()
// there's no guarantee the CI agent can handle a simple goroutine
renewCount := count.Load()
renewCount := atomic.LoadInt64(&count)
t.Logf("renew count = %d", renewCount)
assert.Greater(t, renewCount, int64(0))
assert.Less(t, renewCount, int64(11))


@@ -232,16 +232,7 @@ E.g. the second example above should be rewritten as:
Default: "",
Help: `Specifies the path or command to run a sftp server on the remote host.
The subsystem option is ignored when server_command is defined.
If adding server_command to the configuration file please note that
it should not be enclosed in quotes, since that will make rclone fail.
A working example is:
[remote_name]
type = sftp
server_command = sudo /usr/libexec/openssh/sftp-server`,
The subsystem option is ignored when server_command is defined.`,
Advanced: true,
}, {
Name: "use_fstat",
@@ -512,7 +503,7 @@ type Fs struct {
drain *time.Timer // used to drain the pool when we stop using the connections
pacer *fs.Pacer // pacer for operations
savedpswd string
sessions atomic.Int32 // count in use sessions
sessions int32 // count in use sessions
}
// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -580,17 +571,17 @@ func (c *conn) closed() error {
//
// Call removeSession() when done
func (f *Fs) addSession() {
f.sessions.Add(1)
atomic.AddInt32(&f.sessions, 1)
}
// Show the ssh session is no longer in use
func (f *Fs) removeSession() {
f.sessions.Add(-1)
atomic.AddInt32(&f.sessions, -1)
}
// getSessions shows whether there are any sessions in use
func (f *Fs) getSessions() int32 {
return f.sessions.Load()
return atomic.LoadInt32(&f.sessions)
}
// Open a new connection to the SFTP server.
@@ -789,7 +780,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, err
}
if len(opt.SSH) != 0 && ((opt.User != currentUser && opt.User != "") || opt.Host != "" || (opt.Port != "22" && opt.Port != "")) {
if len(opt.SSH) != 0 && (opt.User != "" || opt.Host != "" || opt.Port != "") {
fs.Logf(name, "--sftp-ssh is in use - ignoring user/host/port from config - set in the parameters to --sftp-ssh (remove them from the config to silence this warning)")
}
@@ -845,7 +836,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
pubkeyFile := env.ShellExpand(opt.PubKeyFile)
//keyPem := env.ShellExpand(opt.KeyPem)
// Add ssh agent-auth if no password or file or key PEM specified
if (len(opt.SSH) == 0 && opt.Pass == "" && keyFile == "" && !opt.AskPassword && opt.KeyPem == "") || opt.KeyUseAgent {
if (opt.Pass == "" && keyFile == "" && !opt.AskPassword && opt.KeyPem == "") || opt.KeyUseAgent {
sshAgentClient, _, err := sshagent.New()
if err != nil {
return nil, fmt.Errorf("couldn't connect to ssh-agent: %w", err)
@@ -1014,7 +1005,7 @@ func (f *Fs) keyboardInteractiveReponse(user, instruction string, questions []st
// save it so on reconnection we give back the previous string.
// This removes the ability to let the user correct a mistaken entry,
// but means that reconnects are transparent.
// We'll reuse config.Pass for this, 'cos we know it's not been
// We'll re-use config.Pass for this, 'cos we know it's not been
// specified.
func (f *Fs) getPass() (string, error) {
for f.savedpswd == "" {
@@ -1602,7 +1593,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
fs.Debugf(f, "About path %q", aboutPath)
vfsStats, err = c.sftpClient.StatVFS(aboutPath)
}
f.putSftpConnection(&c, err) // Return to pool asap, if running shell command below it will be reused
f.putSftpConnection(&c, err) // Return to pool asap, if running shell command below it will be re-used
if vfsStats != nil {
total := vfsStats.TotalSpace()
free := vfsStats.FreeSpace()
@@ -2044,7 +2035,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return fmt.Errorf("Update: %w", err)
}
// Hang on to the connection for the whole upload so it doesn't get reused while we are uploading
// Hang on to the connection for the whole upload so it doesn't get re-used while we are uploading
file, err := c.sftpClient.OpenFile(o.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
if err != nil {
o.fs.putSftpConnection(&c, err)


@@ -47,7 +47,7 @@ func (s *sshClientExternal) Close() error {
// NewSession makes a new external SSH connection
func (s *sshClientExternal) NewSession() (sshSession, error) {
session := s.f.newSSHSessionExternal()
session := s.f.newSshSessionExternal()
if s.session == nil {
fs.Debugf(s.f, "ssh external: creating additional session")
}
@@ -77,7 +77,7 @@ type sshSessionExternal struct {
runningSFTP bool
}
func (f *Fs) newSSHSessionExternal() *sshSessionExternal {
func (f *Fs) newSshSessionExternal() *sshSessionExternal {
s := &sshSessionExternal{
f: f,
}


@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"net"
"sync/atomic"
"time"
smb2 "github.com/hirochachacha/go-smb2"
@@ -88,17 +89,17 @@ func (c *conn) closed() bool {
//
// Call removeSession() when done
func (f *Fs) addSession() {
f.sessions.Add(1)
atomic.AddInt32(&f.sessions, 1)
}
// Show the SMB session is no longer in use
func (f *Fs) removeSession() {
f.sessions.Add(-1)
atomic.AddInt32(&f.sessions, -1)
}
// getSessions shows whether there are any sessions in use
func (f *Fs) getSessions() int32 {
return f.sessions.Load()
return atomic.LoadInt32(&f.sessions)
}
// Open a new connection to the SMB server.


@@ -9,10 +9,8 @@ import (
"path"
"strings"
"sync"
"sync/atomic"
"time"
smb2 "github.com/hirochachacha/go-smb2"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
@@ -142,7 +140,7 @@ type Fs struct {
features *fs.Features // optional features
pacer *fs.Pacer // pacer for operations
sessions atomic.Int32
sessions int32
poolMu sync.Mutex
pool []*conn
drain *time.Timer // used to drain the pool when we stop using the connections
@@ -477,26 +475,6 @@ func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err error) {
return usage, nil
}
// Wrap a smb2.File with a custom Close method
type closeSession struct {
*smb2.File
close func() error
closed bool
}
// Close the handle and call the custom code
func (c *closeSession) Close() error {
err := c.File.Close()
if !c.closed {
err2 := c.close()
if err == nil {
err = err2
}
c.closed = true
}
return err
}
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
@@ -530,19 +508,10 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
fl, err := cn.smbShare.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
if err != nil {
o.fs.putConnection(&cn)
return nil, fmt.Errorf("failed to open: %w", err)
}
// Connection is returned in the closeSession.Close method
c := &closeSession{
File: fl,
close: func() error {
o.fs.putConnection(&cn)
return nil
},
}
return c, nil
return fl, nil
}
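The closeSession type removed above implements a reusable pattern: wrap a handle so that a cleanup callback (here, returning the connection to the pool) runs exactly once when the handle is closed. A standalone sketch with hypothetical names:
package main
import (
	"fmt"
	"io"
	"strings"
)
// closeOnce wraps a ReadCloser and runs a cleanup callback exactly once
// after the underlying Close, combining any errors.
type closeOnce struct {
	io.ReadCloser
	cleanup func() error
	closed  bool
}
func (c *closeOnce) Close() error {
	err := c.ReadCloser.Close()
	if !c.closed {
		if err2 := c.cleanup(); err == nil {
			err = err2
		}
		c.closed = true
	}
	return err
}
func main() {
	rc := io.NopCloser(strings.NewReader("data"))
	c := &closeOnce{ReadCloser: rc, cleanup: func() error {
		fmt.Println("connection returned to pool")
		return nil
	}}
	_ = c.Close()
}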
// Shutdown the backend, closing any background tasks and any


@@ -561,7 +561,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *swift.O
// returned as 0 bytes in the listing. Correct this here by
// making sure we read the full metadata for all 0 byte files.
// We don't read the metadata for directory marker objects.
if info != nil && info.Bytes == 0 && info.ContentType != "application/directory" && !o.fs.opt.NoLargeObjects {
if info != nil && info.Bytes == 0 && info.ContentType != "application/directory" {
err := o.readMetaData(ctx) // reads info and headers, returning an error
if err == fs.ErrorObjectNotFound {
// We have a dangling large object here so just return the original metadata


@@ -17,9 +17,8 @@ import (
// This is a wrapped object which returns the Union Fs as its parent
type Object struct {
*upstream.Object
fs *Fs // what this object is part of
co []upstream.Entry
writebackMu sync.Mutex
fs *Fs // what this object is part of
co []upstream.Entry
}
// Directory describes a union Directory
@@ -35,13 +34,6 @@ type entry interface {
candidates() []upstream.Entry
}
// Update o with the contents of newO excluding the lock
func (o *Object) update(newO *Object) {
o.Object = newO.Object
o.fs = newO.fs
o.co = newO.co
}
// UnWrapUpstream returns the upstream Object that this Object is wrapping
func (o *Object) UnWrapUpstream() *upstream.Object {
return o.Object
@@ -75,7 +67,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
// Update current object
o.update(newO.(*Object))
*o = *newO.(*Object)
return nil
} else if err != nil {
return err
@@ -183,25 +175,6 @@ func (o *Object) SetTier(tier string) error {
return do.SetTier(tier)
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
// Need some sort of locking to prevent multiple downloads
o.writebackMu.Lock()
defer o.writebackMu.Unlock()
// FIXME what if correct object is already in o.co
newObj, err := o.Object.Writeback(ctx)
if err != nil {
return nil, err
}
if newObj != nil {
o.Object = newObj
o.co = append(o.co, newObj) // FIXME should this append or overwrite or update?
}
return o.Object.Object.Open(ctx, options...)
}
// ModTime returns the modification date of the directory
// It returns the latest ModTime of all candidates
func (d *Directory) ModTime(ctx context.Context) (t time.Time) {


@@ -877,10 +877,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
opt: *opt,
upstreams: usedUpstreams,
}
err = upstream.Prepare(f.upstreams)
if err != nil {
return nil, err
}
f.actionPolicy, err = policy.Get(opt.ActionPolicy)
if err != nil {
return nil, err


@@ -12,7 +12,7 @@ import (
)
var (
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "PublicLink", "PutUnchecked", "MergeDirs", "OpenWriterAt", "OpenChunkWriter"}
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "PublicLink", "PutUnchecked", "MergeDirs", "OpenWriterAt"}
unimplementableObjectMethods = []string{}
)


@@ -16,7 +16,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/operations"
)
var (
@@ -26,6 +25,10 @@ var (
// Fs is a wrap of any fs and its configs
type Fs struct {
// In order to ensure memory alignment on 32-bit architectures
// when this field is accessed through sync/atomic functions,
// it must be the first entry in the struct
cacheExpiry int64 // usage cache expiry time
fs.Fs
RootFs fs.Fs
RootPath string
@@ -34,12 +37,9 @@ type Fs struct {
creatable bool
usage *fs.Usage // Cache the usage
cacheTime time.Duration // cache duration
cacheExpiry atomic.Int64 // usage cache expiry time
cacheMutex sync.RWMutex
cacheOnce sync.Once
cacheUpdate bool // if the cache is updating
writeback bool // writeback to this upstream
writebackFs *Fs // if non zero, writeback to this upstream
}
// Directory describes a wrapped Directory
@@ -73,14 +73,14 @@ func New(ctx context.Context, remote, root string, opt *common.Options) (*Fs, er
return nil, err
}
f := &Fs{
RootPath: strings.TrimRight(root, "/"),
Opt: opt,
writable: true,
creatable: true,
cacheTime: time.Duration(opt.CacheTime) * time.Second,
usage: &fs.Usage{},
RootPath: strings.TrimRight(root, "/"),
Opt: opt,
writable: true,
creatable: true,
cacheExpiry: time.Now().Unix(),
cacheTime: time.Duration(opt.CacheTime) * time.Second,
usage: &fs.Usage{},
}
f.cacheExpiry.Store(time.Now().Unix())
if strings.HasSuffix(fsPath, ":ro") {
f.writable = false
f.creatable = false
@@ -89,9 +89,6 @@ func New(ctx context.Context, remote, root string, opt *common.Options) (*Fs, er
f.writable = true
f.creatable = false
fsPath = fsPath[0 : len(fsPath)-3]
} else if strings.HasSuffix(fsPath, ":writeback") {
f.writeback = true
fsPath = fsPath[0 : len(fsPath)-len(":writeback")]
}
remote = configName + fsPath
rFs, err := cache.Get(ctx, remote)
@@ -109,29 +106,6 @@ func New(ctx context.Context, remote, root string, opt *common.Options) (*Fs, er
return f, err
}
// Prepare the configured upstreams as a group
func Prepare(fses []*Fs) error {
writebacks := 0
var writebackFs *Fs
for _, f := range fses {
if f.writeback {
writebackFs = f
writebacks++
}
}
if writebacks == 0 {
return nil
} else if writebacks > 1 {
return fmt.Errorf("can only have 1 :writeback not %d", writebacks)
}
for _, f := range fses {
if !f.writeback {
f.writebackFs = writebackFs
}
}
return nil
}
// WrapDirectory wraps an fs.Directory to include the info
// of the upstream Fs
func (f *Fs) WrapDirectory(e fs.Directory) *Directory {
@@ -322,31 +296,9 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
return do.Metadata(ctx)
}
// Writeback writes the object back and returns a new object
//
// If it returns nil, nil then the original object is OK
func (o *Object) Writeback(ctx context.Context) (*Object, error) {
if o.f.writebackFs == nil {
return nil, nil
}
newObj, err := operations.Copy(ctx, o.f.writebackFs.Fs, nil, o.Object.Remote(), o.Object)
if err != nil {
return nil, err
}
// newObj could be nil here
if newObj == nil {
fs.Errorf(o, "nil Object returned from operations.Copy")
return nil, nil
}
return &Object{
Object: newObj,
f: o.f,
}, err
}
// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if f.cacheExpiry.Load() <= time.Now().Unix() {
if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
err := f.updateUsage()
if err != nil {
return nil, ErrUsageFieldNotSupported
@@ -361,7 +313,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
//
// This is returned as 0..math.MaxInt64-1 leaving math.MaxInt64 as a sentinel
func (f *Fs) GetFreeSpace() (int64, error) {
if f.cacheExpiry.Load() <= time.Now().Unix() {
if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
err := f.updateUsage()
if err != nil {
return math.MaxInt64 - 1, ErrUsageFieldNotSupported
@@ -379,7 +331,7 @@ func (f *Fs) GetFreeSpace() (int64, error) {
//
// This is returned as 0..math.MaxInt64-1 leaving math.MaxInt64 as a sentinel
func (f *Fs) GetUsedSpace() (int64, error) {
if f.cacheExpiry.Load() <= time.Now().Unix() {
if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
err := f.updateUsage()
if err != nil {
return 0, ErrUsageFieldNotSupported
@@ -395,7 +347,7 @@ func (f *Fs) GetUsedSpace() (int64, error) {
// GetNumObjects get the number of objects of the fs
func (f *Fs) GetNumObjects() (int64, error) {
if f.cacheExpiry.Load() <= time.Now().Unix() {
if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
err := f.updateUsage()
if err != nil {
return 0, ErrUsageFieldNotSupported
@@ -450,7 +402,7 @@ func (f *Fs) updateUsageCore(lock bool) error {
defer f.cacheMutex.Unlock()
}
// Store usage
f.cacheExpiry.Store(time.Now().Add(f.cacheTime).Unix())
atomic.StoreInt64(&f.cacheExpiry, time.Now().Add(f.cacheTime).Unix())
f.usage = usage
return nil
}
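Both variants of the usage cache shown in this diff are valid; they differ in how 64-bit atomicity is guaranteed. A standalone sketch contrasting the two patterns (the types here are illustrative only):
package main
import (
	"fmt"
	"sync/atomic"
	"time"
)
// Plain-int64 pattern: with the sync/atomic functions, the field must be
// 64-bit aligned, which on 32-bit architectures is only guaranteed for
// the first word of the struct.
type legacyCache struct {
	cacheExpiry int64 // must stay the first field
}
// Go 1.19+ pattern: atomic.Int64 carries its own alignment guarantee and
// can live anywhere in the struct.
type modernCache struct {
	cacheExpiry atomic.Int64
}
func main() {
	l := &legacyCache{}
	atomic.StoreInt64(&l.cacheExpiry, time.Now().Add(time.Minute).Unix())
	fmt.Println(atomic.LoadInt64(&l.cacheExpiry))

	m := &modernCache{}
	m.cacheExpiry.Store(time.Now().Add(time.Minute).Unix())
	fmt.Println(m.cacheExpiry.Load())
}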


@@ -17,6 +17,7 @@ import (
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/backend/zoho/api"
"github.com/rclone/rclone/fs"
@@ -1168,8 +1169,31 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if o.id == "" {
return nil, errors.New("can't download - no id")
}
var start, end int64 = 0, o.size
partialContent := false
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
start = x.Offset
partialContent = true
case *fs.RangeOption:
if x.Start >= 0 {
start = x.Start
if x.End > 0 && x.End < o.size {
end = x.End + 1
}
} else {
// {-1, 20} should load the last 20 characters [len-20:len]
start = o.size - x.End
}
partialContent = true
default:
if option.Mandatory() {
fs.Logf(nil, "Unsupported mandatory option: %v", option)
}
}
}
var resp *http.Response
fs.FixRangeOption(options, o.size)
opts := rest.Opts{
Method: "GET",
Path: "/download/" + o.id,
@@ -1182,6 +1206,20 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil {
return nil, err
}
if partialContent && resp.StatusCode == 200 && resp.Header.Get("Content-Range") == "" {
if start > 0 {
// We need to read and discard the beginning of the data...
_, err = io.CopyN(io.Discard, resp.Body, start)
if err != nil {
if resp != nil {
_ = resp.Body.Close()
}
return nil, err
}
}
// ... and return a limited reader for the remainder of the data
return readers.NewLimitedReadCloser(resp.Body, end-start), nil
}
return resp.Body, nil
}
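The fallback above handles servers that ignore Range headers and reply 200 without a Content-Range: discard the first start bytes, then cap the stream at end-start bytes. A standalone sketch of the same idea using only the standard library (the backend uses rclone's readers.NewLimitedReadCloser so the response body still closes properly):
package main
import (
	"fmt"
	"io"
	"strings"
)
// emulateRange turns a full-content stream into the requested byte range
// [start, end) by discarding and limiting.
func emulateRange(body io.Reader, start, end int64) (io.Reader, error) {
	if start > 0 {
		if _, err := io.CopyN(io.Discard, body, start); err != nil {
			return nil, err
		}
	}
	return io.LimitReader(body, end-start), nil
}
func main() {
	full := strings.NewReader("0123456789")
	r, err := emulateRange(full, 3, 8)
	if err != nil {
		panic(err)
	}
	b, _ := io.ReadAll(r)
	fmt.Printf("%s\n", b) // 34567
}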


@@ -6,4 +6,3 @@
<abhi18av@users.noreply.github.com>
<ankur0493@gmail.com>
<agupta@egnyte.com>
<ricci@disroot.org>


@@ -25,7 +25,6 @@ docs = [
"flags.md",
"docker.md",
"bisync.md",
"release_signing.md",
# Keep these alphabetical by full name
"fichier.md",
@@ -62,7 +61,6 @@ docs = [
"opendrive.md",
"oracleobjectstorage.md",
"qingstor.md",
"quatrix.md",
"sia.md",
"swift.md",
"pcloud.md",
@@ -70,7 +68,6 @@ docs = [
"premiumizeme.md",
"protondrive.md",
"putio.md",
"protondrive.md",
"seafile.md",
"sftp.md",
"smb.md",


@@ -614,8 +614,6 @@ func (b *bisyncTest) runBisync(ctx context.Context, args []string) (err error) {
opt.DryRun = true
case "force":
opt.Force = true
case "create-empty-src-dirs":
opt.CreateEmptySrcDirs = true
case "remove-empty-dirs":
opt.RemoveEmptyDirs = true
case "check-sync-only":
@@ -1165,10 +1163,6 @@ func (b *bisyncTest) newReplacer(mangle bool) *strings.Replacer {
b.workDir + slash, "{workdir/}",
b.path1, "{path1/}",
b.path2, "{path2/}",
"//?/" + strings.TrimSuffix(strings.Replace(b.path1, slash, "/", -1), "/"), "{path1}", // fix windows-specific issue
"//?/" + strings.TrimSuffix(strings.Replace(b.path2, slash, "/", -1), "/"), "{path2}",
strings.TrimSuffix(b.path1, slash), "{path1}", // ensure it's still recognized without trailing slash
strings.TrimSuffix(b.path2, slash), "{path2}",
b.sessionName, "{session}",
}
if fixSlash {


@@ -27,21 +27,18 @@ import (
// Options keep bisync options
type Options struct {
Resync bool
CheckAccess bool
CheckFilename string
CheckSync CheckSyncMode
CreateEmptySrcDirs bool
RemoveEmptyDirs bool
MaxDelete int // percentage from 0 to 100
Force bool
FiltersFile string
Workdir string
DryRun bool
NoCleanup bool
SaveQueues bool // save extra debugging files (test only flag)
IgnoreListingChecksum bool
Resilient bool
Resync bool
CheckAccess bool
CheckFilename string
CheckSync CheckSyncMode
RemoveEmptyDirs bool
MaxDelete int // percentage from 0 to 100
Force bool
FiltersFile string
Workdir string
DryRun bool
NoCleanup bool
SaveQueues bool // save extra debugging files (test only flag)
}
// Default values
@@ -106,14 +103,11 @@ func init() {
flags.StringVarP(cmdFlags, &Opt.CheckFilename, "check-filename", "", Opt.CheckFilename, makeHelp("Filename for --check-access (default: {CHECKFILE})"), "")
flags.BoolVarP(cmdFlags, &Opt.Force, "force", "", Opt.Force, "Bypass --max-delete safety check and run the sync. Consider using with --verbose", "")
flags.FVarP(cmdFlags, &Opt.CheckSync, "check-sync", "", "Controls comparison of final listings: true|false|only (default: true)", "")
flags.BoolVarP(cmdFlags, &Opt.CreateEmptySrcDirs, "create-empty-src-dirs", "", Opt.CreateEmptySrcDirs, "Sync creation and deletion of empty directories. (Not compatible with --remove-empty-dirs)", "")
flags.BoolVarP(cmdFlags, &Opt.RemoveEmptyDirs, "remove-empty-dirs", "", Opt.RemoveEmptyDirs, "Remove ALL empty directories at the final cleanup step.", "")
flags.BoolVarP(cmdFlags, &Opt.RemoveEmptyDirs, "remove-empty-dirs", "", Opt.RemoveEmptyDirs, "Remove empty directories at the final cleanup step.", "")
flags.StringVarP(cmdFlags, &Opt.FiltersFile, "filters-file", "", Opt.FiltersFile, "Read filtering patterns from a file", "")
flags.StringVarP(cmdFlags, &Opt.Workdir, "workdir", "", Opt.Workdir, makeHelp("Use custom working dir - useful for testing. (default: {WORKDIR})"), "")
flags.BoolVarP(cmdFlags, &tzLocal, "localtime", "", tzLocal, "Use local time in listings (default: UTC)", "")
flags.BoolVarP(cmdFlags, &Opt.NoCleanup, "no-cleanup", "", Opt.NoCleanup, "Retain working files (useful for troubleshooting and testing).", "")
flags.BoolVarP(cmdFlags, &Opt.IgnoreListingChecksum, "ignore-listing-checksum", "", Opt.IgnoreListingChecksum, "Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)", "")
flags.BoolVarP(cmdFlags, &Opt.Resilient, "resilient", "", Opt.Resilient, "Allow future runs to retry after certain less-serious errors, instead of requiring --resync. Use at your own risk!", "")
}
// bisync command definition
@@ -216,13 +210,9 @@ func (opt *Options) applyFilters(ctx context.Context) (context.Context, error) {
}
if opt.Resync {
if opt.DryRun {
fs.Infof(nil, "Skipped storing filters file hash to %s as --dry-run is set", hashFile)
} else {
fs.Infof(nil, "Storing filters file hash to %s", hashFile)
if err := os.WriteFile(hashFile, []byte(gotHash), bilib.PermSecure); err != nil {
return ctx, err
}
fs.Infof(nil, "Storing filters file hash to %s", hashFile)
if err := os.WriteFile(hashFile, []byte(gotHash), bilib.PermSecure); err != nil {
return ctx, err
}
}

View File

@@ -3,18 +3,13 @@
package bisync
import (
"bytes"
"context"
"fmt"
"path/filepath"
"sort"
"strings"
"github.com/rclone/rclone/cmd/bisync/bilib"
"github.com/rclone/rclone/cmd/check"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/operations"
)
@@ -95,47 +90,6 @@ func (ds *deltaSet) printStats() {
ds.msg, nAll, nNew, nNewer, nOlder, nDeleted)
}
// check potential conflicts (to avoid renaming if already identical)
func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter.Filter, fs1, fs2 fs.Fs) (bilib.Names, error) {
matches := bilib.Names{}
if filterCheck.HaveFilesFrom() {
fs.Debugf(nil, "There are potential conflicts to check.")
opt, close, checkopterr := check.GetCheckOpt(b.fs1, b.fs2)
if checkopterr != nil {
b.critical = true
b.retryable = true
fs.Debugf(nil, "GetCheckOpt error: %v", checkopterr)
return matches, checkopterr
}
defer close()
opt.Match = new(bytes.Buffer)
// TODO: consider using custom CheckFn to act like cryptcheck, if either fs is a crypt remote and -c has been passed
// note that cryptCheck() is not currently exported
fs.Infof(nil, "Checking potential conflicts...")
check := operations.Check(ctxCheck, opt)
fs.Infof(nil, "Finished checking the potential conflicts. %s", check)
//reset error count, because we don't want to count check errors as bisync errors
accounting.Stats(ctxCheck).ResetErrors()
//return the list of identical files to check against later
if len(fmt.Sprint(opt.Match)) > 0 {
matches = bilib.ToNames(strings.Split(fmt.Sprint(opt.Match), "\n"))
}
if matches.NotEmpty() {
fs.Debugf(nil, "The following potential conflicts were determined to be identical. %v", matches)
} else {
fs.Debugf(nil, "None of the conflicts were determined to be identical.")
}
}
return matches, nil
}
// findDeltas
func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing, newListing, msg string) (ds *deltaSet, err error) {
var old, now *fileList
@@ -229,52 +183,6 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
ctxMove := b.opt.setDryRun(ctx)
// efficient isDir check
// we load the listing just once and store only the dirs
dirs1, dirs1Err := b.listDirsOnly(1)
if dirs1Err != nil {
b.critical = true
b.retryable = true
fs.Debugf(nil, "Error generating dirsonly list for path1: %v", dirs1Err)
return
}
dirs2, dirs2Err := b.listDirsOnly(2)
if dirs2Err != nil {
b.critical = true
b.retryable = true
fs.Debugf(nil, "Error generating dirsonly list for path2: %v", dirs2Err)
return
}
// build a list of only the "deltaOther"s so we don't have to check more files than necessary
// this is essentially the same as running rclone check with a --files-from filter, then exempting the --match results from being renamed
// we therefore avoid having to list the same directory more than once.
// we are intentionally overriding DryRun here because we need to perform the check, even during a dry run, or the results would be inaccurate.
// check is a read-only operation by its nature, so it's already "dry" in that sense.
ctxNew, ciCheck := fs.AddConfig(ctx)
ciCheck.DryRun = false
ctxCheck, filterCheck := filter.AddConfig(ctxNew)
for _, file := range ds1.sort() {
d1 := ds1.deltas[file]
if d1.is(deltaOther) {
d2 := ds2.deltas[file]
if d2.is(deltaOther) {
if err := filterCheck.AddFile(file); err != nil {
fs.Debugf(nil, "Non-critical error adding file to list of potential conflicts to check: %s", err)
} else {
fs.Debugf(nil, "Added file to list of potential conflicts to check: %s", file)
}
}
}
}
//if there are potential conflicts to check, check them all here (outside the loop) in one fell swoop
matches, err := b.checkconflicts(ctxCheck, filterCheck, b.fs1, b.fs2)
for _, file := range ds1.sort() {
p1 := path1 + file
p2 := path2 + file
@@ -291,34 +199,22 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
handled.Add(file)
} else if d2.is(deltaOther) {
b.indent("!WARNING", file, "New or changed in both paths")
//if files are identical, leave them alone instead of renaming
if dirs1.has(file) && dirs2.has(file) {
fs.Debugf(nil, "This is a directory, not a file. Skipping equality check and will not rename: %s", file)
} else {
equal := matches.Has(file)
if equal {
fs.Infof(nil, "Files are equal! Skipping: %s", file)
} else {
fs.Debugf(nil, "Files are NOT equal: %s", file)
b.indent("!Path1", p1+"..path1", "Renaming Path1 copy")
if err = operations.MoveFile(ctxMove, b.fs1, b.fs1, file+"..path1", file); err != nil {
err = fmt.Errorf("path1 rename failed for %s: %w", p1, err)
b.critical = true
return
}
b.indent("!Path1", p2+"..path1", "Queue copy to Path2")
copy1to2.Add(file + "..path1")
b.indent("!Path2", p2+"..path2", "Renaming Path2 copy")
if err = operations.MoveFile(ctxMove, b.fs2, b.fs2, file+"..path2", file); err != nil {
err = fmt.Errorf("path2 rename failed for %s: %w", file, err)
return
}
b.indent("!Path2", p1+"..path2", "Queue copy to Path1")
copy2to1.Add(file + "..path2")
}
b.indent("!Path1", p1+"..path1", "Renaming Path1 copy")
if err = operations.MoveFile(ctxMove, b.fs1, b.fs1, file+"..path1", file); err != nil {
err = fmt.Errorf("path1 rename failed for %s: %w", p1, err)
b.critical = true
return
}
b.indent("!Path1", p2+"..path1", "Queue copy to Path2")
copy1to2.Add(file + "..path1")
b.indent("!Path2", p2+"..path2", "Renaming Path2 copy")
if err = operations.MoveFile(ctxMove, b.fs2, b.fs2, file+"..path2", file); err != nil {
err = fmt.Errorf("path2 rename failed for %s: %w", file, err)
return
}
b.indent("!Path2", p1+"..path2", "Queue copy to Path1")
copy2to1.Add(file + "..path2")
handled.Add(file)
}
} else {
@@ -362,9 +258,6 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
if err != nil {
return
}
//copy empty dirs from path2 to path1 (if --create-empty-src-dirs)
b.syncEmptyDirs(ctx, b.fs1, copy2to1, dirs2, "make")
}
if copy1to2.NotEmpty() {
@@ -374,9 +267,6 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
if err != nil {
return
}
//copy empty dirs from path1 to path2 (if --create-empty-src-dirs)
b.syncEmptyDirs(ctx, b.fs2, copy1to2, dirs1, "make")
}
if delete1.NotEmpty() {
@@ -386,9 +276,6 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
if err != nil {
return
}
//propagate deletions of empty dirs from path2 to path1 (if --create-empty-src-dirs)
b.syncEmptyDirs(ctx, b.fs1, delete1, dirs1, "remove")
}
if delete2.NotEmpty() {
@@ -398,9 +285,6 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
if err != nil {
return
}
//propagate deletions of empty dirs from path1 to path2 (if --create-empty-src-dirs)
b.syncEmptyDirs(ctx, b.fs2, delete2, dirs2, "remove")
}
return
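
The conflict pass above only ever checks paths that changed on both sides, and it batches them into a single check run instead of one network round-trip per file. A toy model of that candidate selection:

package main

import (
	"fmt"
	"sort"
)

// changedBoth returns the sorted set of paths marked changed in both deltas —
// the only candidates that need an equality check before renaming.
func changedBoth(ds1, ds2 map[string]bool) []string {
	var out []string
	for file, changed := range ds1 {
		if changed && ds2[file] {
			out = append(out, file)
		}
	}
	sort.Strings(out)
	return out
}

func main() {
	ds1 := map[string]bool{"a.txt": true, "b.txt": true}
	ds2 := map[string]bool{"a.txt": true, "c.txt": true}
	fmt.Println(changedBoth(ds1, ds2)) // [a.txt] — one batched check, not N
}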

View File

@@ -27,16 +27,11 @@ var rcHelp = makeHelp(`This takes the following parameters
- checkFilename - file name for checkAccess (default: {CHECKFILE})
- maxDelete - abort sync if percentage of deleted files is above
this threshold (default: {MAXDELETE})
- force - Bypass maxDelete safety check and run the sync
- force - maxDelete safety check and run the sync
- checkSync - |true| by default, |false| disables comparison of final listings,
|only| will skip sync, only compare listings from the last run
- createEmptySrcDirs - Sync creation and deletion of empty directories.
(Not compatible with --remove-empty-dirs)
- removeEmptyDirs - remove empty directories at the final cleanup step
- filtersFile - read filtering patterns from a file
- ignoreListingChecksum - Do not use checksums for listings
- resilient - Allow future runs to retry after certain less-serious errors, instead of requiring resync.
Use at your own risk!
- workdir - server directory for history files (default: {WORKDIR})
- noCleanup - retain working files

View File

@@ -43,11 +43,10 @@ var tzLocal = false
// fileInfo describes a file
type fileInfo struct {
size int64
time time.Time
hash string
id string
flags string
size int64
time time.Time
hash string
id string
}
// fileList represents a listing
@@ -77,18 +76,17 @@ func (ls *fileList) get(file string) *fileInfo {
return ls.info[file]
}
func (ls *fileList) put(file string, size int64, time time.Time, hash, id string, flags string) {
func (ls *fileList) put(file string, size int64, time time.Time, hash, id string) {
fi := ls.get(file)
if fi != nil {
fi.size = size
fi.time = time
} else {
fi = &fileInfo{
size: size,
time: time,
hash: hash,
id: id,
flags: flags,
size: size,
time: time,
hash: hash,
id: id,
}
ls.info[file] = fi
ls.list = append(ls.list, file)
@@ -154,11 +152,7 @@ func (ls *fileList) save(ctx context.Context, listing string) error {
id = "-"
}
flags := fi.flags
if flags == "" {
flags = "-"
}
flags := "-"
_, err = fmt.Fprintf(file, lineFormat, flags, fi.size, hash, id, time, remote)
if err != nil {
_ = file.Close()
@@ -223,7 +217,7 @@ func (b *bisyncRun) loadListing(listing string) (*fileList, error) {
}
}
if (flags != "-" && flags != "d") || id != "-" || sizeErr != nil || timeErr != nil || hashErr != nil || nameErr != nil {
if flags != "-" || id != "-" || sizeErr != nil || timeErr != nil || hashErr != nil || nameErr != nil {
fs.Logf(listing, "Ignoring incorrect line: %q", line)
continue
}
@@ -235,7 +229,7 @@ func (b *bisyncRun) loadListing(listing string) (*fileList, error) {
}
}
ls.put(nameVal, sizeVal, timeVal.In(TZ), hashVal, id, flags)
ls.put(nameVal, sizeVal, timeVal.In(TZ), hashVal, id)
}
return ls, nil
@@ -259,20 +253,15 @@ func (b *bisyncRun) makeListing(ctx context.Context, f fs.Fs, listing string) (l
ci := fs.GetConfig(ctx)
depth := ci.MaxDepth
hashType := hash.None
if !b.opt.IgnoreListingChecksum {
// Currently bisync just honors --ignore-listing-checksum
// (note that this is different from --ignore-checksum)
if !ci.IgnoreChecksum {
// Currently bisync just honors --ignore-checksum
// TODO add full support for checksums and related flags
hashType = f.Hashes().GetOne()
}
ls = newFileList()
ls.hash = hashType
var lock sync.Mutex
listType := walk.ListObjects
if b.opt.CreateEmptySrcDirs {
listType = walk.ListAll
}
err = walk.ListR(ctx, f, "", false, depth, listType, func(entries fs.DirEntries) error {
err = walk.ListR(ctx, f, "", false, depth, walk.ListObjects, func(entries fs.DirEntries) error {
var firstErr error
entries.ForObject(func(o fs.Object) {
//tr := accounting.Stats(ctx).NewCheckingTransfer(o) // TODO
@@ -287,27 +276,12 @@ func (b *bisyncRun) makeListing(ctx context.Context, f fs.Fs, listing string) (l
}
}
time := o.ModTime(ctx).In(TZ)
id := "" // TODO
flags := "-" // "-" for a file and "d" for a directory
id := "" // TODO
lock.Lock()
ls.put(o.Remote(), o.Size(), time, hashVal, id, flags)
ls.put(o.Remote(), o.Size(), time, hashVal, id)
lock.Unlock()
//tr.Done(ctx, nil) // TODO
})
if b.opt.CreateEmptySrcDirs {
entries.ForDir(func(o fs.Directory) {
var (
hashVal string
)
time := o.ModTime(ctx).In(TZ)
id := "" // TODO
flags := "d" // "-" for a file and "d" for a directory
lock.Lock()
//record size as 0 instead of -1, so bisync doesn't think it's a google doc
ls.put(o.Remote(), 0, time, hashVal, id, flags)
lock.Unlock()
})
}
return firstErr
})
if err == nil {
@@ -326,53 +300,5 @@ func (b *bisyncRun) checkListing(ls *fileList, listing, msg string) error {
}
fs.Errorf(nil, "Empty %s listing. Cannot sync to an empty directory: %s", msg, listing)
b.critical = true
b.retryable = true
return fmt.Errorf("empty %s listing: %s", msg, listing)
}
// listingNum should be 1 for path1 or 2 for path2
func (b *bisyncRun) loadListingNum(listingNum int) (*fileList, error) {
listingpath := b.basePath + ".path1.lst-new"
if listingNum == 2 {
listingpath = b.basePath + ".path2.lst-new"
}
if b.opt.DryRun {
listingpath = strings.Replace(listingpath, ".lst-", ".lst-dry-", 1)
}
fs.Debugf(nil, "loading listing for path %d at: %s", listingNum, listingpath)
return b.loadListing(listingpath)
}
func (b *bisyncRun) listDirsOnly(listingNum int) (*fileList, error) {
var fulllisting *fileList
var dirsonly = newFileList()
var err error
if !b.opt.CreateEmptySrcDirs {
return dirsonly, err
}
fulllisting, err = b.loadListingNum(listingNum)
if err != nil {
b.critical = true
b.retryable = true
fs.Debugf(nil, "Error loading listing to generate dirsonly list: %v", err)
return dirsonly, err
}
for _, obj := range fulllisting.list {
info := fulllisting.get(obj)
if info.flags == "d" {
fs.Debugf(nil, "found a dir: %s", obj)
dirsonly.put(obj, info.size, info.time, info.hash, info.id, info.flags)
} else {
fs.Debugf(nil, "not a dir: %s", obj)
}
}
return dirsonly, err
}
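
The golden listings elsewhere in this diff show the line shape this code reads and writes, e.g. `- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file10.txt"`. A sketch of producing one such line, assuming a plain space-separated lineFormat (the real constant is defined elsewhere in listing.go and may pad fields differently):

package main

import "fmt"

// Assumed field order, per the Fprintf call above: flags, size, hash, id,
// modtime, quoted name. "-" marks a file; "d" marks a directory.
const lineFormat = "%s %d %s %s %s %q\n"

func main() {
	fmt.Printf(lineFormat,
		"-",                                    // flags: regular file
		int64(19),                              // size in bytes
		"md5:7fe98ed88552b828777d8630900346b8", // hash
		"-",                                    // id (reserved, unused so far)
		"2001-01-02T00:00:00.000000000+0000",   // modtime in TZ
		"file10.txt")                           // remote path, quoted to survive spaces
}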

View File

@@ -25,14 +25,13 @@ var ErrBisyncAborted = errors.New("bisync aborted")
// bisyncRun keeps bisync runtime state
type bisyncRun struct {
fs1 fs.Fs
fs2 fs.Fs
abort bool
critical bool
retryable bool
basePath string
workDir string
opt *Options
fs1 fs.Fs
fs2 fs.Fs
abort bool
critical bool
basePath string
workDir string
opt *Options
}
// Bisync handles lock file, performs bisync run and checks exit status
@@ -124,19 +123,14 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
}
if b.critical {
if b.retryable && b.opt.Resilient {
fs.Errorf(nil, "Bisync critical error: %v", err)
fs.Errorf(nil, "Bisync aborted. Error is retryable without --resync due to --resilient mode.")
} else {
if bilib.FileExists(listing1) {
_ = os.Rename(listing1, listing1+"-err")
}
if bilib.FileExists(listing2) {
_ = os.Rename(listing2, listing2+"-err")
}
fs.Errorf(nil, "Bisync critical error: %v", err)
fs.Errorf(nil, "Bisync aborted. Must run --resync to recover.")
if bilib.FileExists(listing1) {
_ = os.Rename(listing1, listing1+"-err")
}
if bilib.FileExists(listing2) {
_ = os.Rename(listing2, listing2+"-err")
}
fs.Errorf(nil, "Bisync critical error: %v", err)
fs.Errorf(nil, "Bisync aborted. Must run --resync to recover.")
return ErrBisyncAborted
}
if b.abort {
@@ -158,7 +152,6 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
fs.Infof(nil, "Validating listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2))
if err = b.checkSync(listing1, listing2); err != nil {
b.critical = true
b.retryable = true
}
return err
}
@@ -183,7 +176,6 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
var fctx context.Context
if fctx, err = b.opt.applyFilters(octx); err != nil {
b.critical = true
b.retryable = true
return
}
@@ -196,7 +188,6 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
if !bilib.FileExists(listing1) || !bilib.FileExists(listing2) {
// On prior critical error abort, the prior listings are renamed to .lst-err to lock out further runs
b.critical = true
b.retryable = true
return errors.New("cannot find prior Path1 or Path2 listings, likely due to critical error on prior run")
}
@@ -224,7 +215,6 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
err = b.checkAccess(ds1.checkFiles, ds2.checkFiles)
if err != nil {
b.critical = true
b.retryable = true
return
}
}
@@ -265,7 +255,6 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
changes1, changes2, err = b.applyDeltas(octx, ds1, ds2)
if err != nil {
b.critical = true
// b.retryable = true // not sure about this one
return err
}
}
@@ -294,7 +283,6 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
}
if err != nil {
b.critical = true
b.retryable = true
return err
}
@@ -322,7 +310,6 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
}
if err != nil {
b.critical = true
b.retryable = true
return err
}
}
@@ -354,39 +341,6 @@ func (b *bisyncRun) resync(octx, fctx context.Context, listing1, listing2 string
return err
}
// Check access health on the Path1 and Path2 filesystems
// enforce even though this is --resync
if b.opt.CheckAccess {
fs.Infof(nil, "Checking access health")
ds1 := &deltaSet{
checkFiles: bilib.Names{},
}
ds2 := &deltaSet{
checkFiles: bilib.Names{},
}
for _, file := range filesNow1.list {
if filepath.Base(file) == b.opt.CheckFilename {
ds1.checkFiles.Add(file)
}
}
for _, file := range filesNow2.list {
if filepath.Base(file) == b.opt.CheckFilename {
ds2.checkFiles.Add(file)
}
}
err = b.checkAccess(ds1.checkFiles, ds2.checkFiles)
if err != nil {
b.critical = true
b.retryable = true
return err
}
}
copy2to1 := []string{}
for _, file := range filesNow2.list {
if !filesNow1.has(file) {
@@ -413,34 +367,11 @@ func (b *bisyncRun) resync(octx, fctx context.Context, listing1, listing2 string
// prevent overwriting Google Doc files (their size is -1)
filterSync.Opt.MinSize = 0
}
if err = sync.CopyDir(ctxSync, b.fs2, b.fs1, b.opt.CreateEmptySrcDirs); err != nil {
if err = sync.Sync(ctxSync, b.fs2, b.fs1, false); err != nil {
b.critical = true
return err
}
if b.opt.CreateEmptySrcDirs {
// copy Path2 back to Path1, for empty dirs
// the fastCopy above cannot include directories, because it relies on --files-from for filtering,
// so instead we'll copy them here, relying on fctx for our filtering.
// This preserves the original resync order for backward compatibility. It is essentially:
// rclone copy Path2 Path1 --ignore-existing
// rclone copy Path1 Path2 --create-empty-src-dirs
// rclone copy Path2 Path1 --create-empty-src-dirs
// although if we were starting from scratch, it might be cleaner and faster to just do:
// rclone copy Path2 Path1 --create-empty-src-dirs
// rclone copy Path1 Path2 --create-empty-src-dirs
fs.Infof(nil, "Resynching Path2 to Path1 (for empty dirs)")
//note copy (not sync) and dst comes before src
if err = sync.CopyDir(ctxSync, b.fs1, b.fs2, b.opt.CreateEmptySrcDirs); err != nil {
b.critical = true
return err
}
}
fs.Infof(nil, "Resync updating listings")
if _, err = b.makeListing(fctx, b.fs1, listing1); err != nil {
b.critical = true

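On the abort path above, the prior listings are renamed to *-err so that subsequent runs are locked out until --resync recreates them. A self-contained sketch of that quarantine step (quarantineListings is a hypothetical helper; the real code inlines the renames and uses bilib.FileExists rather than os.Stat):

package main

import "os"

// quarantineListings renames each existing listing to <name>-err, locking
// out further bisync runs until --resync rebuilds the listings.
func quarantineListings(listings ...string) {
	for _, l := range listings {
		if _, err := os.Stat(l); err == nil {
			_ = os.Rename(l, l+"-err")
		}
	}
}

func main() {
	quarantineListings("session.path1.lst", "session.path2.lst")
}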
View File

@@ -3,7 +3,6 @@ package bisync
import (
"context"
"fmt"
"sort"
"github.com/rclone/rclone/cmd/bisync/bilib"
"github.com/rclone/rclone/fs"
@@ -24,7 +23,7 @@ func (b *bisyncRun) fastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.
}
}
return sync.CopyDir(ctxCopy, fdst, fsrc, b.opt.CreateEmptySrcDirs)
return sync.CopyDir(ctxCopy, fdst, fsrc, false)
}
func (b *bisyncRun) fastDelete(ctx context.Context, f fs.Fs, files bilib.Names, queueName string) error {
@@ -33,14 +32,7 @@ func (b *bisyncRun) fastDelete(ctx context.Context, f fs.Fs, files bilib.Names,
}
transfers := fs.GetConfig(ctx).Transfers
ctxRun, filterDelete := filter.AddConfig(b.opt.setDryRun(ctx))
for _, file := range files.ToList() {
if err := filterDelete.AddFile(file); err != nil {
return err
}
}
ctxRun := b.opt.setDryRun(ctx)
objChan := make(fs.ObjectsChan, transfers)
errChan := make(chan error, 1)
@@ -61,36 +53,6 @@ func (b *bisyncRun) fastDelete(ctx context.Context, f fs.Fs, files bilib.Names,
return err
}
// operation should be "make" or "remove"
func (b *bisyncRun) syncEmptyDirs(ctx context.Context, dst fs.Fs, candidates bilib.Names, dirsList *fileList, operation string) {
if b.opt.CreateEmptySrcDirs && (!b.opt.Resync || operation == "make") {
candidatesList := candidates.ToList()
if operation == "remove" {
// reverse the sort order to ensure we remove subdirs before parent dirs
sort.Sort(sort.Reverse(sort.StringSlice(candidatesList)))
}
for _, s := range candidatesList {
var direrr error
if dirsList.has(s) { //make sure it's a dir, not a file
if operation == "remove" {
//note: we need to use Rmdirs instead of Rmdir because directories will fail to delete if they have other empty dirs inside of them.
direrr = operations.Rmdirs(ctx, dst, s, false)
} else if operation == "make" {
direrr = operations.Mkdir(ctx, dst, s)
} else {
direrr = fmt.Errorf("invalid operation. Expected 'make' or 'remove', received %q", operation)
}
if direrr != nil {
fs.Debugf(nil, "Error syncing directory: %v", direrr)
}
}
}
}
}
func (b *bisyncRun) saveQueue(files bilib.Names, jobName string) error {
if !b.opt.SaveQueues {
return nil

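syncEmptyDirs above reverses the candidate sort before removals; for slash-separated paths, reverse-lexicographic order guarantees every child precedes its parent, so no directory is removed while it still contains another. A quick demonstration:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Children sort after their parents lexicographically, so reversing the
	// order removes subdirectories before the directories that contain them.
	dirs := []string{"a", "a/b", "a/b/c", "z"}
	sort.Sort(sort.Reverse(sort.StringSlice(dirs)))
	fmt.Println(dirs) // [z a/b/c a/b a]
}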
View File

@@ -26,7 +26,6 @@ func rcBisync(ctx context.Context, in rc.Params) (out rc.Params, err error) {
if dryRun, err := in.GetBool("dryRun"); err == nil {
ci.DryRun = dryRun
opt.DryRun = dryRun
} else if rc.NotErrParamNotFound(err) {
return nil, err
}
@@ -49,21 +48,12 @@ func rcBisync(ctx context.Context, in rc.Params) (out rc.Params, err error) {
if opt.Force, err = in.GetBool("force"); rc.NotErrParamNotFound(err) {
return
}
if opt.CreateEmptySrcDirs, err = in.GetBool("createEmptySrcDirs"); rc.NotErrParamNotFound(err) {
return
}
if opt.RemoveEmptyDirs, err = in.GetBool("removeEmptyDirs"); rc.NotErrParamNotFound(err) {
return
}
if opt.NoCleanup, err = in.GetBool("noCleanup"); rc.NotErrParamNotFound(err) {
return
}
if opt.IgnoreListingChecksum, err = in.GetBool("ignoreListingChecksum"); rc.NotErrParamNotFound(err) {
return
}
if opt.Resilient, err = in.GetBool("resilient"); rc.NotErrParamNotFound(err) {
return
}
if opt.CheckFilename, err = in.GetString("checkFilename"); rc.NotErrParamNotFound(err) {
return
@@ -79,9 +69,6 @@ func rcBisync(ctx context.Context, in rc.Params) (out rc.Params, err error) {
if rc.NotErrParamNotFound(err) {
return nil, err
}
if checkSync == "" {
checkSync = "true"
}
if err := opt.CheckSync.Set(checkSync); err != nil {
return nil, err
}
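
The handler above pulls each option from rc.Params via GetBool/GetString and treats a missing key as "keep the default". A minimal sketch of building such params and reading one back (remote names are illustrative):

package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/rc"
)

func main() {
	// Keys mirror those read by the handler above; values are hypothetical.
	in := rc.Params{
		"path1":  "remote1:path",
		"path2":  "remote2:path",
		"dryRun": true,
	}
	dryRun, err := in.GetBool("dryRun")
	fmt.Println(dryRun, err) // true <nil>
}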

View File

@@ -4,7 +4,7 @@
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file10.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file11.txt"
- 13 md5:fb3ecfb2800400fb01b0bfd39903e9fb - 2001-01-02T00:00:00.000000000+0000 "file2.txt"
- 39 md5:0860a03592626642f8fd6c8bfb447d2a - 2001-03-04T00:00:00.000000000+0000 "file5.txt..path1"
- 39 md5:979a803b15d27df0c31ad7d29006d10b - 2001-01-02T00:00:00.000000000+0000 "file5.txt..path2"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-03-04T00:00:00.000000000+0000 "file5.txt..path1"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file5.txt..path2"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file6.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file7.txt"

View File

@@ -4,5 +4,5 @@
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file11.txt"
- 13 md5:fb3ecfb2800400fb01b0bfd39903e9fb - 2001-01-02T00:00:00.000000000+0000 "file2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 39 md5:0860a03592626642f8fd6c8bfb447d2a - 2001-03-04T00:00:00.000000000+0000 "file5.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-03-04T00:00:00.000000000+0000 "file5.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file7.txt"

View File

@@ -4,7 +4,7 @@
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file10.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file11.txt"
- 13 md5:fb3ecfb2800400fb01b0bfd39903e9fb - 2001-01-02T00:00:00.000000000+0000 "file2.txt"
- 39 md5:0860a03592626642f8fd6c8bfb447d2a - 2001-03-04T00:00:00.000000000+0000 "file5.txt..path1"
- 39 md5:979a803b15d27df0c31ad7d29006d10b - 2001-01-02T00:00:00.000000000+0000 "file5.txt..path2"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-03-04T00:00:00.000000000+0000 "file5.txt..path1"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file5.txt..path2"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file6.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file7.txt"

View File

@@ -4,5 +4,5 @@
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file10.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 39 md5:979a803b15d27df0c31ad7d29006d10b - 2001-01-02T00:00:00.000000000+0000 "file5.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file5.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file6.txt"

View File

@@ -68,11 +68,6 @@ INFO : - Path2 File was deleted - file7.txt
INFO : - Path2 File was deleted - file8.txt
INFO : Path2: 7 changes: 1 new, 3 newer, 0 older, 3 deleted
INFO : Applying changes
INFO : Checking potential conflicts...
ERROR : file5.txt: md5 differ
NOTICE: Local file system at {path2}: 1 differences found
NOTICE: Local file system at {path2}: 1 errors while checking
INFO : Finished checking the potential conflicts. 1 differences found
INFO : - Path1 Queue copy to Path2 - {path2/}file11.txt
INFO : - Path1 Queue copy to Path2 - {path2/}file2.txt
INFO : - Path2 Queue delete - {path2/}file4.txt

View File

@@ -1 +1 @@
This file is newer and not equal to 5R
This file is newer

View File

@@ -1 +1 @@
This file is newer and not equal to 5L
This file is newer

View File

@@ -39,12 +39,10 @@ Bisync error: bisync aborted
(10) : move-listings path2-missing
(11) : test 3. put the remote subdir .chk_file back, run resync.
(12) : copy-file {path1/}subdir/.chk_file {path2/}subdir/
(12) : copy-file {path1/}subdir/.chk_file {path2/}
(13) : bisync check-access resync check-filename=.chk_file
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying unique Path2 files to Path1
INFO : Checking access health
INFO : Found 2 matching ".chk_file" files on both paths
INFO : Resynching Path1 to Path2
INFO : Resync updating listings
INFO : Bisync successful

View File

@@ -20,7 +20,7 @@ bisync check-access check-filename=.chk_file
move-listings path2-missing
test 3. put the remote subdir .chk_file back, run resync.
copy-file {path1/}subdir/.chk_file {path2/}subdir/
copy-file {path1/}subdir/.chk_file {path2/}
bisync check-access resync check-filename=.chk_file
test 4. run sync with check-access. should pass.

View File

@@ -1,7 +0,0 @@
# bisync listing v1 from test
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.txt"

View File

@@ -1,7 +0,0 @@
# bisync listing v1 from test
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.txt"

View File

@@ -1,7 +0,0 @@
# bisync listing v1 from test
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.txt"

View File

@@ -1,7 +0,0 @@
# bisync listing v1 from test
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.txt"

Some files were not shown because too many files have changed in this diff.