Mirror of https://github.com/rclone/rclone.git (synced 2026-02-20 19:33:28 +00:00)

Compare commits

1 commit

Author:  Nick Craig-Wood
SHA1:    444a6e6d2d
Message: Make http servers obey --dump headers,bodies
Date:    2023-07-30 13:26:20 +01:00

428 changed files with 60005 additions and 74044 deletions
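The commit being compared makes rclone's HTTP servers honour `--dump headers,bodies`. For orientation only, here is a hedged sketch of the general technique in plain Go: a `net/http` middleware that logs incoming request headers and, optionally, bodies. It is an illustration built on the standard library, not rclone's actual server wiring; the handler and flag names are invented.

```go
package main

import (
	"log"
	"net/http"
	"net/http/httputil"
)

// dumpMiddleware logs each incoming request. dumpBodies controls whether
// the request body is included; httputil.DumpRequest re-buffers the body
// so the wrapped handler can still read it.
func dumpMiddleware(next http.Handler, dumpBodies bool) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		dump, err := httputil.DumpRequest(r, dumpBodies)
		if err != nil {
			log.Printf("failed to dump request: %v", err)
		} else {
			log.Printf("HTTP REQUEST:\n%s", dump)
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("ok\n"))
	})
	// Wrapping the whole mux is roughly what "obeying --dump" implies
	// for a server; rclone's real implementation differs.
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", dumpMiddleware(mux, true)))
}
```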


@@ -32,7 +32,7 @@ jobs:
         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '1.21'
+            go: '1.21.0-rc.3'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -43,14 +43,14 @@ jobs:
           - job_name: linux_386
             os: ubuntu-latest
-            go: '1.21'
+            go: '1.21.0-rc.3'
             goarch: 386
             gotags: cmount
             quicktest: true
           - job_name: mac_amd64
             os: macos-11
-            go: '1.21'
+            go: '1.21.0-rc.3'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -59,14 +59,14 @@ jobs:
           - job_name: mac_arm64
             os: macos-11
-            go: '1.21'
+            go: '1.21.0-rc.3'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true
           - job_name: windows
             os: windows-latest
-            go: '1.21'
+            go: '1.21.0-rc.3'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -76,7 +76,7 @@ jobs:
           - job_name: other_os
             os: ubuntu-latest
-            go: '1.21'
+            go: '1.21.0-rc.3'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true
@@ -99,7 +99,7 @@ jobs:
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
         with:
           fetch-depth: 0
@@ -232,7 +232,7 @@ jobs:
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
       - name: Code quality test
        uses: golangci/golangci-lint-action@v3
@@ -244,7 +244,7 @@ jobs:
      - name: Install Go
        uses: actions/setup-go@v4
        with:
-          go-version: '1.21'
+          go-version: '1.21.0-rc.3'
          check-latest: true
      - name: Install govulncheck
@@ -261,7 +261,7 @@ jobs:
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
        with:
          fetch-depth: 0
@@ -269,7 +269,7 @@ jobs:
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
-          go-version: '1.21'
+          go-version: '1.21.0-rc.3'
      - name: Go module cache
        uses: actions/cache@v3


@@ -10,35 +10,26 @@ jobs:
     runs-on: ubuntu-latest
     name: Build image job
     steps:
-      - name: Free some space
-        shell: bash
-        run: |
-          df -h .
-          # Remove android SDK
-          sudo rm -rf /usr/local/lib/android || true
-          # Remove .net runtime
-          sudo rm -rf /usr/share/dotnet || true
-          df -h .
       - name: Checkout master
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Login to Docker Hub
-        uses: docker/login-action@v3
+        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Extract metadata (tags, labels) for Docker
        id: meta
-        uses: docker/metadata-action@v5
+        uses: docker/metadata-action@v4
        with:
          images: ghcr.io/${{ github.repository }}
      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v3
+        uses: docker/setup-qemu-action@v2
      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@v2
      - name: Login to GitHub Container Registry
-        uses: docker/login-action@v3
+        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          # This is the user that triggered the Workflow. In this case, it will
@@ -51,12 +42,9 @@ jobs:
          # See https://docs.github.com/en/actions/security-guides/automatic-token-authentication#about-the-github_token-secret
          # for more detailed information.
          password: ${{ secrets.GITHUB_TOKEN }}
-      - name: Show disk usage
-        shell: bash
-        run: |
-          df -h .
      - name: Build and publish image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v4
        with:
          file: Dockerfile
          context: .
@@ -66,12 +54,8 @@ jobs:
            rclone/rclone:beta
          labels: ${{ steps.meta.outputs.labels }}
          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
-          cache-from: type=gha, scope=${{ github.workflow }}
-          cache-to: type=gha, mode=max, scope=${{ github.workflow }}
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
          provenance: false
          # Eventually cache will need to be cleared if builds more frequent than once a week
          # https://github.com/docker/build-push-action/issues/252
-      - name: Show disk usage
-        shell: bash
-        run: |
-          df -h .


@@ -10,17 +10,8 @@ jobs:
     runs-on: ubuntu-latest
     name: Build image job
     steps:
-      - name: Free some space
-        shell: bash
-        run: |
-          df -h .
-          # Remove android SDK
-          sudo rm -rf /usr/local/lib/android || true
-          # Remove .net runtime
-          sudo rm -rf /usr/share/dotnet || true
-          df -h .
       - name: Checkout master
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Get actual patch version
@@ -48,17 +39,8 @@ jobs:
     runs-on: ubuntu-latest
     name: Build docker plugin job
     steps:
-      - name: Free some space
-        shell: bash
-        run: |
-          df -h .
-          # Remove android SDK
-          sudo rm -rf /usr/local/lib/android || true
-          # Remove .net runtime
-          sudo rm -rf /usr/share/dotnet || true
-          df -h .
       - name: Checkout master
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Build and publish docker plugin


@@ -5,7 +5,7 @@ on:
 jobs:
   publish:
-    runs-on: ubuntu-latest
+    runs-on: windows-latest # Action can only run on Windows
     steps:
       - uses: vedantmgoyal2009/winget-releaser@v2
         with:

.gitignore (vendored), 2 changed lines:

@@ -8,10 +8,10 @@ rclone.iml
 .idea
 .history
 *.test
-*.log
 *.iml
 fuzz-build.zip
 *.orig
 *.rej
 Thumbs.db
 __pycache__
+.DS_Store


@@ -33,67 +33,23 @@ issues:
         - staticcheck
       text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'
-
-# don't disable the revive messages about comments on exported functions
-include:
-  - EXC0012
-  - EXC0013
-  - EXC0014
-  - EXC0015

 run:
   # timeout for analysis, e.g. 30s, 5m, default is 1m
   timeout: 10m

 linters-settings:
   revive:
-    # setting rules seems to disable all the rules, so re-enable them here
     rules:
-      - name: blank-imports
-        disabled: false
-      - name: context-as-argument
-        disabled: false
-      - name: context-keys-type
-        disabled: false
-      - name: dot-imports
-        disabled: false
-      - name: empty-block
-        disabled: true
-      - name: error-naming
-        disabled: false
-      - name: error-return
-        disabled: false
-      - name: error-strings
-        disabled: false
-      - name: errorf
-        disabled: false
-      - name: exported
-        disabled: false
-      - name: increment-decrement
-        disabled: true
-      - name: indent-error-flow
-        disabled: false
-      - name: package-comments
-        disabled: false
-      - name: range
-        disabled: false
-      - name: receiver-naming
-        disabled: false
-      - name: redefines-builtin-id
-        disabled: true
-      - name: superfluous-else
-        disabled: true
-      - name: time-naming
-        disabled: false
-      - name: unexported-return
-        disabled: false
       - name: unreachable-code
         disabled: true
       - name: unused-parameter
         disabled: true
-      - name: var-declaration
-        disabled: false
-      - name: var-naming
-        disabled: false
+      - name: empty-block
+        disabled: true
+      - name: redefines-builtin-id
+        disabled: true
+      - name: superfluous-else
+        disabled: true
   stylecheck:
     # Only enable the checks performed by the staticcheck stand-alone tool,
     # as documented here: https://staticcheck.io/docs/configuration/options/#checks


@@ -428,19 +428,14 @@ Getting going
   * box is a good one to start from if you have a directory-based remote
   * b2 is a good one to start from if you have a bucket-based remote
   * Add your remote to the imports in `backend/all/all.go`
-  * HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good go SDK then use that instead.
+  * HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
   * Try to implement as many optional methods as possible as it makes the remote more usable.
-  * Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed
+  * Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
     * `rclone purge -v TestRemote:rclone-info`
     * `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
     * `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
     * open `remote.csv` in a spreadsheet and examine
-
-Important:
-  * Please use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend. It makes maintenance much easier.
-  * If your backend is HTTP based then please use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything!

 Unit tests
   * Create a config entry called `TestRemote` for the unit tests to use
@@ -474,7 +469,7 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
   * `README.md` - main GitHub page
   * `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
     * make sure this has the `autogenerated options` comments in (see your reference backend docs)
-    * update them in your backend with `bin/make_backend_docs.py remote`
+    * update them with `make backenddocs` - revert any changes in other backends
   * `docs/content/overview.md` - overview docs
   * `docs/content/docs.md` - list of remotes in config section
   * `docs/content/_index.md` - front page of rclone.org
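The CONTRIBUTING.md lines above recommend building HTTP backends on rclone's fs/fshttp client and lib/rest module so that flags like `--dump bodies`, `--tpslimit` and `--user-agent` work without extra code. As a rough, hedged sketch of that advice (the remote API, the `itemInfo` type and the URL are invented for illustration; the fshttp and rest calls are used as they appear in rclone's public packages):

```go
package example

import (
	"context"
	"net/http"

	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/lib/rest"
)

// itemInfo is a hypothetical JSON response from the remote.
type itemInfo struct {
	Name string `json:"name"`
	Size int64  `json:"size"`
}

// newAPIClient builds a rest.Client on top of rclone's HTTP client, which
// is what makes --dump bodies, --tpslimit, --user-agent etc. apply.
func newAPIClient(ctx context.Context, rootURL string) *rest.Client {
	var client *http.Client = fshttp.NewClient(ctx)
	return rest.NewClient(client).SetRoot(rootURL)
}

// getItem shows a typical JSON call via lib/rest.
func getItem(ctx context.Context, srv *rest.Client, path string) (*itemInfo, error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   path,
	}
	var result itemInfo
	_, err := srv.CallJSON(ctx, &opts, nil, &result)
	if err != nil {
		return nil, err
	}
	return &result, nil
}
```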


@@ -19,8 +19,6 @@ Current active maintainers of rclone are:
 | wiserain        | @wiserain     | pikpak backend       |
 | albertony       | @albertony    |                      |
 | Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
-| Hideo Aoyama    | @boukendesho  | snap packaging       |
-| nielash         | @nielash      | bisync               |

 **This is a work in progress Draft**

MANUAL.html (generated), 31515 changed lines: file diff suppressed because it is too large.

MANUAL.md (generated), 6970 changed lines: file diff suppressed because it is too large.

MANUAL.txt (generated), 30968 changed lines: file diff suppressed because it is too large.


@@ -73,10 +73,8 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
   * PikPak [:page_facing_up:](https://rclone.org/pikpak/)
   * premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
   * put.io [:page_facing_up:](https://rclone.org/putio/)
-  * Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
   * QingStor [:page_facing_up:](https://rclone.org/qingstor/)
   * Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
-  * Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
   * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
   * RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
   * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)


@@ -90,28 +90,6 @@ Now
   * git commit -a -v -m "Changelog updates from Version ${NEW_TAG}"
   * git push
-
-## Update the website between releases
-
-Create an update website branch based off the last release
-
-    git co -b update-website
-
-If the branch already exists, double check there are no commits that need saving.
-
-Now reset the branch to the last release
-
-    git reset --hard v1.64.0
-
-Create the changes, check them in, test with `make serve` then
-
-    make upload_test_website
-
-Check out https://test.rclone.org and when happy
-
-    make upload_website
-
-Cherry pick any changes back to master and the stable branch if it is active.

 ## Making a manual build of docker

 The rclone docker image should autobuild on via GitHub actions. If it doesn't


@@ -1 +1 @@
-v1.64.2
+v1.64.0


@@ -41,7 +41,6 @@ import (
 	_ "github.com/rclone/rclone/backend/protondrive"
 	_ "github.com/rclone/rclone/backend/putio"
 	_ "github.com/rclone/rclone/backend/qingstor"
-	_ "github.com/rclone/rclone/backend/quatrix"
 	_ "github.com/rclone/rclone/backend/s3"
 	_ "github.com/rclone/rclone/backend/seafile"
 	_ "github.com/rclone/rclone/backend/sftp"


@@ -5,6 +5,7 @@
 package azureblob

 import (
+	"bytes"
 	"context"
 	"crypto/md5"
 	"encoding/base64"
@@ -17,7 +18,6 @@ import (
 	"net/url"
 	"os"
 	"path"
-	"sort"
 	"strconv"
 	"strings"
 	"sync"
@@ -33,6 +33,7 @@ import (
 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/chunksize"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
@@ -45,8 +46,10 @@ import (
 	"github.com/rclone/rclone/lib/bucket"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/env"
-	"github.com/rclone/rclone/lib/multipart"
 	"github.com/rclone/rclone/lib/pacer"
+	"github.com/rclone/rclone/lib/pool"
+	"github.com/rclone/rclone/lib/readers"
+	"golang.org/x/sync/errgroup"
 )

 const (
@@ -67,16 +70,12 @@ const (
 	emulatorAccount      = "devstoreaccount1"
 	emulatorAccountKey   = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
 	emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
+	memoryPoolFlushTime  = fs.Duration(time.Minute) // flush the cached buffers after this long
+	memoryPoolUseMmap    = false
 )

 var (
 	errCantUpdateArchiveTierBlobs = fserrors.NoRetryError(errors.New("can't update archive tier blob without --azureblob-archive-tier-delete"))
-
-	// Take this when changing or reading metadata.
-	//
-	// It acts as global metadata lock so we don't bloat Object
-	// with an extra lock that will only very rarely be contended.
-	metadataMu sync.Mutex
 )

 // Register with Fs
@@ -338,16 +337,17 @@ to start uploading.`,
 		Advanced: true,
 	}, {
 		Name:     "memory_pool_flush_time",
-		Default:  fs.Duration(time.Minute),
+		Default:  memoryPoolFlushTime,
 		Advanced: true,
-		Hide:     fs.OptionHideBoth,
-		Help:     `How often internal memory buffer pools will be flushed. (no longer used)`,
+		Help: `How often internal memory buffer pools will be flushed.
+
+Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
+This option controls how often unused buffers will be removed from the pool.`,
 	}, {
 		Name:     "memory_pool_use_mmap",
-		Default:  false,
+		Default:  memoryPoolUseMmap,
 		Advanced: true,
-		Hide:     fs.OptionHideBoth,
-		Help:     `Whether to use mmap buffers in internal memory pool. (no longer used)`,
+		Help:     `Whether to use mmap buffers in internal memory pool.`,
 	}, {
 		Name: config.ConfigEncoding,
 		Help: config.ConfigEncodingHelp,
@@ -432,6 +432,8 @@ type Options struct {
 	ArchiveTierDelete bool                 `config:"archive_tier_delete"`
 	UseEmulator       bool                 `config:"use_emulator"`
 	DisableCheckSum   bool                 `config:"disable_checksum"`
+	MemoryPoolFlushTime fs.Duration        `config:"memory_pool_flush_time"`
+	MemoryPoolUseMmap bool                 `config:"memory_pool_use_mmap"`
 	Enc               encoder.MultiEncoder `config:"encoding"`
 	PublicAccess      string               `config:"public_access"`
 	DirectoryMarkers  bool                 `config:"directory_markers"`
@@ -455,6 +457,8 @@ type Fs struct {
 	cache        *bucket.Cache              // cache for container creation status
 	pacer        *fs.Pacer                  // To pace and retry the API calls
 	uploadToken  *pacer.TokenDispenser      // control concurrency
+	pool         *pool.Pool                 // memory pool
+	poolSize     int64                      // size of pages in memory pool
 	publicAccess container.PublicAccessType // Container Public Access Level
 }
@@ -467,7 +471,7 @@ type Object struct {
 	size       int64             // Size of the object
 	mimeType   string            // Content-Type of the object
 	accessTier blob.AccessTier   // Blob Access Tier
-	meta       map[string]string // blob metadata - take metadataMu when accessing
+	meta       map[string]string // blob metadata
 }

 // ------------------------------------------------------------
@@ -667,6 +671,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		uploadToken: pacer.NewTokenDispenser(ci.Transfers),
 		cache:       bucket.NewCache(),
 		cntSVCcache: make(map[string]*container.Client, 1),
+		pool: pool.New(
+			time.Duration(opt.MemoryPoolFlushTime),
+			int(opt.ChunkSize),
+			ci.Transfers,
+			opt.MemoryPoolUseMmap,
+		),
+		poolSize: int64(opt.ChunkSize),
 	}
 	f.publicAccess = container.PublicAccessType(opt.PublicAccess)
 	f.setRoot(root)
@@ -961,9 +972,6 @@ func (f *Fs) getBlockBlobSVC(container, containerPath string) *blockblob.Client
 // updateMetadataWithModTime adds the modTime passed in to o.meta.
 func (o *Object) updateMetadataWithModTime(modTime time.Time) {
-	metadataMu.Lock()
-	defer metadataMu.Unlock()
-
 	// Make sure o.meta is not nil
 	if o.meta == nil {
 		o.meta = make(map[string]string, 1)
@@ -1495,7 +1503,7 @@ func (f *Fs) deleteContainer(ctx context.Context, containerName string) error {
 func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	container, directory := f.split(dir)
 	// Remove directory marker file
-	if f.opt.DirectoryMarkers && container != "" && directory != "" {
+	if f.opt.DirectoryMarkers && container != "" && dir != "" {
 		o := &Object{
 			fs:     f,
 			remote: dir + "/",
@@ -1529,10 +1537,7 @@ func (f *Fs) Hashes() hash.Set {
 // Purge deletes all the files and directories including the old versions.
 func (f *Fs) Purge(ctx context.Context, dir string) error {
 	container, directory := f.split(dir)
-	if container == "" {
-		return errors.New("can't purge from root")
-	}
-	if directory != "" {
+	if container == "" || directory != "" {
 		// Delegate to caller if not root of a container
 		return fs.ErrorCantPurge
 	}
@@ -1589,6 +1594,19 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	return f.NewObject(ctx, remote)
 }

+func (f *Fs) getMemoryPool(size int64) *pool.Pool {
+	if size == int64(f.opt.ChunkSize) {
+		return f.pool
+	}
+	return pool.New(
+		time.Duration(f.opt.MemoryPoolFlushTime),
+		int(size),
+		f.ci.Transfers,
+		f.opt.MemoryPoolUseMmap,
+	)
+}
+
 // ------------------------------------------------------------

 // Fs returns the parent Fs
@@ -1632,9 +1650,6 @@ func (o *Object) Size() int64 {
 // Set o.metadata from metadata
 func (o *Object) setMetadata(metadata map[string]*string) {
-	metadataMu.Lock()
-	defer metadataMu.Unlock()
-
 	if len(metadata) > 0 {
 		// Lower case the metadata
 		o.meta = make(map[string]string, len(metadata))
@@ -1659,9 +1674,6 @@ func (o *Object) setMetadata(metadata map[string]*string) {
 // Get metadata from o.meta
 func (o *Object) getMetadata() (metadata map[string]*string) {
-	metadataMu.Lock()
-	defer metadataMu.Unlock()
-
 	if len(o.meta) == 0 {
 		return nil
 	}
@@ -1873,7 +1885,12 @@ func (o *Object) ModTime(ctx context.Context) (result time.Time) {
 // SetModTime sets the modification time of the local fs object
 func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
-	o.updateMetadataWithModTime(modTime)
+	// Make sure o.meta is not nil
+	if o.meta == nil {
+		o.meta = make(map[string]string, 1)
+	}
+	// Set modTimeKey in it
+	o.meta[modTimeKey] = modTime.Format(timeFormatOut)

 	blb := o.getBlobSVC()
 	opt := blob.SetMetadataOptions{}
@@ -1965,8 +1982,8 @@ func (rs *readSeekCloser) Close() error {
 	return nil
 }

-// increment the array as LSB binary
-func increment(xs *[8]byte) {
+// increment the slice passed in as LSB binary
+func increment(xs []byte) {
 	for i, digit := range xs {
 		newDigit := digit + 1
 		xs[i] = newDigit
@@ -1977,43 +1994,22 @@ func increment(xs *[8]byte) {
} }
} }
// record chunk number and id for Close var warnStreamUpload sync.Once
type azBlock struct {
chunkNumber int
id string
}
// Implements the fs.ChunkWriter interface // uploadMultipart uploads a file using multipart upload
type azChunkWriter struct {
chunkSize int64
size int64
f *Fs
ui uploadInfo
blocksMu sync.Mutex // protects the below
blocks []azBlock // list of blocks for finalize
binaryBlockID [8]byte // block counter as LSB first 8 bytes
o *Object
}
// OpenChunkWriter returns the chunk size and a ChunkWriter
// //
// Pass in the remote and the src object // Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
// You can also use options to hint at the desired chunk size func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, blb *blockblob.Client, httpHeaders *blob.HTTPHeaders) (err error) {
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: remote,
}
ui, err := o.prepareUpload(ctx, src, options)
if err != nil {
return info, nil, fmt.Errorf("failed to prepare upload: %w", err)
}
// Calculate correct partSize // Calculate correct partSize
partSize := f.opt.ChunkSize partSize := o.fs.opt.ChunkSize
totalParts := -1 totalParts := -1
size := src.Size()
// make concurrency machinery
concurrency := o.fs.opt.UploadConcurrency
if concurrency < 1 {
concurrency = 1
}
tokens := pacer.NewTokenDispenser(concurrency)
// Note that the max size of file is 4.75 TB (100 MB X 50,000 // Note that the max size of file is 4.75 TB (100 MB X 50,000
// blocks) and this is bigger than the max uncommitted block // blocks) and this is bigger than the max uncommitted block
@@ -2027,13 +2023,13 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
// 195GB which seems like a not too unreasonable limit. // 195GB which seems like a not too unreasonable limit.
if size == -1 { if size == -1 {
warnStreamUpload.Do(func() { warnStreamUpload.Do(func() {
fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v", fs.Logf(o, "Streaming uploads using chunk size %v will have maximum file size of %v",
f.opt.ChunkSize, partSize*fs.SizeSuffix(blockblob.MaxBlocks)) o.fs.opt.ChunkSize, partSize*fs.SizeSuffix(blockblob.MaxBlocks))
}) })
} else { } else {
partSize = chunksize.Calculator(remote, size, blockblob.MaxBlocks, f.opt.ChunkSize) partSize = chunksize.Calculator(o, size, blockblob.MaxBlocks, o.fs.opt.ChunkSize)
if partSize > fs.SizeSuffix(blockblob.MaxStageBlockBytes) { if partSize > fs.SizeSuffix(blockblob.MaxStageBlockBytes) {
return info, nil, fmt.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), fs.SizeSuffix(blockblob.MaxBlocks), fs.SizeSuffix(blockblob.MaxStageBlockBytes)) return fmt.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), fs.SizeSuffix(blockblob.MaxBlocks), fs.SizeSuffix(blockblob.MaxStageBlockBytes))
} }
totalParts = int(fs.SizeSuffix(size) / partSize) totalParts = int(fs.SizeSuffix(size) / partSize)
if fs.SizeSuffix(size)%partSize != 0 { if fs.SizeSuffix(size)%partSize != 0 {
@@ -2043,264 +2039,173 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, partSize) fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, partSize)
chunkWriter := &azChunkWriter{ // unwrap the accounting from the input, we use wrap to put it
chunkSize: int64(partSize), // back on after the buffering
size: size, in, wrap := accounting.UnWrap(in)
f: f,
ui: ui,
o: o,
}
info = fs.ChunkWriterInfo{
ChunkSize: int64(partSize),
Concurrency: o.fs.opt.UploadConcurrency,
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
fs.Debugf(o, "open chunk writer: started multipart upload")
return info, chunkWriter, nil
}
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0 // FIXME it would be nice to delete uncommitted blocks
func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (int64, error) { // See: https://github.com/rclone/rclone/issues/5583
if chunkNumber < 0 { //
err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber) // However there doesn't seem to be an easy way of doing this other than
return -1, err // by deleting the target.
} //
// This means that a failed upload deletes the target which isn't ideal.
//
// Uploading a zero length blob and deleting it will remove the
// uncommitted blocks I think.
//
// Could check to see if a file exists already and if it
// doesn't then create a 0 length file and delete it to flush
// the uncommitted blocks.
//
// This is what azcopy does
// https://github.com/MicrosoftDocs/azure-docs/issues/36347#issuecomment-541457962
// defer atexit.OnError(&err, func() {
// fs.Debugf(o, "Cancelling multipart upload")
// // Code goes here!
// })()
// Upload the block, with MD5 for check // Upload the chunks
m := md5.New() var (
currentChunkSize, err := io.Copy(m, reader) g, gCtx = errgroup.WithContext(ctx)
if err != nil { remaining = fs.SizeSuffix(size) // remaining size in file for logging only, -1 if size < 0
return -1, err position = fs.SizeSuffix(0) // position in file
} memPool = o.fs.getMemoryPool(int64(partSize)) // pool to get memory from
// If no data read, don't write the chunk finished = false // set when we have read EOF
if currentChunkSize == 0 { blocks []string // list of blocks for finalize
return 0, nil binaryBlockID = make([]byte, 8) // block counter as LSB first 8 bytes
} )
md5sum := m.Sum(nil) for part := 0; !finished; part++ {
transactionalMD5 := md5sum[:] // Get a block of memory from the pool and a token which limits concurrency
tokens.Get()
buf := memPool.Get()
// increment the blockID and save the blocks for finalize free := func() {
increment(&w.binaryBlockID) memPool.Put(buf) // return the buf
blockID := base64.StdEncoding.EncodeToString(w.binaryBlockID[:]) tokens.Put() // return the token
// Save the blockID for the commit
w.blocksMu.Lock()
w.blocks = append(w.blocks, azBlock{
chunkNumber: chunkNumber,
id: blockID,
})
w.blocksMu.Unlock()
err = w.f.pacer.Call(func() (bool, error) {
// rewind the reader on retry and after reading md5
_, err = reader.Seek(0, io.SeekStart)
if err != nil {
return false, err
} }
options := blockblob.StageBlockOptions{
// Specify the transactional md5 for the body, to be validated by the service. // Fail fast, in case an errgroup managed function returns an error
TransactionalValidation: blob.TransferValidationTypeMD5(transactionalMD5), // gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
free()
break
} }
_, err = w.ui.blb.StageBlock(ctx, blockID, &readSeekCloser{Reader: reader, Seeker: reader}, &options)
if err != nil { // Read the chunk
if chunkNumber <= 8 { n, err := readers.ReadFill(in, buf) // this can never return 0, nil
return w.f.shouldRetry(ctx, err) if err == io.EOF {
if n == 0 { // end if no data
free()
break
} }
// retry all chunks once have done the first few finished = true
return true, err } else if err != nil {
free()
return fmt.Errorf("multipart upload failed to read source: %w", err)
} }
return false, nil buf = buf[:n]
})
if err != nil { // increment the blockID and save the blocks for finalize
return -1, fmt.Errorf("failed to upload chunk %d with %v bytes: %w", chunkNumber+1, currentChunkSize, err) increment(binaryBlockID)
blockID := base64.StdEncoding.EncodeToString(binaryBlockID)
blocks = append(blocks, blockID)
// Transfer the chunk
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %d", part+1, totalParts, position, fs.SizeSuffix(size), len(buf))
g.Go(func() (err error) {
defer free()
// Upload the block, with MD5 for check
md5sum := md5.Sum(buf)
transactionalMD5 := md5sum[:]
err = o.fs.pacer.Call(func() (bool, error) {
bufferReader := bytes.NewReader(buf)
wrappedReader := wrap(bufferReader)
rs := readSeekCloser{wrappedReader, bufferReader}
options := blockblob.StageBlockOptions{
// Specify the transactional md5 for the body, to be validated by the service.
TransactionalValidation: blob.TransferValidationTypeMD5(transactionalMD5),
}
_, err = blb.StageBlock(ctx, blockID, &rs, &options)
return o.fs.shouldRetry(ctx, err)
})
if err != nil {
return fmt.Errorf("multipart upload failed to upload part: %w", err)
}
return nil
})
// ready for next block
if size >= 0 {
remaining -= partSize
}
position += partSize
} }
err = g.Wait()
fs.Debugf(w.o, "multipart upload wrote chunk %d with %v bytes", chunkNumber+1, currentChunkSize) if err != nil {
return currentChunkSize, err return err
}
// Abort the multipart upload.
//
// FIXME it would be nice to delete uncommitted blocks.
//
// See: https://github.com/rclone/rclone/issues/5583
//
// However there doesn't seem to be an easy way of doing this other than
// by deleting the target.
//
// This means that a failed upload deletes the target which isn't ideal.
//
// Uploading a zero length blob and deleting it will remove the
// uncommitted blocks I think.
//
// Could check to see if a file exists already and if it doesn't then
// create a 0 length file and delete it to flush the uncommitted
// blocks.
//
// This is what azcopy does
// https://github.com/MicrosoftDocs/azure-docs/issues/36347#issuecomment-541457962
func (w *azChunkWriter) Abort(ctx context.Context) error {
fs.Debugf(w.o, "multipart upload aborted (did nothing - see issue #5583)")
return nil
}
// Close and finalise the multipart upload
func (w *azChunkWriter) Close(ctx context.Context) (err error) {
// sort the completed parts by part number
sort.Slice(w.blocks, func(i, j int) bool {
return w.blocks[i].chunkNumber < w.blocks[j].chunkNumber
})
// Create a list of block IDs
blockIDs := make([]string, len(w.blocks))
for i := range w.blocks {
blockIDs[i] = w.blocks[i].id
} }
options := blockblob.CommitBlockListOptions{ options := blockblob.CommitBlockListOptions{
Metadata: w.o.getMetadata(), Metadata: o.getMetadata(),
Tier: parseTier(w.f.opt.AccessTier), Tier: parseTier(o.fs.opt.AccessTier),
HTTPHeaders: &w.ui.httpHeaders, HTTPHeaders: httpHeaders,
} }
// Finalise the upload session // Finalise the upload session
err = w.f.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
_, err := w.ui.blb.CommitBlockList(ctx, blockIDs, &options) _, err := blb.CommitBlockList(ctx, blocks, &options)
return w.f.shouldRetry(ctx, err) return o.fs.shouldRetry(ctx, err)
}) })
if err != nil { if err != nil {
return fmt.Errorf("failed to complete multipart upload: %w", err) return fmt.Errorf("multipart upload failed to finalize: %w", err)
} }
fs.Debugf(w.o, "multipart upload finished") return nil
return err
}
var warnStreamUpload sync.Once
// uploadMultipart uploads a file using multipart upload
//
// Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (ui uploadInfo, err error) {
chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
Open: o.fs,
OpenOptions: options,
})
if err != nil {
return ui, err
}
return chunkWriter.(*azChunkWriter).ui, nil
} }
// uploadSinglepart uploads a short blob using a single part upload // uploadSinglepart uploads a short blob using a single part upload
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, ui uploadInfo) (err error) { func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, blb *blockblob.Client, httpHeaders *blob.HTTPHeaders) (err error) {
chunkSize := int64(o.fs.opt.ChunkSize)
// fs.Debugf(o, "Single part upload starting of object %d bytes", size) // fs.Debugf(o, "Single part upload starting of object %d bytes", size)
if size > chunkSize || size < 0 { if size > o.fs.poolSize || size < 0 {
return fmt.Errorf("internal error: single part upload size too big %d > %d", size, chunkSize) return fmt.Errorf("internal error: single part upload size too big %d > %d", size, o.fs.opt.ChunkSize)
} }
rw := multipart.NewRW() buf := o.fs.pool.Get()
defer fs.CheckClose(rw, &err) defer o.fs.pool.Put(buf)
n, err := io.CopyN(rw, in, size+1) n, err := readers.ReadFill(in, buf)
if err == nil {
// Check to see whether in is exactly len(buf) or bigger
var buf2 = []byte{0}
n2, err2 := readers.ReadFill(in, buf2)
if n2 != 0 || err2 != io.EOF {
return fmt.Errorf("single part upload read failed: object longer than expected (expecting %d but got > %d)", size, len(buf))
}
}
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
return fmt.Errorf("single part upload read failed: %w", err) return fmt.Errorf("single part upload read failed: %w", err)
} }
if n != size { if int64(n) != size {
return fmt.Errorf("single part upload: expecting to read %d bytes but read %d", size, n) return fmt.Errorf("single part upload: expecting to read %d bytes but read %d", size, n)
} }
rs := &readSeekCloser{Reader: rw, Seeker: rw} b := bytes.NewReader(buf[:n])
rs := &readSeekCloser{Reader: b, Seeker: b}
options := blockblob.UploadOptions{ options := blockblob.UploadOptions{
Metadata: o.getMetadata(), Metadata: o.getMetadata(),
Tier: parseTier(o.fs.opt.AccessTier), Tier: parseTier(o.fs.opt.AccessTier),
HTTPHeaders: &ui.httpHeaders, HTTPHeaders: httpHeaders,
} }
return o.fs.pacer.Call(func() (bool, error) { // Don't retry, return a retry error instead
// rewind the reader on retry return o.fs.pacer.CallNoRetry(func() (bool, error) {
_, err = rs.Seek(0, io.SeekStart) _, err = blb.Upload(ctx, rs, &options)
if err != nil {
return false, err
}
_, err = ui.blb.Upload(ctx, rs, &options)
return o.fs.shouldRetry(ctx, err) return o.fs.shouldRetry(ctx, err)
}) })
} }
-// Info needed for an upload
-type uploadInfo struct {
-	blb         *blockblob.Client
-	httpHeaders blob.HTTPHeaders
-	isDirMarker bool
-}
-
-// Prepare the object for upload
-func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {
-	container, containerPath := o.split()
-	if container == "" || containerPath == "" {
-		return ui, fmt.Errorf("can't upload to root - need a container")
-	}
-	// Create parent dir/bucket if not saving directory marker
-	metadataMu.Lock()
-	_, ui.isDirMarker = o.meta[dirMetaKey]
-	metadataMu.Unlock()
-	if !ui.isDirMarker {
-		err = o.fs.mkdirParent(ctx, o.remote)
-		if err != nil {
-			return ui, err
-		}
-	}
-
-	// Update Mod time
-	o.updateMetadataWithModTime(src.ModTime(ctx))
-	if err != nil {
-		return ui, err
-	}
-
-	// Create the HTTP headers for the upload
-	ui.httpHeaders = blob.HTTPHeaders{
-		BlobContentType: pString(fs.MimeType(ctx, src)),
-	}
-
-	// Compute the Content-MD5 of the file. As we stream all uploads it
-	// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
-	if !o.fs.opt.DisableCheckSum {
-		if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
-			sourceMD5bytes, err := hex.DecodeString(sourceMD5)
-			if err == nil {
-				ui.httpHeaders.BlobContentMD5 = sourceMD5bytes
-			} else {
-				fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
-			}
-		}
-	}
-
-	// Apply upload options (also allows one to overwrite content-type)
-	for _, option := range options {
-		key, value := option.Header()
-		lowerKey := strings.ToLower(key)
-		switch lowerKey {
-		case "":
-			// ignore
-		case "cache-control":
-			ui.httpHeaders.BlobCacheControl = pString(value)
-		case "content-disposition":
-			ui.httpHeaders.BlobContentDisposition = pString(value)
-		case "content-encoding":
-			ui.httpHeaders.BlobContentEncoding = pString(value)
-		case "content-language":
-			ui.httpHeaders.BlobContentLanguage = pString(value)
-		case "content-type":
-			ui.httpHeaders.BlobContentType = pString(value)
-		}
-	}
-
-	ui.blb = o.fs.getBlockBlobSVC(container, containerPath)
-	return ui, nil
-}
-
 // Update the object with the contents of the io.Reader, modTime and size
 //
 // The new object may have been created if an error is returned
@@ -2316,26 +2221,80 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errCantUpdateArchiveTierBlobs return errCantUpdateArchiveTierBlobs
} }
} }
container, containerPath := o.split()
size := src.Size() if container == "" || containerPath == "" {
multipartUpload := size < 0 || size > int64(o.fs.opt.ChunkSize) return fmt.Errorf("can't upload to root - need a container")
var ui uploadInfo }
// Create parent dir/bucket if not saving directory marker
if multipartUpload { _, isDirMarker := o.meta[dirMetaKey]
ui, err = o.uploadMultipart(ctx, in, src, options...) if !isDirMarker {
} else { err = o.fs.mkdirParent(ctx, o.remote)
ui, err = o.prepareUpload(ctx, src, options)
if err != nil { if err != nil {
return fmt.Errorf("failed to prepare upload: %w", err) return err
} }
err = o.uploadSinglepart(ctx, in, size, ui) }
// Update Mod time
fs.Debugf(nil, "o.meta = %+v", o.meta)
o.updateMetadataWithModTime(src.ModTime(ctx))
if err != nil {
return err
}
// Create the HTTP headers for the upload
httpHeaders := blob.HTTPHeaders{
BlobContentType: pString(fs.MimeType(ctx, src)),
}
// Compute the Content-MD5 of the file. As we stream all uploads it
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
if !o.fs.opt.DisableCheckSum {
if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
sourceMD5bytes, err := hex.DecodeString(sourceMD5)
if err == nil {
httpHeaders.BlobContentMD5 = sourceMD5bytes
} else {
fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
}
}
}
// Apply upload options (also allows one to overwrite content-type)
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
httpHeaders.BlobCacheControl = pString(value)
case "content-disposition":
httpHeaders.BlobContentDisposition = pString(value)
case "content-encoding":
httpHeaders.BlobContentEncoding = pString(value)
case "content-language":
httpHeaders.BlobContentLanguage = pString(value)
case "content-type":
httpHeaders.BlobContentType = pString(value)
}
}
blb := o.fs.getBlockBlobSVC(container, containerPath)
size := src.Size()
multipartUpload := size < 0 || size > o.fs.poolSize
fs.Debugf(nil, "o.meta = %+v", o.meta)
if multipartUpload {
err = o.uploadMultipart(ctx, in, size, blb, &httpHeaders)
} else {
err = o.uploadSinglepart(ctx, in, size, blb, &httpHeaders)
} }
if err != nil { if err != nil {
return err return err
} }
// Refresh metadata on object // Refresh metadata on object
if !ui.isDirMarker { if !isDirMarker {
o.clearMetaData() o.clearMetaData()
err = o.readMetaData(ctx) err = o.readMetaData(ctx)
if err != nil { if err != nil {
@@ -2424,14 +2383,13 @@ func parseTier(tier string) *blob.AccessTier {
 // Check the interfaces are satisfied
 var (
 	_ fs.Fs          = &Fs{}
 	_ fs.Copier      = &Fs{}
 	_ fs.PutStreamer = &Fs{}
 	_ fs.Purger      = &Fs{}
 	_ fs.ListRer     = &Fs{}
-	_ fs.OpenChunkWriter = &Fs{}
 	_ fs.Object      = &Object{}
 	_ fs.MimeTyper   = &Object{}
 	_ fs.GetTierer   = &Object{}
 	_ fs.SetTierer   = &Object{}
 )
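The left-hand (removed here) side of the azureblob diff implements rclone's chunk-writer interface: `OpenChunkWriter` returns an `fs.ChunkWriter` whose `WriteChunk`, `Close` and `Abort` methods the generic multipart uploader drives. As a point of reference only, here is a minimal, self-contained sketch of that shape using the method signatures visible in the diff; it is a toy in-memory writer, not rclone's azureblob implementation.

```go
package example

import (
	"context"
	"fmt"
	"io"
	"sort"
	"sync"
)

// memChunkWriter collects chunks in memory and assembles them on Close.
// It follows the WriteChunk/Close/Abort shape shown in the diff above.
type memChunkWriter struct {
	mu     sync.Mutex
	chunks map[int][]byte // chunkNumber -> data
	out    []byte         // assembled object, valid after Close
}

// WriteChunk reads one chunk and stores it under its chunk number.
// Chunks may arrive concurrently and out of order.
func (w *memChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (int64, error) {
	if chunkNumber < 0 {
		return -1, fmt.Errorf("invalid chunk number %d", chunkNumber)
	}
	data, err := io.ReadAll(reader)
	if err != nil {
		return -1, err
	}
	w.mu.Lock()
	w.chunks[chunkNumber] = data
	w.mu.Unlock()
	return int64(len(data)), nil
}

// Close commits the upload by concatenating chunks in ascending order,
// mirroring the "sort blocks then commit the block list" step above.
func (w *memChunkWriter) Close(ctx context.Context) error {
	w.mu.Lock()
	defer w.mu.Unlock()
	numbers := make([]int, 0, len(w.chunks))
	for n := range w.chunks {
		numbers = append(numbers, n)
	}
	sort.Ints(numbers)
	for _, n := range numbers {
		w.out = append(w.out, w.chunks[n]...)
	}
	return nil
}

// Abort discards everything written so far.
func (w *memChunkWriter) Abort(ctx context.Context) error {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.chunks = map[int][]byte{}
	return nil
}
```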


@@ -20,18 +20,17 @@ func (f *Fs) InternalTest(t *testing.T) {
 func TestIncrement(t *testing.T) {
 	for _, test := range []struct {
-		in   [8]byte
-		want [8]byte
+		in   []byte
+		want []byte
 	}{
-		{[8]byte{0, 0, 0, 0}, [8]byte{1, 0, 0, 0}},
-		{[8]byte{0xFE, 0, 0, 0}, [8]byte{0xFF, 0, 0, 0}},
-		{[8]byte{0xFF, 0, 0, 0}, [8]byte{0, 1, 0, 0}},
-		{[8]byte{0, 1, 0, 0}, [8]byte{1, 1, 0, 0}},
-		{[8]byte{0xFF, 0xFF, 0xFF, 0xFE}, [8]byte{0, 0, 0, 0xFF}},
-		{[8]byte{0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 1}},
-		{[8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 0, 0, 0}},
+		{[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
+		{[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
+		{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
+		{[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
+		{[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
+		{[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
 	} {
-		increment(&test.in)
+		increment(test.in)
 		assert.Equal(t, test.want, test.in)
 	}
 }
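For context on the test above: `increment` treats the block-ID counter as a little-endian number, adding one to the first byte and rippling the carry into later bytes, with any carry off the end of the slice dropped (hence `{0xFF, 0xFF, 0xFF, 0xFF}` wrapping to `{0, 0, 0, 0}` for a 4-byte slice). A standalone copy of the slice-based version shown in the diff:

```go
package example

// increment the slice passed in as LSB binary
func increment(xs []byte) {
	for i, digit := range xs {
		newDigit := digit + 1
		xs[i] = newDigit
		if newDigit >= digit {
			// exit if no carry
			break
		}
	}
}
```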


@@ -31,9 +31,9 @@ func TestIntegration2(t *testing.T) {
 	if *fstest.RemoteName != "" {
 		t.Skip("Skipping as -remote set")
 	}
-	name := "TestAzureBlob"
+	name := "TestAzureBlob:"
 	fstests.Run(t, &fstests.Opt{
-		RemoteName: name + ":",
+		RemoteName: name,
 		NilObject:  (*Object)(nil),
 		TiersToTest: []string{"Hot", "Cool"},
 		ChunkedUpload: fstests.ChunkedUploadConfig{


@@ -32,7 +32,6 @@ import (
 	"github.com/rclone/rclone/fs/walk"
 	"github.com/rclone/rclone/lib/bucket"
 	"github.com/rclone/rclone/lib/encoder"
-	"github.com/rclone/rclone/lib/multipart"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/pool"
 	"github.com/rclone/rclone/lib/rest"
@@ -58,7 +57,9 @@ const (
 	minChunkSize        = 5 * fs.Mebi
 	defaultChunkSize    = 96 * fs.Mebi
 	defaultUploadCutoff = 200 * fs.Mebi
 	largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
+	memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
+	memoryPoolUseMmap   = false
 )

 // Globals
@@ -148,18 +149,6 @@ might a maximum of "--transfers" chunks in progress at once.

5,000,000 Bytes is the minimum size.`,
 			Default:  defaultChunkSize,
 			Advanced: true,
-		}, {
-			Name: "upload_concurrency",
-			Help: `Concurrency for multipart uploads.
-
-This is the number of chunks of the same file that are uploaded
-concurrently.
-
-Note that chunks are stored in memory and there may be up to
-"--transfers" * "--b2-upload-concurrency" chunks stored at once
-in memory.`,
-			Default:  4,
-			Advanced: true,
 		}, {
 			Name: "disable_checksum",
 			Help: `Disable checksums for large (> upload cutoff) files.
@@ -199,16 +188,16 @@ The minimum value is 1 second. The maximum value is one week.`,
 			Advanced: true,
 		}, {
 			Name:     "memory_pool_flush_time",
-			Default:  fs.Duration(time.Minute),
+			Default:  memoryPoolFlushTime,
 			Advanced: true,
-			Hide:     fs.OptionHideBoth,
-			Help:     `How often internal memory buffer pools will be flushed. (no longer used)`,
+			Help: `How often internal memory buffer pools will be flushed.
+
+Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
+This option controls how often unused buffers will be removed from the pool.`,
 		}, {
 			Name:     "memory_pool_use_mmap",
-			Default:  false,
+			Default:  memoryPoolUseMmap,
 			Advanced: true,
-			Hide:     fs.OptionHideBoth,
-			Help:     `Whether to use mmap buffers in internal memory pool. (no longer used)`,
+			Help:     `Whether to use mmap buffers in internal memory pool.`,
 		}, {
 			Name: config.ConfigEncoding,
 			Help: config.ConfigEncodingHelp,
@@ -235,10 +224,11 @@ type Options struct {
 	UploadCutoff  fs.SizeSuffix `config:"upload_cutoff"`
 	CopyCutoff    fs.SizeSuffix `config:"copy_cutoff"`
 	ChunkSize     fs.SizeSuffix `config:"chunk_size"`
-	UploadConcurrency int       `config:"upload_concurrency"`
 	DisableCheckSum bool        `config:"disable_checksum"`
 	DownloadURL     string      `config:"download_url"`
 	DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
+	MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
+	MemoryPoolUseMmap   bool        `config:"memory_pool_use_mmap"`
 	Enc encoder.MultiEncoder `config:"encoding"`
 }
@@ -263,6 +253,7 @@ type Fs struct {
 	authMu      sync.Mutex            // lock for authorizing the account
 	pacer       *fs.Pacer             // To pace and retry the API calls
 	uploadToken *pacer.TokenDispenser // control concurrency
+	pool        *pool.Pool            // memory pool
 }

 // Object describes a b2 object
@@ -467,6 +458,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		uploads:     make(map[string][]*api.GetUploadURLResponse),
 		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 		uploadToken: pacer.NewTokenDispenser(ci.Transfers),
+		pool: pool.New(
+			time.Duration(opt.MemoryPoolFlushTime),
+			int(opt.ChunkSize),
+			ci.Transfers,
+			opt.MemoryPoolUseMmap,
+		),
 	}
 	f.setRoot(root)
 	f.features = (&fs.Features{
@@ -600,24 +597,23 @@ func (f *Fs) clearUploadURL(bucketID string) {
 	f.uploadMu.Unlock()
 }

-// getRW gets a RW buffer and an upload token
+// getBuf gets a buffer of f.opt.ChunkSize and an upload token
 //
 // If noBuf is set then it just gets an upload token
-func (f *Fs) getRW(noBuf bool) (rw *pool.RW) {
+func (f *Fs) getBuf(noBuf bool) (buf []byte) {
 	f.uploadToken.Get()
 	if !noBuf {
-		rw = multipart.NewRW()
+		buf = f.pool.Get()
 	}
-	return rw
+	return buf
 }

-// putRW returns a RW buffer to the memory pool and returns an upload
-// token
+// putBuf returns a buffer to the memory pool and an upload token
 //
-// If buf is nil then it just returns the upload token
-func (f *Fs) putRW(rw *pool.RW) {
-	if rw != nil {
-		_ = rw.Close()
+// If noBuf is set then it just returns the upload token
+func (f *Fs) putBuf(buf []byte, noBuf bool) {
+	if !noBuf {
+		f.pool.Put(buf)
 	}
 	f.uploadToken.Put()
 }
@@ -1297,11 +1293,7 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
 		if err != nil {
 			return err
 		}
-		err = up.Copy(ctx)
-		if err != nil {
-			return err
-		}
-		return dstObj.decodeMetaDataFileInfo(up.info)
+		return up.Upload(ctx)
 	}

 	dstBucket, dstPath := dstObj.split()
@@ -1430,7 +1422,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 	if err != nil {
 		return "", err
 	}
-	absPath := "/" + urlEncode(bucketPath)
+	absPath := "/" + bucketPath
 	link = RootURL + "/file/" + urlEncode(bucket) + absPath
 	bucketType, err := f.getbucketType(ctx, bucket)
 	if err != nil {
@@ -1869,11 +1861,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	if err != nil {
 		return err
 	}
-	if size < 0 {
+	if size == -1 {
 		// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
-		rw := o.fs.getRW(false)
-		n, err := io.CopyN(rw, in, int64(o.fs.opt.ChunkSize))
+		buf := o.fs.getBuf(false)
+		n, err := io.ReadFull(in, buf)
 		if err == nil {
 			bufReader := bufio.NewReader(in)
 			in = bufReader
@@ -1884,34 +1876,26 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
fs.Debugf(o, "File is big enough for chunked streaming") fs.Debugf(o, "File is big enough for chunked streaming")
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil) up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
if err != nil { if err != nil {
o.fs.putRW(rw) o.fs.putBuf(buf, false)
return err return err
} }
// NB Stream returns the buffer and token // NB Stream returns the buffer and token
err = up.Stream(ctx, rw) return up.Stream(ctx, buf)
if err != nil { } else if err == io.EOF || err == io.ErrUnexpectedEOF {
return err
}
return o.decodeMetaDataFileInfo(up.info)
} else if err == io.EOF {
fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n) fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
defer o.fs.putRW(rw) defer o.fs.putBuf(buf, false)
size = n size = int64(n)
in = rw in = bytes.NewReader(buf[:n])
} else { } else {
o.fs.putRW(rw) o.fs.putBuf(buf, false)
return err return err
} }
} else if size > int64(o.fs.opt.UploadCutoff) { } else if size > int64(o.fs.opt.UploadCutoff) {
chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{ up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
Open: o.fs,
OpenOptions: options,
})
if err != nil { if err != nil {
return err return err
} }
up := chunkWriter.(*largeUpload) return up.Upload(ctx)
return o.decodeMetaDataFileInfo(up.info)
} }
modTime := src.ModTime(ctx) modTime := src.ModTime(ctx)
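
The Update path above probes the incoming stream by reading at most one chunk before deciding between a direct upload and chunked streaming. A stdlib-only sketch of that probe, assuming a hypothetical chunk size: with io.CopyN, an io.EOF return means the source ended inside the first chunk.

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

const chunkSize = 16 // tiny for demonstration; real chunk sizes are MiB-scale

// probe copies up to one chunk into buf. more == false means the input
// fits in a single chunk (direct upload); more == true means there is
// still data and a chunked streaming upload is needed.
func probe(in io.Reader) (buf *bytes.Buffer, n int64, more bool, err error) {
	buf = &bytes.Buffer{}
	n, err = io.CopyN(buf, in, chunkSize)
	switch {
	case err == io.EOF:
		return buf, n, false, nil // short read: whole file is in buf
	case err != nil:
		return nil, n, false, err // real error
	default:
		return buf, n, true, nil // full chunk read, stream has more
	}
}

func main() {
	_, n, more, _ := probe(strings.NewReader("small"))
	fmt.Println(n, more) // 5 false -> direct upload of the buffered bytes
	_, n, more, _ = probe(strings.NewReader(strings.Repeat("x", 100)))
	fmt.Println(n, more) // 16 true -> chunked streaming upload
}
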
@@ -2019,41 +2003,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return o.decodeMetaDataFileInfo(&response) return o.decodeMetaDataFileInfo(&response)
} }
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
// FIXME what if file is smaller than 1 chunk?
if f.opt.Versions {
return info, nil, errNotWithVersions
}
if f.opt.VersionAt.IsSet() {
return info, nil, errNotWithVersionAt
}
//size := src.Size()
// Temporary Object under construction
o := &Object{
fs: f,
remote: src.Remote(),
}
bucket, _ := o.split()
err = f.makeBucket(ctx, bucket)
if err != nil {
return info, nil, err
}
info = fs.ChunkWriterInfo{
ChunkSize: int64(f.opt.ChunkSize),
Concurrency: o.fs.opt.UploadConcurrency,
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil)
return info, up, err
}
// Remove an object // Remove an object
func (o *Object) Remove(ctx context.Context) error { func (o *Object) Remove(ctx context.Context) error {
bucket, bucketPath := o.split() bucket, bucketPath := o.split()
@@ -2081,15 +2030,14 @@ func (o *Object) ID() string {
// Check the interfaces are satisfied // Check the interfaces are satisfied
var ( var (
_ fs.Fs = &Fs{} _ fs.Fs = &Fs{}
_ fs.Purger = &Fs{} _ fs.Purger = &Fs{}
_ fs.Copier = &Fs{} _ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{} _ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{} _ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{} _ fs.ListRer = &Fs{}
_ fs.PublicLinker = &Fs{} _ fs.PublicLinker = &Fs{}
_ fs.OpenChunkWriter = &Fs{} _ fs.Object = &Object{}
_ fs.Object = &Object{} _ fs.MimeTyper = &Object{}
_ fs.MimeTyper = &Object{} _ fs.IDer = &Object{}
_ fs.IDer = &Object{}
) )
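
The var block above is the usual Go compile-time check that a type satisfies a set of interfaces. A tiny self-contained illustration with hypothetical names:

package main

import "fmt"

type Remover interface {
	Remove() error
}

type Object struct{}

func (o *Object) Remove() error { return nil }

// Compile-time assertion: if *Object stops satisfying Remover, converting
// a nil pointer here fails to compile, with no runtime cost.
var _ Remover = (*Object)(nil)

func main() { fmt.Println("ok") }
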

View File

@@ -1,19 +1,10 @@
package b2 package b2
import ( import (
"bytes"
"context"
"fmt"
"testing" "testing"
"time" "time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
// Test b2 string encoding // Test b2 string encoding
@@ -177,100 +168,3 @@ func TestParseTimeString(t *testing.T) {
} }
} }
// The integration tests do a reasonable job of testing the normal
// copy but don't test the chunked copy.
func (f *Fs) InternalTestChunkedCopy(t *testing.T) {
ctx := context.Background()
contents := random.String(8 * 1024 * 1024)
item := fstest.NewItem("chunked-copy", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
src := fstests.PutTestContents(ctx, t, f, &item, contents, true)
defer func() {
assert.NoError(t, src.Remove(ctx))
}()
var itemCopy = item
itemCopy.Path += ".copy"
// Set copy cutoff to minimum value so we make chunks
origCutoff := f.opt.CopyCutoff
f.opt.CopyCutoff = minChunkSize
defer func() {
f.opt.CopyCutoff = origCutoff
}()
// Do the copy
dst, err := f.Copy(ctx, src, itemCopy.Path)
require.NoError(t, err)
defer func() {
assert.NoError(t, dst.Remove(ctx))
}()
// Check size
assert.Equal(t, src.Size(), dst.Size())
// Check modtime
srcModTime := src.ModTime(ctx)
dstModTime := dst.ModTime(ctx)
assert.True(t, srcModTime.Equal(dstModTime))
// Make sure contents are correct
gotContents := fstests.ReadObject(ctx, t, dst, -1)
assert.Equal(t, contents, gotContents)
}
// The integration tests do a reasonable job of testing the normal
// streaming upload but don't test the chunked streaming upload.
func (f *Fs) InternalTestChunkedStreamingUpload(t *testing.T, size int) {
ctx := context.Background()
contents := random.String(size)
item := fstest.NewItem(fmt.Sprintf("chunked-streaming-upload-%d", size), contents, fstest.Time("2001-05-06T04:05:06.499Z"))
// Set chunk size to minimum value so we make chunks
origOpt := f.opt
f.opt.ChunkSize = minChunkSize
f.opt.UploadCutoff = 0
defer func() {
f.opt = origOpt
}()
// Do the streaming upload
src := object.NewStaticObjectInfo(item.Path, item.ModTime, -1, true, item.Hashes, f)
in := bytes.NewBufferString(contents)
dst, err := f.PutStream(ctx, in, src)
require.NoError(t, err)
defer func() {
assert.NoError(t, dst.Remove(ctx))
}()
// Check size
assert.Equal(t, int64(size), dst.Size())
// Check modtime
srcModTime := src.ModTime(ctx)
dstModTime := dst.ModTime(ctx)
assert.Equal(t, srcModTime, dstModTime)
// Make sure contents are correct
gotContents := fstests.ReadObject(ctx, t, dst, -1)
assert.Equal(t, contents, gotContents, "Contents incorrect")
}
// -run TestIntegration/FsMkdir/FsPutFiles/Internal
func (f *Fs) InternalTest(t *testing.T) {
t.Run("ChunkedCopy", f.InternalTestChunkedCopy)
for _, size := range []fs.SizeSuffix{
minChunkSize - 1,
minChunkSize,
minChunkSize + 1,
(3 * minChunkSize) / 2,
(5 * minChunkSize) / 2,
} {
t.Run(fmt.Sprintf("ChunkedStreamingUpload/%d", size), func(t *testing.T) {
f.InternalTestChunkedStreamingUpload(t, int(size))
})
}
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -5,6 +5,7 @@
package b2 package b2
import ( import (
"bytes"
"context" "context"
"crypto/sha1" "crypto/sha1"
"encoding/hex" "encoding/hex"
@@ -13,6 +14,7 @@ import (
"io" "io"
"strings" "strings"
"sync" "sync"
"time"
"github.com/rclone/rclone/backend/b2/api" "github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
@@ -78,14 +80,12 @@ type largeUpload struct {
wrap accounting.WrapFn // account parts being transferred wrap accounting.WrapFn // account parts being transferred
id string // ID of the file being uploaded id string // ID of the file being uploaded
size int64 // total size size int64 // total size
parts int // calculated number of parts, if known parts int64 // calculated number of parts, if known
sha1smu sync.Mutex // mutex to protect sha1s
sha1s []string // slice of SHA1s for each part sha1s []string // slice of SHA1s for each part
uploadMu sync.Mutex // lock for upload variable uploadMu sync.Mutex // lock for upload variable
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
chunkSize int64 // chunk size to use chunkSize int64 // chunk size to use
src *Object // if copying, object we are reading from src *Object // if copying, object we are reading from
info *api.FileInfo // final response with info about the object
} }
// newLargeUpload starts an upload of object o from in with metadata in src // newLargeUpload starts an upload of object o from in with metadata in src
@@ -93,16 +93,18 @@ type largeUpload struct {
// If newInfo is set then metadata from that will be used instead of reading it from src // If newInfo is set then metadata from that will be used instead of reading it from src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) { func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
size := src.Size() size := src.Size()
parts := 0 parts := int64(0)
sha1SliceSize := int64(maxParts)
chunkSize := defaultChunkSize chunkSize := defaultChunkSize
if size == -1 { if size == -1 {
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize) fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
} else { } else {
chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize) chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
parts = int(size / int64(chunkSize)) parts = size / int64(chunkSize)
if size%int64(chunkSize) != 0 { if size%int64(chunkSize) != 0 {
parts++ parts++
} }
sha1SliceSize = parts
} }
opts := rest.Opts{ opts := rest.Opts{
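
The parts calculation in the hunk above is a ceiling division. A minimal standalone sketch (names are illustrative, not rclone's):

package main

import "fmt"

// numParts returns how many chunks of chunkSize are needed to hold size
// bytes: integer division rounded up, exactly as in the hunk above.
func numParts(size, chunkSize int64) int64 {
	parts := size / chunkSize
	if size%chunkSize != 0 {
		parts++
	}
	return parts
}

func main() {
	fmt.Println(numParts(100, 96)) // 2
	fmt.Println(numParts(192, 96)) // 2
	fmt.Println(numParts(193, 96)) // 3
}
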
@@ -150,7 +152,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
id: response.ID, id: response.ID,
size: size, size: size,
parts: parts, parts: parts,
sha1s: make([]string, 0, 16), sha1s: make([]string, sha1SliceSize),
chunkSize: int64(chunkSize), chunkSize: int64(chunkSize),
} }
// unwrap the accounting from the input, we use wrap to put it // unwrap the accounting from the input, we use wrap to put it
@@ -169,26 +171,24 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
// This should be returned with returnUploadURL when finished // This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) { func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
up.uploadMu.Lock() up.uploadMu.Lock()
if len(up.uploads) > 0 { defer up.uploadMu.Unlock()
if len(up.uploads) == 0 {
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_part_url",
}
var request = api.GetUploadPartURLRequest{
ID: up.id,
}
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get upload URL: %w", err)
}
} else {
upload, up.uploads = up.uploads[0], up.uploads[1:] upload, up.uploads = up.uploads[0], up.uploads[1:]
up.uploadMu.Unlock()
return upload, nil
}
up.uploadMu.Unlock()
opts := rest.Opts{
Method: "POST",
Path: "/b2_get_upload_part_url",
}
var request = api.GetUploadPartURLRequest{
ID: up.id,
}
err = up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to get upload URL: %w", err)
} }
return upload, nil return upload, nil
} }
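
getUploadURL above pops a previously returned upload URL if one is cached and only asks the server for a new one otherwise, holding the mutex just around the slice access. A generic sketch of that pop-or-fetch pattern (hypothetical types, no HTTP):

package main

import (
	"fmt"
	"sync"
)

type uploadURL struct{ url string }

type urlPool struct {
	mu   sync.Mutex
	free []*uploadURL
}

// get pops a cached URL if one is available, otherwise "fetches" a new
// one (stubbed here). The lock is only held around the slice access so
// the slow fetch does not serialise callers, mirroring the hunk above.
func (p *urlPool) get() *uploadURL {
	p.mu.Lock()
	if len(p.free) > 0 {
		u := p.free[0]
		p.free = p.free[1:]
		p.mu.Unlock()
		return u
	}
	p.mu.Unlock()
	// Slow path: a real implementation would call the server here.
	return &uploadURL{url: "https://example.invalid/upload/new"}
}

// put returns a URL for reuse by a later get.
func (p *urlPool) put(u *uploadURL) {
	p.mu.Lock()
	p.free = append(p.free, u)
	p.mu.Unlock()
}

func main() {
	p := &urlPool{}
	a := p.get()
	p.put(a)
	b := p.get()
	fmt.Println(a == b) // true: the cached URL was reused
}
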
@@ -203,39 +203,10 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
up.uploadMu.Unlock() up.uploadMu.Unlock()
} }
// Add an sha1 to the being built up sha1s // Transfer a chunk
func (up *largeUpload) addSha1(chunkNumber int, sha1 string) { func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
up.sha1smu.Lock() err := up.f.pacer.Call(func() (bool, error) {
defer up.sha1smu.Unlock() fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
if len(up.sha1s) < chunkNumber+1 {
up.sha1s = append(up.sha1s, make([]string, chunkNumber+1-len(up.sha1s))...)
}
up.sha1s[chunkNumber] = sha1
}
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (size int64, err error) {
// Only account after the checksum reads have been done
if do, ok := reader.(pool.DelayAccountinger); ok {
// To figure out this number, do a transfer and if the accounted size is 0 or a
// multiple of what it should be, increase or decrease this number.
do.DelayAccounting(1)
}
err = up.f.pacer.Call(func() (bool, error) {
// Discover the size by seeking to the end
size, err = reader.Seek(0, io.SeekEnd)
if err != nil {
return false, err
}
// rewind the reader on retry and after reading size
_, err = reader.Seek(0, io.SeekStart)
if err != nil {
return false, err
}
fs.Debugf(up.o, "Sending chunk %d length %d", chunkNumber, size)
// Get upload URL // Get upload URL
upload, err := up.getUploadURL(ctx) upload, err := up.getUploadURL(ctx)
@@ -243,8 +214,8 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
return false, err return false, err
} }
in := newHashAppendingReader(reader, sha1.New()) in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
sizeWithHash := size + int64(in.AdditionalLength()) size := int64(len(body)) + int64(in.AdditionalLength())
// Authorization // Authorization
// //
@@ -274,10 +245,10 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
Body: up.wrap(in), Body: up.wrap(in),
ExtraHeaders: map[string]string{ ExtraHeaders: map[string]string{
"Authorization": upload.AuthorizationToken, "Authorization": upload.AuthorizationToken,
"X-Bz-Part-Number": fmt.Sprintf("%d", chunkNumber+1), "X-Bz-Part-Number": fmt.Sprintf("%d", part),
sha1Header: "hex_digits_at_end", sha1Header: "hex_digits_at_end",
}, },
ContentLength: &sizeWithHash, ContentLength: &size,
} }
var response api.UploadPartResponse var response api.UploadPartResponse
@@ -285,7 +256,7 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response) resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
retry, err := up.f.shouldRetry(ctx, resp, err) retry, err := up.f.shouldRetry(ctx, resp, err)
if err != nil { if err != nil {
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", chunkNumber, retry, err, err) fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
} }
// On retryable error clear PartUploadURL // On retryable error clear PartUploadURL
if retry { if retry {
@@ -293,30 +264,30 @@ func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader i
upload = nil upload = nil
} }
up.returnUploadURL(upload) up.returnUploadURL(upload)
up.addSha1(chunkNumber, in.HexSum()) up.sha1s[part-1] = in.HexSum()
return retry, err return retry, err
}) })
if err != nil { if err != nil {
fs.Debugf(up.o, "Error sending chunk %d: %v", chunkNumber, err) fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
} else { } else {
fs.Debugf(up.o, "Done sending chunk %d", chunkNumber) fs.Debugf(up.o, "Done sending chunk %d", part)
} }
return size, err return err
} }
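
WriteChunk above discovers the chunk length by seeking to the end and then rewinds the reader so the same body can be resent on a retry. A stdlib-only sketch of that measure-and-rewind step:

package main

import (
	"fmt"
	"io"
	"strings"
)

// sizeAndRewind reports how many bytes r holds and leaves it positioned
// at the start, ready to be (re)sent — the same two seeks used above.
func sizeAndRewind(r io.ReadSeeker) (int64, error) {
	size, err := r.Seek(0, io.SeekEnd)
	if err != nil {
		return 0, err
	}
	if _, err := r.Seek(0, io.SeekStart); err != nil {
		return 0, err
	}
	return size, nil
}

func main() {
	r := strings.NewReader("hello chunk")
	size, err := sizeAndRewind(r)
	if err != nil {
		panic(err)
	}
	body, _ := io.ReadAll(r)
	fmt.Println(size, string(body)) // 11 hello chunk
}
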
// Copy a chunk // Copy a chunk
func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64) error { func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
err := up.f.pacer.Call(func() (bool, error) { err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize) fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
Path: "/b2_copy_part", Path: "/b2_copy_part",
} }
offset := int64(part) * up.chunkSize // where we are in the source file offset := (part - 1) * up.chunkSize // where we are in the source file
var request = api.CopyPartRequest{ var request = api.CopyPartRequest{
SourceID: up.src.id, SourceID: up.src.id,
LargeFileID: up.id, LargeFileID: up.id,
PartNumber: int64(part + 1), PartNumber: part,
Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1), Range: fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
} }
var response api.UploadPartResponse var response api.UploadPartResponse
@@ -325,7 +296,7 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
if err != nil { if err != nil {
fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err) fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
} }
up.addSha1(part, response.SHA1) up.sha1s[part-1] = response.SHA1
return retry, err return retry, err
}) })
if err != nil { if err != nil {
@@ -336,8 +307,8 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64)
return err return err
} }
// Close closes off the large upload // finish closes off the large upload
func (up *largeUpload) Close(ctx context.Context) error { func (up *largeUpload) finish(ctx context.Context) error {
fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts) fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
@@ -355,12 +326,11 @@ func (up *largeUpload) Close(ctx context.Context) error {
if err != nil { if err != nil {
return err return err
} }
up.info = &response return up.o.decodeMetaDataFileInfo(&response)
return nil
} }
// Abort aborts the large upload // cancel aborts the large upload
func (up *largeUpload) Abort(ctx context.Context) error { func (up *largeUpload) cancel(ctx context.Context) error {
fs.Debugf(up.o, "Cancelling large file %s", up.what) fs.Debugf(up.o, "Cancelling large file %s", up.what)
opts := rest.Opts{ opts := rest.Opts{
Method: "POST", Method: "POST",
@@ -385,99 +355,157 @@ func (up *largeUpload) Abort(ctx context.Context) error {
// reaches EOF. // reaches EOF.
// //
// Note that initialUploadBlock must be returned to f.putBuf() // Note that initialUploadBlock must be returned to f.putBuf()
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW) (err error) { func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })() defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id) fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
var ( var (
g, gCtx = errgroup.WithContext(ctx) g, gCtx = errgroup.WithContext(ctx)
hasMoreParts = true hasMoreParts = true
) )
up.size = initialUploadBlock.Size() up.size = int64(len(initialUploadBlock))
up.parts = 0 g.Go(func() error {
for part := 0; hasMoreParts; part++ { for part := int64(1); hasMoreParts; part++ {
// Get a block of memory from the pool and token which limits concurrency. // Get a block of memory from the pool and token which limits concurrency.
var rw *pool.RW var buf []byte
if part == 0 { if part == 1 {
rw = initialUploadBlock buf = initialUploadBlock
} else { } else {
rw = up.f.getRW(false) buf = up.f.getBuf(false)
}
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
up.f.putRW(rw)
break
}
// Read the chunk
var n int64
if part == 0 {
n = rw.Size()
} else {
n, err = io.CopyN(rw, up.in, up.chunkSize)
if err == io.EOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
hasMoreParts = false
} else if err != nil {
// other kinds of errors indicate failure
up.f.putRW(rw)
return err
} }
}
// Keep stats up to date // Fail fast, in case an errgroup managed function returns an error
up.parts += 1 // gCtx is cancelled. There is no point in uploading all the other parts.
up.size += n if gCtx.Err() != nil {
if part > maxParts { up.f.putBuf(buf, false)
up.f.putRW(rw) return nil
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts) }
}
part := part // for the closure // Read the chunk
g.Go(func() (err error) { var n int
defer up.f.putRW(rw) if part == 1 {
_, err = up.WriteChunk(gCtx, part, rw) n = len(buf)
return err } else {
}) n, err = io.ReadFull(up.in, buf)
} if err == io.ErrUnexpectedEOF {
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
buf = buf[:n]
hasMoreParts = false
} else if err == io.EOF {
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
up.f.putBuf(buf, false)
return nil
} else if err != nil {
// other kinds of errors indicate failure
up.f.putBuf(buf, false)
return err
}
}
// Keep stats up to date
up.parts = part
up.size += int64(n)
if part > maxParts {
up.f.putBuf(buf, false)
return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
}
part := part // for the closure
g.Go(func() (err error) {
defer up.f.putBuf(buf, false)
return up.transferChunk(gCtx, part, buf)
})
}
return nil
})
err = g.Wait() err = g.Wait()
if err != nil { if err != nil {
return err return err
} }
return up.Close(ctx) up.sha1s = up.sha1s[:up.parts]
return up.finish(ctx)
} }
// Copy the chunks from the source to the destination // Upload uploads the chunks from the input
func (up *largeUpload) Copy(ctx context.Context) (err error) { func (up *largeUpload) Upload(ctx context.Context) (err error) {
defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })() defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id) fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
var ( var (
g, gCtx = errgroup.WithContext(ctx) g, gCtx = errgroup.WithContext(ctx)
remaining = up.size remaining = up.size
uploadPool *pool.Pool
ci = fs.GetConfig(ctx)
) )
g.SetLimit(up.f.opt.UploadConcurrency) // If using large chunk size then make a temporary pool
for part := 0; part < up.parts; part++ { if up.chunkSize <= int64(up.f.opt.ChunkSize) {
// Fail fast, in case an errgroup managed function returns an error uploadPool = up.f.pool
// gCtx is cancelled. There is no point in copying all the other parts. } else {
if gCtx.Err() != nil { uploadPool = pool.New(
break time.Duration(up.f.opt.MemoryPoolFlushTime),
} int(up.chunkSize),
ci.Transfers,
reqSize := remaining up.f.opt.MemoryPoolUseMmap,
if reqSize >= up.chunkSize { )
reqSize = up.chunkSize defer uploadPool.Flush()
}
part := part // for the closure
g.Go(func() (err error) {
return up.copyChunk(gCtx, part, reqSize)
})
remaining -= reqSize
} }
// Get an upload token and a buffer
getBuf := func() (buf []byte) {
up.f.getBuf(true)
if !up.doCopy {
buf = uploadPool.Get()
}
return buf
}
// Put an upload token and a buffer
putBuf := func(buf []byte) {
if !up.doCopy {
uploadPool.Put(buf)
}
up.f.putBuf(nil, true)
}
g.Go(func() error {
for part := int64(1); part <= up.parts; part++ {
// Get a block of memory from the pool and token which limits concurrency.
buf := getBuf()
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
putBuf(buf)
return nil
}
reqSize := remaining
if reqSize >= up.chunkSize {
reqSize = up.chunkSize
}
if !up.doCopy {
// Read the chunk
buf = buf[:reqSize]
_, err = io.ReadFull(up.in, buf)
if err != nil {
putBuf(buf)
return err
}
}
part := part // for the closure
g.Go(func() (err error) {
defer putBuf(buf)
if !up.doCopy {
err = up.transferChunk(gCtx, part, buf)
} else {
err = up.copyChunk(gCtx, part, reqSize)
}
return err
})
remaining -= reqSize
}
return nil
})
err = g.Wait() err = g.Wait()
if err != nil { if err != nil {
return err return err
} }
return up.Close(ctx) return up.finish(ctx)
} }
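
Both upload paths above fan the parts out through an errgroup so the first failure cancels the remaining work. A compact sketch of that shape with golang.org/x/sync/errgroup; the part numbers and the injected failure are synthetic:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, gCtx := errgroup.WithContext(context.Background())
	g.SetLimit(4) // cap concurrent "uploads", like --b2-upload-concurrency

	for part := 0; part < 10; part++ {
		part := part // capture for the closure, as in the hunks above
		g.Go(func() error {
			// Fail fast: once any part errors, gCtx is cancelled and
			// the remaining parts give up instead of uploading.
			if gCtx.Err() != nil {
				return gCtx.Err()
			}
			if part == 3 {
				return fmt.Errorf("part %d failed", part)
			}
			return nil
		})
	}
	fmt.Println("wait:", g.Wait()) // reports the first error, e.g. part 3
}
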

View File

@@ -52,7 +52,7 @@ func (e *Error) Error() string {
out += ": " + e.Message out += ": " + e.Message
} }
if e.ContextInfo != nil { if e.ContextInfo != nil {
out += fmt.Sprintf(" (%s)", string(e.ContextInfo)) out += fmt.Sprintf(" (%+v)", e.ContextInfo)
} }
return out return out
} }
@@ -63,7 +63,7 @@ var _ error = (*Error)(nil)
// ItemFields are the fields needed for FileInfo // ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by" var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"
// Types of things in Item/ItemMini // Types of things in Item
const ( const (
ItemTypeFolder = "folder" ItemTypeFolder = "folder"
ItemTypeFile = "file" ItemTypeFile = "file"
@@ -72,31 +72,20 @@ const (
ItemStatusDeleted = "deleted" ItemStatusDeleted = "deleted"
) )
// ItemMini is a subset of the elements in a full Item returned by some API calls
type ItemMini struct {
Type string `json:"type"`
ID string `json:"id"`
SequenceID int64 `json:"sequence_id,string"`
Etag string `json:"etag"`
SHA1 string `json:"sha1"`
Name string `json:"name"`
}
// Item describes a folder or a file as returned by Get Folder Items and others // Item describes a folder or a file as returned by Get Folder Items and others
type Item struct { type Item struct {
Type string `json:"type"` Type string `json:"type"`
ID string `json:"id"` ID string `json:"id"`
SequenceID int64 `json:"sequence_id,string"` SequenceID string `json:"sequence_id"`
Etag string `json:"etag"` Etag string `json:"etag"`
SHA1 string `json:"sha1"` SHA1 string `json:"sha1"`
Name string `json:"name"` Name string `json:"name"`
Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261 Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
CreatedAt Time `json:"created_at"` CreatedAt Time `json:"created_at"`
ModifiedAt Time `json:"modified_at"` ModifiedAt Time `json:"modified_at"`
ContentCreatedAt Time `json:"content_created_at"` ContentCreatedAt Time `json:"content_created_at"`
ContentModifiedAt Time `json:"content_modified_at"` ContentModifiedAt Time `json:"content_modified_at"`
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
Parent ItemMini `json:"parent"`
SharedLink struct { SharedLink struct {
URL string `json:"url,omitempty"` URL string `json:"url,omitempty"`
Access string `json:"access,omitempty"` Access string `json:"access,omitempty"`
@@ -292,30 +281,3 @@ type User struct {
Address string `json:"address"` Address string `json:"address"`
AvatarURL string `json:"avatar_url"` AvatarURL string `json:"avatar_url"`
} }
// FileTreeChangeEventTypes are the events that can require cache invalidation
var FileTreeChangeEventTypes = map[string]struct{}{
"ITEM_COPY": {},
"ITEM_CREATE": {},
"ITEM_MAKE_CURRENT_VERSION": {},
"ITEM_MODIFY": {},
"ITEM_MOVE": {},
"ITEM_RENAME": {},
"ITEM_TRASH": {},
"ITEM_UNDELETE_VIA_TRASH": {},
"ITEM_UPLOAD": {},
}
// Event is an array element in the response returned from /events
type Event struct {
EventType string `json:"event_type"`
EventID string `json:"event_id"`
Source Item `json:"source"`
}
// Events is returned from /events
type Events struct {
ChunkSize int64 `json:"chunk_size"`
Entries []Event `json:"entries"`
NextStreamPosition int64 `json:"next_stream_position"`
}
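
The Event and Events types above mirror the payload of box's /events endpoint. A sketch that decodes a hand-written sample with encoding/json to show where chunk_size, entries and next_stream_position land; the sample payload is made up:

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down copies of the structs in the hunk above.
type Event struct {
	EventType string `json:"event_type"`
	EventID   string `json:"event_id"`
}

type Events struct {
	ChunkSize          int64   `json:"chunk_size"`
	Entries            []Event `json:"entries"`
	NextStreamPosition int64   `json:"next_stream_position"`
}

func main() {
	// Hypothetical payload in the shape the box /events endpoint returns.
	payload := []byte(`{
		"chunk_size": 1,
		"entries": [{"event_type": "ITEM_UPLOAD", "event_id": "abc123"}],
		"next_stream_position": 842
	}`)

	var ev Events
	if err := json.Unmarshal(payload, &ev); err != nil {
		panic(err)
	}
	fmt.Println(ev.ChunkSize, ev.Entries[0].EventType, ev.NextStreamPosition)
	// Output: 1 ITEM_UPLOAD 842
}
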

View File

@@ -149,23 +149,6 @@ func init() {
Default: "", Default: "",
Help: "Only show items owned by the login (email address) passed in.", Help: "Only show items owned by the login (email address) passed in.",
Advanced: true, Advanced: true,
}, {
Name: "impersonate",
Default: "",
Help: `Impersonate this user ID when using a service account.
Setting this flag allows rclone, when using a JWT service account, to
act on behalf of another user by setting the as-user header.
The user ID is the Box identifier for a user. User IDs can found for
any user via the GET /users endpoint, which is only available to
admins, or by calling the GET /users/me endpoint with an authenticated
user session.
See: https://developer.box.com/guides/authentication/jwt/as-user/
`,
Advanced: true,
Sensitive: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -279,29 +262,19 @@ type Options struct {
AccessToken string `config:"access_token"` AccessToken string `config:"access_token"`
ListChunk int `config:"list_chunk"` ListChunk int `config:"list_chunk"`
OwnedBy string `config:"owned_by"` OwnedBy string `config:"owned_by"`
Impersonate string `config:"impersonate"`
}
// ItemMeta defines metadata we cache for each Item ID
type ItemMeta struct {
SequenceID int64 // the most recent event processed for this item
ParentID string // ID of the parent directory of this item
Name string // leaf name of this item
} }
// Fs represents a remote box // Fs represents a remote box
type Fs struct { type Fs struct {
name string // name of this remote name string // name of this remote
root string // the path we are working on root string // the path we are working on
opt Options // parsed options opt Options // parsed options
features *fs.Features // optional features features *fs.Features // optional features
srv *rest.Client // the connection to the server srv *rest.Client // the connection to the server
dirCache *dircache.DirCache // Map of directory path to directory id dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry tokenRenewer *oauthutil.Renew // renew the token on expiry
uploadToken *pacer.TokenDispenser // control concurrency uploadToken *pacer.TokenDispenser // control concurrency
itemMetaCacheMu *sync.Mutex // protects itemMetaCache
itemMetaCache map[string]ItemMeta // map of Item ID to selected metadata
} }
// Object describes a box object // Object describes a box object
@@ -449,14 +422,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ci := fs.GetConfig(ctx) ci := fs.GetConfig(ctx)
f := &Fs{ f := &Fs{
name: name, name: name,
root: root, root: root,
opt: *opt, opt: *opt,
srv: rest.NewClient(client).SetRoot(rootURL), srv: rest.NewClient(client).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
uploadToken: pacer.NewTokenDispenser(ci.Transfers), uploadToken: pacer.NewTokenDispenser(ci.Transfers),
itemMetaCacheMu: new(sync.Mutex),
itemMetaCache: make(map[string]ItemMeta),
} }
f.features = (&fs.Features{ f.features = (&fs.Features{
CaseInsensitive: true, CaseInsensitive: true,
@@ -469,11 +440,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken) f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
} }
// If using impersonate set an as-user header
if f.opt.Impersonate != "" {
f.srv.SetHeader("as-user", f.opt.Impersonate)
}
jsonFile, ok := m.Get("box_config_file") jsonFile, ok := m.Get("box_config_file")
boxSubType, boxSubTypeOk := m.Get("box_sub_type") boxSubType, boxSubTypeOk := m.Get("box_sub_type")
@@ -716,17 +682,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
} }
entries = append(entries, o) entries = append(entries, o)
} }
// Cache some metadata for this Item to help us process events later
// on. In particular, the box event API does not provide the old path
// of the Item when it is renamed/deleted/moved/etc.
f.itemMetaCacheMu.Lock()
cachedItemMeta, found := f.itemMetaCache[info.ID]
if !found || cachedItemMeta.SequenceID < info.SequenceID {
f.itemMetaCache[info.ID] = ItemMeta{SequenceID: info.SequenceID, ParentID: directoryID, Name: info.Name}
}
f.itemMetaCacheMu.Unlock()
return false return false
}) })
if err != nil { if err != nil {
@@ -1166,7 +1121,7 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
// CleanUp empties the trash // CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) (err error) { func (f *Fs) CleanUp(ctx context.Context) (err error) {
var ( var (
deleteErrors atomic.Uint64 deleteErrors = int64(0)
concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers) concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
wg sync.WaitGroup wg sync.WaitGroup
) )
@@ -1182,7 +1137,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
err := f.deletePermanently(ctx, item.Type, item.ID) err := f.deletePermanently(ctx, item.Type, item.ID)
if err != nil { if err != nil {
fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err) fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
deleteErrors.Add(1) atomic.AddInt64(&deleteErrors, 1)
} }
}() }()
} else { } else {
@@ -1191,250 +1146,12 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
return false return false
}) })
wg.Wait() wg.Wait()
if deleteErrors.Load() != 0 { if deleteErrors != 0 {
return fmt.Errorf("failed to delete %d trash items", deleteErrors.Load()) return fmt.Errorf("failed to delete %d trash items", deleteErrors)
} }
return err return err
} }
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
go func() {
// get the `stream_position` early so all changes from now on get processed
streamPosition, err := f.changeNotifyStreamPosition(ctx)
if err != nil {
fs.Infof(f, "Failed to get StreamPosition: %s", err)
}
var ticker *time.Ticker
var tickerC <-chan time.Time
for {
select {
case pollInterval, ok := <-pollIntervalChan:
if !ok {
if ticker != nil {
ticker.Stop()
}
return
}
if ticker != nil {
ticker.Stop()
ticker, tickerC = nil, nil
}
if pollInterval != 0 {
ticker = time.NewTicker(pollInterval)
tickerC = ticker.C
}
case <-tickerC:
if streamPosition == "" {
streamPosition, err = f.changeNotifyStreamPosition(ctx)
if err != nil {
fs.Infof(f, "Failed to get StreamPosition: %s", err)
continue
}
}
streamPosition, err = f.changeNotifyRunner(ctx, notifyFunc, streamPosition)
if err != nil {
fs.Infof(f, "Change notify listener failure: %s", err)
}
}
}
}()
}
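
The ChangeNotify goroutine above is a poll loop whose interval can be changed, paused or stopped through a channel. A minimal self-contained version of the same ticker/select arrangement:

package main

import (
	"fmt"
	"time"
)

// pollLoop calls check on a ticker whose interval is driven by intervals;
// sending 0 pauses polling and closing the channel stops the loop.
func pollLoop(intervals <-chan time.Duration, check func()) {
	var ticker *time.Ticker
	var tickerC <-chan time.Time // nil channel: blocks until a ticker exists
	for {
		select {
		case d, ok := <-intervals:
			if !ok {
				if ticker != nil {
					ticker.Stop()
				}
				return
			}
			if ticker != nil {
				ticker.Stop()
				ticker, tickerC = nil, nil
			}
			if d != 0 {
				ticker = time.NewTicker(d)
				tickerC = ticker.C
			}
		case <-tickerC:
			check()
		}
	}
}

func main() {
	intervals := make(chan time.Duration)
	done := make(chan struct{})
	go func() { pollLoop(intervals, func() { fmt.Println("poll") }); close(done) }()
	intervals <- 10 * time.Millisecond
	time.Sleep(35 * time.Millisecond) // lets a few polls happen
	close(intervals)
	<-done
}
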
func (f *Fs) changeNotifyStreamPosition(ctx context.Context) (streamPosition string, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/events",
Parameters: fieldsValue(),
}
opts.Parameters.Set("stream_position", "now")
opts.Parameters.Set("stream_type", "changes")
var result api.Events
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", err
}
return strconv.FormatInt(result.NextStreamPosition, 10), nil
}
// Attempts to construct the full path for an object, given the ID of its
// parent directory and the name of the object.
//
// Can return "" if the parentID is not currently in the directory cache.
func (f *Fs) getFullPath(parentID string, childName string) (fullPath string) {
fullPath = ""
name := f.opt.Enc.ToStandardName(childName)
if parentID != "" {
if parentDir, ok := f.dirCache.GetInv(parentID); ok {
if len(parentDir) > 0 {
fullPath = parentDir + "/" + name
} else {
fullPath = name
}
}
} else {
// No parent, this object is at the root
fullPath = name
}
return fullPath
}
func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), streamPosition string) (nextStreamPosition string, err error) {
nextStreamPosition = streamPosition
// box can send duplicate Event IDs; filter any in a single notify run
processedEventIDs := make(map[string]bool)
for {
limit := f.opt.ListChunk
// box only allows a max of 500 events
if limit > 500 {
limit = 500
}
opts := rest.Opts{
Method: "GET",
Path: "/events",
Parameters: fieldsValue(),
}
opts.Parameters.Set("stream_position", nextStreamPosition)
opts.Parameters.Set("stream_type", "changes")
opts.Parameters.Set("limit", strconv.Itoa(limit))
var result api.Events
var resp *http.Response
fs.Debugf(f, "Checking for changes on remote (next_stream_position: %q)", nextStreamPosition)
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return "", err
}
if result.ChunkSize != int64(len(result.Entries)) {
return "", fmt.Errorf("invalid response to event request, chunk_size (%v) not equal to number of entries (%v)", result.ChunkSize, len(result.Entries))
}
nextStreamPosition = strconv.FormatInt(result.NextStreamPosition, 10)
if result.ChunkSize == 0 {
return nextStreamPosition, nil
}
type pathToClear struct {
path string
entryType fs.EntryType
}
var pathsToClear []pathToClear
newEventIDs := 0
for _, entry := range result.Entries {
if entry.EventID == "" || processedEventIDs[entry.EventID] { // missing Event ID, or already saw this one
continue
}
processedEventIDs[entry.EventID] = true
newEventIDs++
if entry.Source.ID == "" { // missing File or Folder ID
continue
}
if entry.Source.Type != api.ItemTypeFile && entry.Source.Type != api.ItemTypeFolder { // event is not for a file or folder
continue
}
// Only interested in event types that result in a file tree change
if _, found := api.FileTreeChangeEventTypes[entry.EventType]; !found {
continue
}
f.itemMetaCacheMu.Lock()
itemMeta, cachedItemMetaFound := f.itemMetaCache[entry.Source.ID]
if cachedItemMetaFound {
if itemMeta.SequenceID >= entry.Source.SequenceID {
// Item in the cache has the same or newer SequenceID than
// this event. Ignore this event, it must be old.
f.itemMetaCacheMu.Unlock()
continue
}
// This event is newer. Delete its entry from the cache,
// we'll notify about its change below, then it's up to a
// future list operation to repopulate the cache.
delete(f.itemMetaCache, entry.Source.ID)
}
f.itemMetaCacheMu.Unlock()
entryType := fs.EntryDirectory
if entry.Source.Type == api.ItemTypeFile {
entryType = fs.EntryObject
}
// The box event only includes the new path for the object (e.g.
// the path after the object was moved). If there was an old path
// saved in our cache, it must be cleared.
if cachedItemMetaFound {
path := f.getFullPath(itemMeta.ParentID, itemMeta.Name)
if path != "" {
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
}
// If this is a directory, also delete it from the dir cache.
// This will effectively invalidate the item metadata cache
// entries for all descendents of this directory, since we
// will no longer be able to construct a full path for them.
// This is exactly what we want, since we don't want to notify
// on the paths of these descendents if one of their ancestors
// has been renamed/deleted.
if entry.Source.Type == api.ItemTypeFolder {
f.dirCache.FlushDir(path)
}
}
// If the item is "active", then it is not trashed or deleted, so
// it potentially has a valid parent.
//
// Construct the new path of the object, based on the Parent ID
// and its name. If we get an empty result, it means we don't
// currently know about this object so notification is unnecessary.
if entry.Source.ItemStatus == api.ItemStatusActive {
path := f.getFullPath(entry.Source.Parent.ID, entry.Source.Name)
if path != "" {
pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
}
}
}
// box can sometimes repeatedly return the same Event IDs within a
// short period of time. If it stops giving us new ones, treat it
// the same as if it returned us none at all.
if newEventIDs == 0 {
return nextStreamPosition, nil
}
notifiedPaths := make(map[string]bool)
for _, p := range pathsToClear {
if _, ok := notifiedPaths[p.path]; ok {
continue
}
notifiedPaths[p.path] = true
notifyFunc(p.path, p.entryType)
}
fs.Debugf(f, "Received %v events, resulting in %v paths and %v notifications", len(result.Entries), len(pathsToClear), len(notifiedPaths))
}
}
// DirCacheFlush resets the directory cache - used in testing as an // DirCacheFlush resets the directory cache - used in testing as an
// optional interface // optional interface
func (f *Fs) DirCacheFlush() { func (f *Fs) DirCacheFlush() {

View File

@@ -18,7 +18,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:", RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil), NilObject: (*cache.Object)(nil),
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"}, UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
}) })

View File

@@ -40,7 +40,6 @@ func TestIntegration(t *testing.T) {
UnimplementableFsMethods: []string{ UnimplementableFsMethods: []string{
"PublicLink", "PublicLink",
"OpenWriterAt", "OpenWriterAt",
"OpenChunkWriter",
"MergeDirs", "MergeDirs",
"DirCacheFlush", "DirCacheFlush",
"UserInfo", "UserInfo",

View File

@@ -914,7 +914,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return do(ctx, uRemote, expire, unlink) return do(ctx, uRemote, expire, unlink)
} }
// PutUnchecked in to the remote path with the modTime given of the given size // Put in to the remote path with the modTime given of the given size
// //
// May create the object even if it returns an error - if so // May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return // will return the object and the error, otherwise will return

View File

@@ -11,7 +11,7 @@ import (
) )
var ( var (
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "OpenChunkWriter"} unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect"}
unimplementableObjectMethods = []string{} unimplementableObjectMethods = []string{}
) )

View File

@@ -257,16 +257,6 @@ func isMetadataFile(filename string) bool {
return strings.HasSuffix(filename, metaFileExt) return strings.HasSuffix(filename, metaFileExt)
} }
// Checks whether a file is a metadata file and returns the original
// file name and a flag indicating whether it was a metadata file or
// not.
func unwrapMetadataFile(filename string) (string, bool) {
if !isMetadataFile(filename) {
return "", false
}
return filename[:len(filename)-len(metaFileExt)], true
}
// makeDataName generates the file name for a data file with specified compression mode // makeDataName generates the file name for a data file with specified compression mode
func makeDataName(remote string, size int64, mode int) (newRemote string) { func makeDataName(remote string, size int64, mode int) (newRemote string) {
if mode != Uncompressed { if mode != Uncompressed {
@@ -989,8 +979,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
wrappedNotifyFunc := func(path string, entryType fs.EntryType) { wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
fs.Logf(f, "path %q entryType %d", path, entryType) fs.Logf(f, "path %q entryType %d", path, entryType)
var ( var (
wrappedPath string wrappedPath string
isMetadataFile bool
) )
switch entryType { switch entryType {
case fs.EntryDirectory: case fs.EntryDirectory:
@@ -998,10 +987,7 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
case fs.EntryObject: case fs.EntryObject:
// Note: All we really need to do to monitor the object is to check whether the metadata changed, // Note: All we really need to do to monitor the object is to check whether the metadata changed,
// as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same. // as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same.
wrappedPath, isMetadataFile = unwrapMetadataFile(path) wrappedPath = makeMetadataName(path)
if !isMetadataFile {
return
}
default: default:
fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType) fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType)
return return
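
unwrapMetadataFile above strips the metadata extension and reports whether it was present. An equivalent stdlib-only sketch using strings.CutSuffix (Go 1.20+); the extension constant here is a stand-in:

package main

import (
	"fmt"
	"strings"
)

const metaFileExt = ".meta" // stand-in; the real extension lives in the backend

// unwrapMetadataFile returns the name with the metadata extension removed
// and whether it was present (the helper above returns "" when it is not
// a metadata file; CutSuffix returns the input unchanged instead).
func unwrapMetadataFile(name string) (string, bool) {
	return strings.CutSuffix(name, metaFileExt)
}

func main() {
	fmt.Println(unwrapMetadataFile("photo.jpg.meta")) // photo.jpg true
	fmt.Println(unwrapMetadataFile("photo.jpg"))      // photo.jpg false
}
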

View File

@@ -14,26 +14,23 @@ import (
"github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/fstest/fstests"
) )
var defaultOpt = fstests.Opt{
RemoteName: "TestCompress:",
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"OpenChunkWriter",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{},
}
// TestIntegration runs integration tests against the remote // TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) { func TestIntegration(t *testing.T) {
fstests.Run(t, &defaultOpt) opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*Object)(nil),
UnimplementableFsMethods: []string{
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"PutUnchecked",
"PutStream",
"UserInfo",
"Disconnect",
},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
UnimplementableObjectMethods: []string{}}
fstests.Run(t, &opt)
} }
// TestRemoteGzip tests GZIP compression // TestRemoteGzip tests GZIP compression
@@ -43,13 +40,27 @@ func TestRemoteGzip(t *testing.T) {
} }
tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip") tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
name := "TestCompressGzip" name := "TestCompressGzip"
opt := defaultOpt fstests.Run(t, &fstests.Opt{
opt.RemoteName = name + ":" RemoteName: name + ":",
opt.ExtraConfig = []fstests.ExtraConfigItem{ NilObject: (*Object)(nil),
{Name: name, Key: "type", Value: "compress"}, UnimplementableFsMethods: []string{
{Name: name, Key: "remote", Value: tempdir}, "OpenWriterAt",
{Name: name, Key: "compression_mode", Value: "gzip"}, "MergeDirs",
} "DirCacheFlush",
opt.QuickTestOK = true "PutUnchecked",
fstests.Run(t, &opt) "PutStream",
"UserInfo",
"Disconnect",
},
UnimplementableObjectMethods: []string{
"GetTier",
"SetTier",
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "compress"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
},
QuickTestOK: true,
})
} }

View File

@@ -24,7 +24,7 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{ fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName, RemoteName: *fstest.RemoteName,
NilObject: (*crypt.Object)(nil), NilObject: (*crypt.Object)(nil),
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
}) })
} }
@@ -45,7 +45,7 @@ func TestStandardBase32(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato")}, {Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"}, {Name: name, Key: "filename_encryption", Value: "standard"},
}, },
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true, QuickTestOK: true,
}) })
@@ -67,7 +67,7 @@ func TestStandardBase64(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "standard"}, {Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base64"}, {Name: name, Key: "filename_encoding", Value: "base64"},
}, },
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true, QuickTestOK: true,
}) })
@@ -89,7 +89,7 @@ func TestStandardBase32768(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "standard"}, {Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base32768"}, {Name: name, Key: "filename_encoding", Value: "base32768"},
}, },
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true, QuickTestOK: true,
}) })
@@ -111,7 +111,7 @@ func TestOff(t *testing.T) {
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")}, {Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
{Name: name, Key: "filename_encryption", Value: "off"}, {Name: name, Key: "filename_encryption", Value: "off"},
}, },
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true, QuickTestOK: true,
}) })
@@ -137,7 +137,7 @@ func TestObfuscate(t *testing.T) {
{Name: name, Key: "filename_encryption", Value: "obfuscate"}, {Name: name, Key: "filename_encryption", Value: "obfuscate"},
}, },
SkipBadWindowsCharacters: true, SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true, QuickTestOK: true,
}) })
@@ -164,7 +164,7 @@ func TestNoDataObfuscate(t *testing.T) {
{Name: name, Key: "no_data_encryption", Value: "true"}, {Name: name, Key: "no_data_encryption", Value: "true"},
}, },
SkipBadWindowsCharacters: true, SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt", "OpenChunkWriter"}, UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"}, UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true, QuickTestOK: true,
}) })

View File

@@ -594,32 +594,10 @@ This resource key requirement only applies to a subset of old files.
Note also that opening the folder once in the web interface (with the Note also that opening the folder once in the web interface (with the
user you've authenticated rclone with) seems to be enough so that the user you've authenticated rclone with) seems to be enough so that the
resource key is not needed. resource key is no needed.
`, `,
Advanced: true, Advanced: true,
Sensitive: true, Sensitive: true,
}, {
Name: "fast_list_bug_fix",
Help: `Work around a bug in Google Drive listing.
Normally rclone will work around a bug in Google Drive when using
--fast-list (ListR) where the search "(A in parents) or (B in
parents)" returns nothing sometimes. See #3114, #4289 and
https://issuetracker.google.com/issues/149522397
Rclone detects this by finding no items in more than one directory
when listing and retries them as lists of individual directories.
This means that if you have a lot of empty directories rclone will end
up listing them all individually and this can take many more API
calls.
This flag allows the work-around to be disabled. This is **not**
recommended in normal use - only if you have a particular case you are
having trouble with like many empty directories.
`,
Advanced: true,
Default: true,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -694,7 +672,6 @@ type Options struct {
SkipShortcuts bool `config:"skip_shortcuts"` SkipShortcuts bool `config:"skip_shortcuts"`
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"` SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
ResourceKey string `config:"resource_key"` ResourceKey string `config:"resource_key"`
FastListBugFix bool `config:"fast_list_bug_fix"`
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"` EnvAuth bool `config:"env_auth"`
} }
@@ -1914,7 +1891,7 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
// drive where (A in parents) or (B in parents) returns nothing // drive where (A in parents) or (B in parents) returns nothing
// sometimes. See #3114, #4289 and // sometimes. See #3114, #4289 and
// https://issuetracker.google.com/issues/149522397 // https://issuetracker.google.com/issues/149522397
if f.opt.FastListBugFix && len(dirs) > 1 && !foundItems { if len(dirs) > 1 && !foundItems {
if atomic.SwapInt32(&f.grouping, 1) != 1 { if atomic.SwapInt32(&f.grouping, 1) != 1 {
fs.Debugf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs)) fs.Debugf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs))
} }

View File

@@ -28,14 +28,14 @@ var retryErrorCodes = []int{
509, // Bandwidth Limit Exceeded 509, // Bandwidth Limit Exceeded
} }
var errorRegex = regexp.MustCompile(`#(\d{1,3})`) var errorRegex = regexp.MustCompile(`#\d{1,3}`)
func parseFichierError(err error) int { func parseFichierError(err error) int {
matches := errorRegex.FindStringSubmatch(err.Error()) matches := errorRegex.FindStringSubmatch(err.Error())
if len(matches) == 0 { if len(matches) == 0 {
return 0 return 0
} }
code, err := strconv.Atoi(matches[1]) code, err := strconv.Atoi(matches[0])
if err != nil { if err != nil {
fs.Debugf(nil, "failed parsing fichier error: %v", err) fs.Debugf(nil, "failed parsing fichier error: %v", err)
return 0 return 0
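
The capture-group version of errorRegex above parses only the digits of the error code; without the group, matches[0] still carries the leading '#', which strconv.Atoi rejects. A short demonstration:

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var errorRegex = regexp.MustCompile(`#(\d{1,3})`)

func main() {
	matches := errorRegex.FindStringSubmatch("Flood detected: IP Locked #374")
	fmt.Printf("%q\n", matches) // ["#374" "374"]

	// matches[0] is the whole match including '#', so Atoi would fail on it;
	// matches[1] is just the captured digits.
	code, err := strconv.Atoi(matches[1])
	fmt.Println(code, err) // 374 <nil>
}
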

View File

@@ -158,9 +158,9 @@ type Fs struct {
tokenMu sync.Mutex // hold when reading the token tokenMu sync.Mutex // hold when reading the token
token string // current access token token string // current access token
tokenExpiry time.Time // time the current token expires tokenExpiry time.Time // time the current token expires
tokenExpired atomic.Int32 tokenExpired int32 // read and written with atomic
canCopyWithName bool // set if detected that can use fi_name in copy canCopyWithName bool // set if detected that can use fi_name in copy
precision time.Duration // precision reported precision time.Duration // precision reported
} }
// Object describes a filefabric object // Object describes a filefabric object
@@ -243,7 +243,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, st
err = status // return the error from the RPC err = status // return the error from the RPC
code := status.GetCode() code := status.GetCode()
if code == "login_token_expired" { if code == "login_token_expired" {
f.tokenExpired.Add(1) atomic.AddInt32(&f.tokenExpired, 1)
} else { } else {
for _, retryCode := range retryStatusCodes { for _, retryCode := range retryStatusCodes {
if code == retryCode.code { if code == retryCode.code {
@@ -323,12 +323,12 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
var refreshed = false var refreshed = false
defer func() { defer func() {
if refreshed { if refreshed {
f.tokenExpired.Store(0) atomic.StoreInt32(&f.tokenExpired, 0)
} }
f.tokenMu.Unlock() f.tokenMu.Unlock()
}() }()
expired := f.tokenExpired.Load() != 0 expired := atomic.LoadInt32(&f.tokenExpired) != 0
if expired { if expired {
fs.Debugf(f, "Token invalid - refreshing") fs.Debugf(f, "Token invalid - refreshing")
} }
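
This hunk replaces a raw int32 driven by atomic.AddInt32/LoadInt32/StoreInt32 with the Go 1.19 atomic.Int32 wrapper. A tiny sketch of the wrapper used as an expired-token flag:

package main

import (
	"fmt"
	"sync/atomic"
)

// tokenState lets any goroutine mark the token expired and lets the
// refresher observe and clear the flag, as in the hunk above.
type tokenState struct {
	expired atomic.Int32
}

func (t *tokenState) markExpired()    { t.expired.Add(1) }
func (t *tokenState) isExpired() bool { return t.expired.Load() != 0 }
func (t *tokenState) refreshed()      { t.expired.Store(0) }

func main() {
	var t tokenState
	fmt.Println(t.isExpired()) // false
	t.markExpired()
	fmt.Println(t.isExpired()) // true
	t.refreshed()
	fmt.Println(t.isExpired()) // false
}
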

View File

@@ -15,7 +15,7 @@ import (
"sync" "sync"
"time" "time"
"github.com/jlaffaye/ftp" "github.com/rclone/ftp"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config"
@@ -249,6 +249,7 @@ type Fs struct {
pool []*ftp.ServerConn pool []*ftp.ServerConn
drain *time.Timer // used to drain the pool when we stop using the connections drain *time.Timer // used to drain the pool when we stop using the connections
tokens *pacer.TokenDispenser tokens *pacer.TokenDispenser
tlsConf *tls.Config
pacer *fs.Pacer // pacer for FTP connections pacer *fs.Pacer // pacer for FTP connections
fGetTime bool // true if the ftp library accepts GetTime fGetTime bool // true if the ftp library accepts GetTime
fSetTime bool // true if the ftp library accepts SetTime fSetTime bool // true if the ftp library accepts SetTime
@@ -361,36 +362,10 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
return fserrors.ShouldRetry(err), err return fserrors.ShouldRetry(err), err
} }
// Get a TLS config with a unique session cache.
//
// We can't share session caches between connections.
//
// See: https://github.com/rclone/rclone/issues/7234
func (f *Fs) tlsConfig() *tls.Config {
var tlsConfig *tls.Config
if f.opt.TLS || f.opt.ExplicitTLS {
tlsConfig = &tls.Config{
ServerName: f.opt.Host,
InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
}
if f.opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize)
}
if f.opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
}
return tlsConfig
}
// Open a new connection to the FTP server. // Open a new connection to the FTP server.
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) { func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
fs.Debugf(f, "Connecting to FTP server") fs.Debugf(f, "Connecting to FTP server")
// tls.Config for this connection only. Will be used for data
// and control connections.
tlsConfig := f.tlsConfig()
// Make ftp library dial with fshttp dialer optionally using TLS // Make ftp library dial with fshttp dialer optionally using TLS
initialConnection := true initialConnection := true
dial := func(network, address string) (conn net.Conn, err error) { dial := func(network, address string) (conn net.Conn, err error) {
@@ -408,7 +383,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
return nil, err return nil, err
} }
// Connect using cleartext only for non TLS // Connect using cleartext only for non TLS
if tlsConfig == nil { if f.tlsConf == nil {
return conn, nil return conn, nil
} }
// Initial connection only needs to be cleartext for explicit TLS // Initial connection only needs to be cleartext for explicit TLS
@@ -417,7 +392,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
return conn, nil return conn, nil
} }
// Upgrade connection to TLS // Upgrade connection to TLS
tlsConn := tls.Client(conn, tlsConfig) tlsConn := tls.Client(conn, f.tlsConf)
// Do the initial handshake - tls.Client doesn't do it for us // Do the initial handshake - tls.Client doesn't do it for us
// If we do this then connections to proftpd/pureftpd lock up // If we do this then connections to proftpd/pureftpd lock up
// See: https://github.com/rclone/rclone/issues/6426 // See: https://github.com/rclone/rclone/issues/6426
@@ -439,9 +414,9 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.TLS { if f.opt.TLS {
// Our dialer takes care of TLS but ftp library also needs tlsConf // Our dialer takes care of TLS but ftp library also needs tlsConf
// as a trigger for sending PBSZ and PROT options to server. // as a trigger for sending PBSZ and PROT options to server.
ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig)) ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
} else if f.opt.ExplicitTLS { } else if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(tlsConfig)) ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
} }
if f.opt.DisableEPSV { if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true)) ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
@@ -596,6 +571,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
if opt.TLS && opt.ExplicitTLS { if opt.TLS && opt.ExplicitTLS {
return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config") return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config")
} }
var tlsConfig *tls.Config
if opt.TLS || opt.ExplicitTLS {
tlsConfig = &tls.Config{
ServerName: opt.Host,
InsecureSkipVerify: opt.SkipVerifyTLSCert,
}
if opt.TLSCacheSize > 0 {
tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(opt.TLSCacheSize)
}
if opt.DisableTLS13 {
tlsConfig.MaxVersion = tls.VersionTLS12
}
}
u := protocol + path.Join(dialAddr+"/", root) u := protocol + path.Join(dialAddr+"/", root)
ci := fs.GetConfig(ctx) ci := fs.GetConfig(ctx)
f := &Fs{ f := &Fs{
@@ -608,6 +596,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
pass: pass, pass: pass,
dialAddr: dialAddr, dialAddr: dialAddr,
tokens: pacer.NewTokenDispenser(opt.Concurrency), tokens: pacer.NewTokenDispenser(opt.Concurrency),
tlsConf: tlsConfig,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
} }
f.features = (&fs.Features{ f.features = (&fs.Features{
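
The ftp changes replace the single tlsConf built once in NewFs with a tlsConfig() helper called per connection, because sharing one tls.Config (and therefore one ClientSessionCache) between concurrent connections caused the failures tracked in rclone issue 7234. A standalone sketch of building a fresh config per dial; the option names host, insecure, cacheSize and noTLS13 are illustrative:

package main

import (
	"crypto/tls"
	"fmt"
)

// newTLSConfig builds a tls.Config with its own session cache so that
// resumption state is never shared between independent connections.
func newTLSConfig(host string, insecure bool, cacheSize int, noTLS13 bool) *tls.Config {
	c := &tls.Config{
		ServerName:         host,
		InsecureSkipVerify: insecure,
	}
	if cacheSize > 0 {
		// A fresh LRU cache per connection rather than one shared cache.
		c.ClientSessionCache = tls.NewLRUClientSessionCache(cacheSize)
	}
	if noTLS13 {
		c.MaxVersion = tls.VersionTLS12
	}
	return c
}

func main() {
	a := newTLSConfig("ftp.example.com", false, 32, false)
	b := newTLSConfig("ftp.example.com", false, 32, false)
	fmt.Println(a.ClientSessionCache != b.ClientSessionCache) // true: separate caches
}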

View File

@@ -23,7 +23,6 @@ func TestIntegration(t *testing.T) {
NilObject: (*hasher.Object)(nil), NilObject: (*hasher.Object)(nil),
UnimplementableFsMethods: []string{ UnimplementableFsMethods: []string{
"OpenWriterAt", "OpenWriterAt",
"OpenChunkWriter",
}, },
UnimplementableObjectMethods: []string{}, UnimplementableObjectMethods: []string{},
} }

View File

@@ -21,7 +21,6 @@ import (
"github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/pacer"
) )
// Fs represents a HDFS server // Fs represents a HDFS server
@@ -32,15 +31,8 @@ type Fs struct {
opt Options // options for this backend opt Options // options for this backend
ci *fs.ConfigInfo // global config ci *fs.ConfigInfo // global config
client *hdfs.Client client *hdfs.Client
pacer *fs.Pacer // pacer for API calls
} }
const (
minSleep = 20 * time.Millisecond
maxSleep = 10 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
// copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go // copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go
func getKerberosClient() (*krb.Client, error) { func getKerberosClient() (*krb.Client, error) {
configPath := os.Getenv("KRB5_CONFIG") configPath := os.Getenv("KRB5_CONFIG")
@@ -122,7 +114,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
opt: *opt, opt: *opt,
ci: fs.GetConfig(ctx), ci: fs.GetConfig(ctx),
client: client, client: client,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
} }
f.features = (&fs.Features{ f.features = (&fs.Features{

View File

@@ -5,12 +5,10 @@ package hdfs
import ( import (
"context" "context"
"errors"
"io" "io"
"path" "path"
"time" "time"
"github.com/colinmarc/hdfs/v2"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/readers"
@@ -108,7 +106,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Update object // Update object
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
realpath := o.fs.realpath(o.remote) realpath := o.fs.realpath(src.Remote())
dirname := path.Dir(realpath) dirname := path.Dir(realpath)
fs.Debugf(o.fs, "update [%s]", realpath) fs.Debugf(o.fs, "update [%s]", realpath)
@@ -143,23 +141,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err return err
} }
// If the datanodes have acknowledged all writes but not yet err = out.Close()
// to the namenode, FileWriter.Close can return ErrReplicating
// (wrapped in an os.PathError). This indicates that all data
// has been written, but the lease is still open for the file.
//
// It is safe in this case to either ignore the error (and let
// the lease expire on its own) or to call Close multiple
// times until it completes without an error. The Java client,
// for context, always chooses to retry, with exponential
// backoff.
err = o.fs.pacer.Call(func() (bool, error) {
err := out.Close()
if err == nil {
return false, nil
}
return errors.Is(err, hdfs.ErrReplicating), err
})
if err != nil { if err != nil {
cleanup() cleanup()
return err return err
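
The comment in the HDFS hunk above notes that FileWriter.Close can return hdfs.ErrReplicating (wrapped in an os.PathError) even though all the data has been written, and that it is safe to retry Close with backoff. A minimal retry loop in that spirit, independent of rclone's pacer; the sentinel error and the wait schedule here are placeholders, not the real library values:

package main

import (
	"errors"
	"fmt"
	"time"
)

// errReplicating stands in for hdfs.ErrReplicating from
// github.com/colinmarc/hdfs/v2 so the sketch stays self-contained.
var errReplicating = errors.New("replication in progress")

// closeWithRetry keeps calling closeFn while it reports the "still
// replicating" condition, backing off exponentially between attempts.
func closeWithRetry(closeFn func() error, attempts int) error {
	sleep := 20 * time.Millisecond
	var err error
	for i := 0; i < attempts; i++ {
		err = closeFn()
		if err == nil || !errors.Is(err, errReplicating) {
			return err
		}
		time.Sleep(sleep)
		sleep *= 2
	}
	return err
}

func main() {
	calls := 0
	err := closeWithRetry(func() error {
		calls++
		if calls < 3 {
			return fmt.Errorf("close: %w", errReplicating)
		}
		return nil
	}, 10)
	fmt.Println(calls, err) // 3 <nil>
}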

View File

@@ -67,13 +67,9 @@ const (
legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2" legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
legacyConfigVersion = 0 legacyConfigVersion = 0
teliaseCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token" teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaseCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth" teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaseCloudClientID = "desktop" teliaCloudClientID = "desktop"
telianoCloudTokenURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/token"
telianoCloudAuthURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/auth"
telianoCloudClientID = "desktop"
tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token" tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth" tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
@@ -142,11 +138,8 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
Value: "legacy", Value: "legacy",
Help: "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.", Help: "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
}, { }, {
Value: "telia_se", Value: "telia",
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud (Sweden).", Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud.",
}, {
Value: "telia_no",
Help: "Telia Sky authentication.\nUse this if you are using Telia Sky (Norway).",
}, { }, {
Value: "tele2", Value: "tele2",
Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.", Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
@@ -245,32 +238,17 @@ machines.`)
return nil, fmt.Errorf("error while saving token: %w", err) return nil, fmt.Errorf("error while saving token: %w", err)
} }
return fs.ConfigGoto("choose_device") return fs.ConfigGoto("choose_device")
case "telia_se": // telia_se cloud config case "telia": // telia cloud config
m.Set("configVersion", fmt.Sprint(configVersion)) m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, teliaseCloudClientID) m.Set(configClientID, teliaCloudClientID)
m.Set(configTokenURL, teliaseCloudTokenURL) m.Set(configTokenURL, teliaCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{ return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{ OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{ Endpoint: oauth2.Endpoint{
AuthURL: teliaseCloudAuthURL, AuthURL: teliaCloudAuthURL,
TokenURL: teliaseCloudTokenURL, TokenURL: teliaCloudTokenURL,
}, },
ClientID: teliaseCloudClientID, ClientID: teliaCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "telia_no": // telia_no cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, telianoCloudClientID)
m.Set(configTokenURL, telianoCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: telianoCloudAuthURL,
TokenURL: telianoCloudTokenURL,
},
ClientID: telianoCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"}, Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL, RedirectURL: oauthutil.RedirectLocalhostURL,
}, },

View File

@@ -13,7 +13,6 @@ import (
"runtime" "runtime"
"strings" "strings"
"sync" "sync"
"sync/atomic"
"time" "time"
"unicode/utf8" "unicode/utf8"
@@ -244,7 +243,7 @@ type Fs struct {
precision time.Duration // precision of local filesystem precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'. warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string warned map[string]struct{} // whether we have warned about this string
xattrSupported atomic.Int32 // whether xattrs are supported xattrSupported int32 // whether xattrs are supported (atomic access)
// do os.Lstat or os.Stat // do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error) lstat func(name string) (os.FileInfo, error)
@@ -292,7 +291,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
lstat: os.Lstat, lstat: os.Lstat,
} }
if xattrSupported { if xattrSupported {
f.xattrSupported.Store(1) f.xattrSupported = 1
} }
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc) f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
f.features = (&fs.Features{ f.features = (&fs.Features{
@@ -642,13 +641,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
// //
// If it isn't empty it will return an error // If it isn't empty it will return an error
func (f *Fs) Rmdir(ctx context.Context, dir string) error { func (f *Fs) Rmdir(ctx context.Context, dir string) error {
localPath := f.localPath(dir) return os.Remove(f.localPath(dir))
if fi, err := os.Stat(localPath); err != nil {
return err
} else if !fi.IsDir() {
return fs.ErrorIsFile
}
return os.Remove(localPath)
} }
// Precision of the file system // Precision of the file system

View File

@@ -6,6 +6,7 @@ package local
import ( import (
"fmt" "fmt"
"strings" "strings"
"sync/atomic"
"syscall" "syscall"
"github.com/pkg/xattr" "github.com/pkg/xattr"
@@ -27,7 +28,7 @@ func (f *Fs) xattrIsNotSupported(err error) bool {
// Xattrs not supported can be ENOTSUP or ENOATTR or EINVAL (on Solaris) // Xattrs not supported can be ENOTSUP or ENOATTR or EINVAL (on Solaris)
if xattrErr.Err == syscall.EINVAL || xattrErr.Err == syscall.ENOTSUP || xattrErr.Err == xattr.ENOATTR { if xattrErr.Err == syscall.EINVAL || xattrErr.Err == syscall.ENOTSUP || xattrErr.Err == xattr.ENOATTR {
// Show xattrs not supported // Show xattrs not supported
if f.xattrSupported.CompareAndSwap(1, 0) { if atomic.CompareAndSwapInt32(&f.xattrSupported, 1, 0) {
fs.Errorf(f, "xattrs not supported - disabling: %v", err) fs.Errorf(f, "xattrs not supported - disabling: %v", err)
} }
return true return true
@@ -40,7 +41,7 @@ func (f *Fs) xattrIsNotSupported(err error) bool {
// It doesn't return any attributes owned by this backend in // It doesn't return any attributes owned by this backend in
// metadataKeys // metadataKeys
func (o *Object) getXattr() (metadata fs.Metadata, err error) { func (o *Object) getXattr() (metadata fs.Metadata, err error) {
if !xattrSupported || o.fs.xattrSupported.Load() == 0 { if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
return nil, nil return nil, nil
} }
var list []string var list []string
@@ -89,7 +90,7 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
// //
// It doesn't set any attributes owned by this backend in metadataKeys // It doesn't set any attributes owned by this backend in metadataKeys
func (o *Object) setXattr(metadata fs.Metadata) (err error) { func (o *Object) setXattr(metadata fs.Metadata) (err error) {
if !xattrSupported || o.fs.xattrSupported.Load() == 0 { if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
return nil return nil
} }
for k, value := range metadata { for k, value := range metadata {
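
The local-backend xattr hunks use one atomic value both as the supported/unsupported switch and as a warn-once latch: CompareAndSwap(1, 0) succeeds for exactly one caller, which logs the error, and every later call just sees the flag already cleared. A small sketch of that latch with a hypothetical featureFlag type:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// featureFlag is a hypothetical supported/unsupported switch that can be
// disabled at most once, however many goroutines hit the error path.
type featureFlag struct{ supported atomic.Int32 }

func newFeatureFlag() *featureFlag {
	f := &featureFlag{}
	f.supported.Store(1)
	return f
}

// disable returns true only for the single caller that flipped the flag,
// i.e. the one that should emit the warning.
func (f *featureFlag) disable() bool { return f.supported.CompareAndSwap(1, 0) }
func (f *featureFlag) enabled() bool { return f.supported.Load() != 0 }

func main() {
	f := newFeatureFlag()
	var warnings atomic.Int32
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if f.disable() {
				warnings.Add(1) // would log "xattrs not supported - disabling"
			}
		}()
	}
	wg.Wait()
	fmt.Println(warnings.Load(), f.enabled()) // 1 false
}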

View File

@@ -206,7 +206,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
} }
ci := fs.GetConfig(ctx) ci := fs.GetConfig(ctx)
// cache *mega.Mega on username so we can reuse and share // cache *mega.Mega on username so we can re-use and share
// them between remotes. They are expensive to make as they // them between remotes. They are expensive to make as they
// contain all the objects and sharing the objects makes the // contain all the objects and sharing the objects makes the
// move code easier as we don't have to worry about mixing // move code easier as we don't have to worry about mixing

View File

@@ -572,18 +572,15 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
case "url": case "url":
return fs.ConfigInput("url_end", "config_site_url", `Site URL return fs.ConfigInput("url_end", "config_site_url", `Site URL
Examples: Example: "https://contoso.sharepoint.com/sites/mysite" or "mysite"
- "mysite"
- "https://XXX.sharepoint.com/sites/mysite"
- "https://XXX.sharepoint.com/teams/ID"
`) `)
case "url_end": case "url_end":
siteURL := config.Result siteURL := config.Result
re := regexp.MustCompile(`https://.*\.sharepoint\.com(/.*)`) re := regexp.MustCompile(`https://.*\.sharepoint\.com/sites/(.*)`)
match := re.FindStringSubmatch(siteURL) match := re.FindStringSubmatch(siteURL)
if len(match) == 2 { if len(match) == 2 {
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{ return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
relativePath: match[1], relativePath: "/sites/" + match[1],
}) })
} }
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{ return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
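
The onedrive hunk widens the site-URL pattern from https://.*\.sharepoint\.com/sites/(.*) to https://.*\.sharepoint\.com(/.*), so the captured relative path now covers /teams/... and any other prefix as well as /sites/.... A quick check of the new pattern against both URL shapes (the hostnames are made up):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`https://.*\.sharepoint\.com(/.*)`)
	for _, u := range []string{
		"https://contoso.sharepoint.com/sites/mysite",
		"https://contoso.sharepoint.com/teams/ID",
		"mysite", // bare site name: no match, handled by a different code path
	} {
		if m := re.FindStringSubmatch(u); len(m) == 2 {
			fmt.Printf("%q -> relativePath %q\n", u, m[1])
		} else {
			fmt.Printf("%q -> no match\n", u)
		}
	}
}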

View File

@@ -767,17 +767,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return f.shouldRetry(ctx, resp, err) return f.shouldRetry(ctx, resp, err)
}) })
if err != nil { if err != nil {
if apiError, ok := err.(*Error); ok {
// Work around a bug maybe in opendrive or maybe in rclone.
//
// We should know whether the folder exists or not by the call to
// FindDir above so exactly why it is not found here is a mystery.
//
// This manifests as a failure in fs/sync TestSyncOverlapWithFilter
if apiError.Info.Message == "Folder is already deleted" {
return fs.DirEntries{}, nil
}
}
return nil, fmt.Errorf("failed to get folder list: %w", err) return nil, fmt.Errorf("failed to get folder list: %w", err)
} }

View File

@@ -13,6 +13,7 @@ import (
"github.com/oracle/oci-go-sdk/v65/common" "github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage" "github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
) )
const ( const (
@@ -127,3 +128,18 @@ func useBYOKCopyObject(fs *Fs, request *objectstorage.CopyObjectRequest) {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256) request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
} }
} }
func useBYOKUpload(fs *Fs, request *transfer.UploadRequest) {
if fs.opt.SSEKMSKeyID != "" {
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
}
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}

View File

@@ -6,7 +6,6 @@ package oracleobjectstorage
import ( import (
"context" "context"
"fmt" "fmt"
"sort"
"strings" "strings"
"time" "time"
@@ -197,32 +196,6 @@ func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string
// for "dir" and it returns "dirKey" // for "dir" and it returns "dirKey"
func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory string) ( func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory string) (
uploads []*objectstorage.MultipartUpload, err error) { uploads []*objectstorage.MultipartUpload, err error) {
return f.listMultipartUploadsObject(ctx, bucketName, directory, false)
}
// listMultipartUploads finds first outstanding multipart uploads for (bucket, key)
//
// Note that rather lazily we treat key as a prefix, so it matches
// directories and objects. This could surprise the user if they ask
// for "dir" and it returns "dirKey"
func (f *Fs) findLatestMultipartUpload(ctx context.Context, bucketName, directory string) (
uploads []*objectstorage.MultipartUpload, err error) {
pastUploads, err := f.listMultipartUploadsObject(ctx, bucketName, directory, true)
if err != nil {
return nil, err
}
if len(pastUploads) > 0 {
sort.Slice(pastUploads, func(i, j int) bool {
return pastUploads[i].TimeCreated.After(pastUploads[j].TimeCreated.Time)
})
return pastUploads[:1], nil
}
return nil, err
}
func (f *Fs) listMultipartUploadsObject(ctx context.Context, bucketName, directory string, exact bool) (
uploads []*objectstorage.MultipartUpload, err error) {
uploads = []*objectstorage.MultipartUpload{} uploads = []*objectstorage.MultipartUpload{}
req := objectstorage.ListMultipartUploadsRequest{ req := objectstorage.ListMultipartUploadsRequest{
@@ -244,13 +217,7 @@ func (f *Fs) listMultipartUploadsObject(ctx context.Context, bucketName, directo
if directory != "" && item.Object != nil && !strings.HasPrefix(*item.Object, directory) { if directory != "" && item.Object != nil && !strings.HasPrefix(*item.Object, directory) {
continue continue
} }
if exact { uploads = append(uploads, &response.Items[index])
if *item.Object == directory {
uploads = append(uploads, &response.Items[index])
}
} else {
uploads = append(uploads, &response.Items[index])
}
} }
if response.OpcNextPage == nil { if response.OpcNextPage == nil {
break break
@@ -259,34 +226,3 @@ func (f *Fs) listMultipartUploadsObject(ctx context.Context, bucketName, directo
} }
return uploads, nil return uploads, nil
} }
func (f *Fs) listMultipartUploadParts(ctx context.Context, bucketName, bucketPath string, uploadID string) (
uploadedParts map[int]objectstorage.MultipartUploadPartSummary, err error) {
uploadedParts = make(map[int]objectstorage.MultipartUploadPartSummary)
req := objectstorage.ListMultipartUploadPartsRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
UploadId: common.String(uploadID),
Limit: common.Int(1000),
}
var response objectstorage.ListMultipartUploadPartsResponse
for {
err = f.pacer.Call(func() (bool, error) {
response, err = f.srv.ListMultipartUploadParts(ctx, req)
return shouldRetry(ctx, response.HTTPResponse(), err)
})
if err != nil {
return uploadedParts, err
}
for _, item := range response.Items {
uploadedParts[*item.PartNumber] = item
}
if response.OpcNextPage == nil {
break
}
req.Page = response.OpcNextPage
}
return uploadedParts, nil
}
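
findLatestMultipartUpload above narrows the listing to exact object matches and then keeps only the newest upload by sorting on TimeCreated. The same selection step on its own, using a simplified upload struct in place of the OCI SDK's MultipartUpload:

package main

import (
	"fmt"
	"sort"
	"time"
)

// upload is a simplified stand-in for objectstorage.MultipartUpload.
type upload struct {
	ID          string
	TimeCreated time.Time
}

// latest returns the most recently created upload, or nil if there are none.
func latest(uploads []upload) *upload {
	if len(uploads) == 0 {
		return nil
	}
	sort.Slice(uploads, func(i, j int) bool {
		return uploads[i].TimeCreated.After(uploads[j].TimeCreated)
	})
	return &uploads[0]
}

func main() {
	now := time.Now()
	ups := []upload{
		{ID: "old", TimeCreated: now.Add(-2 * time.Hour)},
		{ID: "new", TimeCreated: now.Add(-5 * time.Minute)},
	}
	fmt.Println(latest(ups).ID) // new
}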

View File

@@ -1,441 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"strings"
"sync"
"time"
"github.com/ncw/swift/v2"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/pool"
"golang.org/x/net/http/httpguts"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/hash"
)
var warnStreamUpload sync.Once
// Info needed for an upload
type uploadInfo struct {
req *objectstorage.PutObjectRequest
md5sumHex string
}
type objectChunkWriter struct {
chunkSize int64
size int64
f *Fs
bucket *string
key *string
uploadID *string
partsToCommit []objectstorage.CommitMultipartUploadPartDetails
partsToCommitMu sync.Mutex
existingParts map[int]objectstorage.MultipartUploadPartSummary
eTag string
md5sMu sync.Mutex
md5s []byte
ui uploadInfo
o *Object
}
func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.Reader, options ...fs.OpenOption) error {
_, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
Open: o.fs,
OpenOptions: options,
})
return err
}
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(
ctx context.Context,
remote string,
src fs.ObjectInfo,
options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: remote,
}
ui, err := o.prepareUpload(ctx, src, options)
if err != nil {
return info, nil, fmt.Errorf("failed to prepare upload: %w", err)
}
uploadParts := f.opt.MaxUploadParts
if uploadParts < 1 {
uploadParts = 1
} else if uploadParts > maxUploadParts {
uploadParts = maxUploadParts
}
size := src.Size()
// calculate size of parts
chunkSize := f.opt.ChunkSize
// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
// buffers here (default 5 MiB). With a maximum number of parts (10,000) this will be a file of
// 48 GiB which seems like a not too unreasonable limit.
if size == -1 {
warnStreamUpload.Do(func() {
fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
f.opt.ChunkSize, fs.SizeSuffix(int64(chunkSize)*int64(uploadParts)))
})
} else {
chunkSize = chunksize.Calculator(src, size, uploadParts, chunkSize)
}
uploadID, existingParts, err := o.createMultipartUpload(ctx, ui.req)
if err != nil {
return info, nil, fmt.Errorf("create multipart upload request failed: %w", err)
}
bucketName, bucketPath := o.split()
chunkWriter := &objectChunkWriter{
chunkSize: int64(chunkSize),
size: size,
f: f,
bucket: &bucketName,
key: &bucketPath,
uploadID: &uploadID,
existingParts: existingParts,
ui: ui,
o: o,
}
info = fs.ChunkWriterInfo{
ChunkSize: int64(chunkSize),
Concurrency: o.fs.opt.UploadConcurrency,
LeavePartsOnError: o.fs.opt.LeavePartsOnError,
}
fs.Debugf(o, "open chunk writer: started multipart upload: %v", uploadID)
return info, chunkWriter, err
}
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error) {
if chunkNumber < 0 {
err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber)
return -1, err
}
// Only account after the checksum reads have been done
if do, ok := reader.(pool.DelayAccountinger); ok {
// To figure out this number, do a transfer and if the accounted size is 0 or a
// multiple of what it should be, increase or decrease this number.
do.DelayAccounting(2)
}
m := md5.New()
currentChunkSize, err := io.Copy(m, reader)
if err != nil {
return -1, err
}
// If no data read, don't write the chunk
if currentChunkSize == 0 {
return 0, nil
}
md5sumBinary := m.Sum([]byte{})
w.addMd5(&md5sumBinary, int64(chunkNumber))
md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])
// Object storage requires 1 <= PartNumber <= 10000
ossPartNumber := chunkNumber + 1
if existing, ok := w.existingParts[ossPartNumber]; ok {
if md5sum == *existing.Md5 {
fs.Debugf(w.o, "matched uploaded part found, part num %d, skipping part, md5=%v", *existing.PartNumber, md5sum)
w.addCompletedPart(existing.PartNumber, existing.Etag)
return currentChunkSize, nil
}
}
req := objectstorage.UploadPartRequest{
NamespaceName: common.String(w.f.opt.Namespace),
BucketName: w.bucket,
ObjectName: w.key,
UploadId: w.uploadID,
UploadPartNum: common.Int(ossPartNumber),
ContentLength: common.Int64(currentChunkSize),
ContentMD5: common.String(md5sum),
}
w.o.applyPartUploadOptions(w.ui.req, &req)
var resp objectstorage.UploadPartResponse
err = w.f.pacer.Call(func() (bool, error) {
// req.UploadPartBody = io.NopCloser(bytes.NewReader(buf))
// rewind the reader on retry and after reading md5
_, err = reader.Seek(0, io.SeekStart)
if err != nil {
return false, err
}
req.UploadPartBody = io.NopCloser(reader)
resp, err = w.f.srv.UploadPart(ctx, req)
if err != nil {
if ossPartNumber <= 8 {
return shouldRetry(ctx, resp.HTTPResponse(), err)
}
// retry all chunks once have done the first few
return true, err
}
return false, err
})
if err != nil {
fs.Errorf(w.o, "multipart upload failed to upload part:%d err: %v", ossPartNumber, err)
return -1, fmt.Errorf("multipart upload failed to upload part: %w", err)
}
w.addCompletedPart(&ossPartNumber, resp.ETag)
return currentChunkSize, err
}
// add a part number and etag to the completed parts
func (w *objectChunkWriter) addCompletedPart(partNum *int, eTag *string) {
w.partsToCommitMu.Lock()
defer w.partsToCommitMu.Unlock()
w.partsToCommit = append(w.partsToCommit, objectstorage.CommitMultipartUploadPartDetails{
PartNum: partNum,
Etag: eTag,
})
}
func (w *objectChunkWriter) Close(ctx context.Context) (err error) {
req := objectstorage.CommitMultipartUploadRequest{
NamespaceName: common.String(w.f.opt.Namespace),
BucketName: w.bucket,
ObjectName: w.key,
UploadId: w.uploadID,
}
req.PartsToCommit = w.partsToCommit
var resp objectstorage.CommitMultipartUploadResponse
err = w.f.pacer.Call(func() (bool, error) {
resp, err = w.f.srv.CommitMultipartUpload(ctx, req)
// if multipart is corrupted, we will abort the uploadId
if isMultiPartUploadCorrupted(err) {
fs.Debugf(w.o, "multipart uploadId %v is corrupted, aborting...", *w.uploadID)
_ = w.Abort(ctx)
return false, err
}
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
return err
}
w.eTag = *resp.ETag
hashOfHashes := md5.Sum(w.md5s)
wantMultipartMd5 := fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hashOfHashes[:]), len(w.partsToCommit))
gotMultipartMd5 := *resp.OpcMultipartMd5
if wantMultipartMd5 != gotMultipartMd5 {
fs.Errorf(w.o, "multipart upload corrupted: multipart md5 differ: expecting %s but got %s", wantMultipartMd5, gotMultipartMd5)
return fmt.Errorf("multipart upload corrupted: md5 differ: expecting %s but got %s", wantMultipartMd5, gotMultipartMd5)
}
fs.Debugf(w.o, "multipart upload %v md5 matched: expecting %s and got %s", *w.uploadID, wantMultipartMd5, gotMultipartMd5)
return nil
}
func isMultiPartUploadCorrupted(err error) bool {
if err == nil {
return false
}
// Check if this oci-err object, and if it is multipart commit error
if ociError, ok := err.(common.ServiceError); ok {
// If it is a timeout then we want to retry that
if ociError.GetCode() == "InvalidUploadPart" {
return true
}
}
return false
}
func (w *objectChunkWriter) Abort(ctx context.Context) error {
fs.Debugf(w.o, "Cancelling multipart upload")
err := w.o.fs.abortMultiPartUpload(
ctx,
w.bucket,
w.key,
w.uploadID)
if err != nil {
fs.Debugf(w.o, "Failed to cancel multipart upload: %v", err)
} else {
fs.Debugf(w.o, "canceled and aborted multipart upload: %v", *w.uploadID)
}
return err
}
// addMd5 adds a binary md5 to the md5 calculated so far
func (w *objectChunkWriter) addMd5(md5binary *[]byte, chunkNumber int64) {
w.md5sMu.Lock()
defer w.md5sMu.Unlock()
start := chunkNumber * md5.Size
end := start + md5.Size
if extend := end - int64(len(w.md5s)); extend > 0 {
w.md5s = append(w.md5s, make([]byte, extend)...)
}
copy(w.md5s[start:end], (*md5binary)[:])
}
func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {
bucket, bucketPath := o.split()
ui.req = &objectstorage.PutObjectRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucket),
ObjectName: common.String(bucketPath),
}
// Set the mtime in the metadata
modTime := src.ModTime(ctx)
// Fetch metadata if --metadata is in use
meta, err := fs.GetMetadataOptions(ctx, src, options)
if err != nil {
return ui, fmt.Errorf("failed to read metadata from source object: %w", err)
}
ui.req.OpcMeta = make(map[string]string, len(meta)+2)
// merge metadata into request and user metadata
for k, v := range meta {
pv := common.String(v)
k = strings.ToLower(k)
switch k {
case "cache-control":
ui.req.CacheControl = pv
case "content-disposition":
ui.req.ContentDisposition = pv
case "content-encoding":
ui.req.ContentEncoding = pv
case "content-language":
ui.req.ContentLanguage = pv
case "content-type":
ui.req.ContentType = pv
case "tier":
// ignore
case "mtime":
// mtime in meta overrides source ModTime
metaModTime, err := time.Parse(time.RFC3339Nano, v)
if err != nil {
fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err)
} else {
modTime = metaModTime
}
case "btime":
// write as metadata since we can't set it
ui.req.OpcMeta[k] = v
default:
ui.req.OpcMeta[k] = v
}
}
// Set the mtime in the metadata
ui.req.OpcMeta[metaMtime] = swift.TimeToFloatString(modTime)
// read the md5sum if available
// - for non-multipart
// - so we can add a ContentMD5
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
// - for multipart provided checksums aren't disabled
// - so we can add the md5sum in the metadata as metaMD5Hash
size := src.Size()
isMultipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
var md5sumBase64 string
if !isMultipart || !o.fs.opt.DisableChecksum {
ui.md5sumHex, err = src.Hash(ctx, hash.MD5)
if err == nil && matchMd5.MatchString(ui.md5sumHex) {
hashBytes, err := hex.DecodeString(ui.md5sumHex)
if err == nil {
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
if isMultipart && !o.fs.opt.DisableChecksum {
// Set the md5sum as metadata on the object if
// - a multipart upload
// - the ETag is not an MD5, e.g. when using SSE/SSE-C
// provided checksums aren't disabled
ui.req.OpcMeta[metaMD5Hash] = md5sumBase64
}
}
}
}
// Set the content type if it isn't set already
if ui.req.ContentType == nil {
ui.req.ContentType = common.String(fs.MimeType(ctx, src))
}
if size >= 0 {
ui.req.ContentLength = common.Int64(size)
}
if md5sumBase64 != "" {
ui.req.ContentMD5 = &md5sumBase64
}
o.applyPutOptions(ui.req, options...)
useBYOKPutObject(o.fs, ui.req)
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return ui, fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
ui.req.StorageTier = storageTier
}
// Check metadata keys and values are valid
for key, value := range ui.req.OpcMeta {
if !httpguts.ValidHeaderFieldName(key) {
fs.Errorf(o, "Dropping invalid metadata key %q", key)
delete(ui.req.OpcMeta, key)
} else if value == "" {
fs.Errorf(o, "Dropping nil metadata value for key %q", key)
delete(ui.req.OpcMeta, key)
} else if !httpguts.ValidHeaderFieldValue(value) {
fs.Errorf(o, "Dropping invalid metadata value %q for key %q", value, key)
delete(ui.req.OpcMeta, key)
}
}
return ui, nil
}
func (o *Object) createMultipartUpload(ctx context.Context, putReq *objectstorage.PutObjectRequest) (
uploadID string, existingParts map[int]objectstorage.MultipartUploadPartSummary, err error) {
bucketName, bucketPath := o.split()
f := o.fs
if f.opt.AttemptResumeUpload {
fs.Debugf(o, "attempting to resume upload for %v (if any)", o.remote)
resumeUploads, err := o.fs.findLatestMultipartUpload(ctx, bucketName, bucketPath)
if err == nil && len(resumeUploads) > 0 {
uploadID = *resumeUploads[0].UploadId
existingParts, err = f.listMultipartUploadParts(ctx, bucketName, bucketPath, uploadID)
if err == nil {
fs.Debugf(o, "resuming with existing upload id: %v", uploadID)
return uploadID, existingParts, err
}
}
}
req := objectstorage.CreateMultipartUploadRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
}
req.Object = common.String(bucketPath)
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return "", nil, fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
req.StorageTier = storageTier
}
o.applyMultipartUploadOptions(putReq, &req)
var resp objectstorage.CreateMultipartUploadResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CreateMultipartUpload(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
return "", existingParts, err
}
existingParts = make(map[int]objectstorage.MultipartUploadPartSummary)
uploadID = *resp.UploadId
fs.Debugf(o, "created new upload id: %v", uploadID)
return uploadID, existingParts, err
}
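
The removed chunk writer verifies a finished upload by hashing the concatenation of the per-part MD5s and comparing "base64(md5 of md5s)-partcount" with the opc-multipart-md5 value the server returns; addMd5 writes each binary digest at chunkNumber*md5.Size so parts that finish out of order still land in the right slot. A compact sketch of that calculation over parts supplied in order (the sample data is arbitrary):

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

// multipartMd5 returns the "<base64(md5 of concatenated part md5s)>-<parts>"
// string used to cross-check a completed multipart upload.
func multipartMd5(parts [][]byte) string {
	md5s := make([]byte, 0, len(parts)*md5.Size)
	for _, p := range parts {
		sum := md5.Sum(p)
		md5s = append(md5s, sum[:]...)
	}
	hashOfHashes := md5.Sum(md5s)
	return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hashOfHashes[:]), len(parts))
}

func main() {
	parts := [][]byte{[]byte("chunk one"), []byte("chunk two")}
	fmt.Println(multipartMd5(parts))
}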

View File

@@ -4,14 +4,12 @@
package oracleobjectstorage package oracleobjectstorage
import ( import (
"bytes"
"context" "context"
"encoding/base64" "encoding/base64"
"encoding/hex" "encoding/hex"
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
"os"
"regexp" "regexp"
"strconv" "strconv"
"strings" "strings"
@@ -20,8 +18,10 @@ import (
"github.com/ncw/swift/v2" "github.com/ncw/swift/v2"
"github.com/oracle/oci-go-sdk/v65/common" "github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage" "github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
) )
// ------------------------------------------------------------ // ------------------------------------------------------------
@@ -367,28 +367,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
return resp.HTTPResponse().Body, nil return resp.HTTPResponse().Body, nil
} }
func isZeroLength(streamReader io.Reader) bool {
switch v := streamReader.(type) {
case *bytes.Buffer:
return v.Len() == 0
case *bytes.Reader:
return v.Len() == 0
case *strings.Reader:
return v.Len() == 0
case *os.File:
fi, err := v.Stat()
if err != nil {
return false
}
return fi.Size() == 0
default:
return false
}
}
// Update an object if it has changed // Update an object if it has changed
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
bucketName, _ := o.split() bucketName, bucketPath := o.split()
err = o.fs.makeBucket(ctx, bucketName) err = o.fs.makeBucket(ctx, bucketName)
if err != nil { if err != nil {
return err return err
@@ -396,24 +377,142 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// determine if we like upload single or multipart. // determine if we like upload single or multipart.
size := src.Size() size := src.Size()
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff) multipart := size >= int64(o.fs.opt.UploadCutoff)
if isZeroLength(in) {
multipart = false // Set the mtime in the metadata
modTime := src.ModTime(ctx)
metadata := map[string]string{
metaMtime: swift.TimeToFloatString(modTime),
} }
// read the md5sum if available
// - for non-multipart
// - so we can add a ContentMD5
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
// - for multipart provided checksums aren't disabled
// - so we can add the md5sum in the metadata as metaMD5Hash
var md5sumBase64 string
var md5sumHex string
if !multipart || !o.fs.opt.DisableChecksum {
md5sumHex, err = src.Hash(ctx, hash.MD5)
if err == nil && matchMd5.MatchString(md5sumHex) {
hashBytes, err := hex.DecodeString(md5sumHex)
if err == nil {
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
if multipart && !o.fs.opt.DisableChecksum {
// Set the md5sum as metadata on the object if
// - a multipart upload
// - the ETag is not an MD5, e.g. when using SSE/SSE-C
// provided checksums aren't disabled
metadata[metaMD5Hash] = md5sumBase64
}
}
}
}
// Guess the content type
mimeType := fs.MimeType(ctx, src)
if multipart { if multipart {
err = o.uploadMultipart(ctx, src, in, options...) chunkSize := int64(o.fs.opt.ChunkSize)
uploadRequest := transfer.UploadRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
ContentType: common.String(mimeType),
PartSize: common.Int64(chunkSize),
AllowMultipartUploads: common.Bool(true),
AllowParrallelUploads: common.Bool(true),
ObjectStorageClient: o.fs.srv,
EnableMultipartChecksumVerification: common.Bool(!o.fs.opt.DisableChecksum),
NumberOfGoroutines: common.Int(o.fs.opt.UploadConcurrency),
Metadata: metadataWithOpcPrefix(metadata),
}
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
uploadRequest.StorageTier = storageTier
}
o.applyMultiPutOptions(&uploadRequest, options...)
useBYOKUpload(o.fs, &uploadRequest)
uploadStreamRequest := transfer.UploadStreamRequest{
UploadRequest: uploadRequest,
StreamReader: in,
}
uploadMgr := transfer.NewUploadManager()
var uploadID = ""
defer atexit.OnError(&err, func() {
if uploadID == "" {
return
}
if o.fs.opt.LeavePartsOnError {
return
}
fs.Debugf(o, "Cancelling multipart upload")
errCancel := o.fs.abortMultiPartUpload(
context.Background(),
bucketName,
bucketPath,
uploadID)
if errCancel != nil {
fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
}
})()
err = o.fs.pacer.Call(func() (bool, error) {
uploadResponse, err := uploadMgr.UploadStream(ctx, uploadStreamRequest)
var httpResponse *http.Response
if err == nil {
if uploadResponse.Type == transfer.MultipartUpload {
if uploadResponse.MultipartUploadResponse != nil {
httpResponse = uploadResponse.MultipartUploadResponse.HTTPResponse()
}
} else {
if uploadResponse.SinglepartUploadResponse != nil {
httpResponse = uploadResponse.SinglepartUploadResponse.HTTPResponse()
}
}
}
if err != nil {
uploadID := ""
if uploadResponse.MultipartUploadResponse != nil && uploadResponse.MultipartUploadResponse.UploadID != nil {
uploadID = *uploadResponse.MultipartUploadResponse.UploadID
fs.Debugf(o, "multipart streaming upload failed, aborting uploadID: %v, may retry", uploadID)
_ = o.fs.abortMultiPartUpload(ctx, bucketName, bucketPath, uploadID)
}
}
return shouldRetry(ctx, httpResponse, err)
})
if err != nil { if err != nil {
fs.Errorf(o, "multipart streaming upload failed %v", err)
return err return err
} }
} else { } else {
ui, err := o.prepareUpload(ctx, src, options) req := objectstorage.PutObjectRequest{
if err != nil { NamespaceName: common.String(o.fs.opt.Namespace),
return fmt.Errorf("failed to prepare upload: %w", err) BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
ContentType: common.String(mimeType),
PutObjectBody: io.NopCloser(in),
OpcMeta: metadata,
} }
if size >= 0 {
req.ContentLength = common.Int64(size)
}
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
req.StorageTier = storageTier
}
o.applyPutOptions(&req, options...)
useBYOKPutObject(o.fs, &req)
var resp objectstorage.PutObjectResponse var resp objectstorage.PutObjectResponse
err = o.fs.pacer.CallNoRetry(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
ui.req.PutObjectBody = io.NopCloser(in) resp, err = o.fs.srv.PutObject(ctx, req)
resp, err = o.fs.srv.PutObject(ctx, *ui.req)
return shouldRetry(ctx, resp.HTTPResponse(), err) return shouldRetry(ctx, resp.HTTPResponse(), err)
}) })
if err != nil { if err != nil {
@@ -492,24 +591,28 @@ func (o *Object) applyGetObjectOptions(req *objectstorage.GetObjectRequest, opti
} }
} }
func (o *Object) applyMultipartUploadOptions(putReq *objectstorage.PutObjectRequest, req *objectstorage.CreateMultipartUploadRequest) { func (o *Object) applyMultiPutOptions(req *transfer.UploadRequest, options ...fs.OpenOption) {
req.ContentType = putReq.ContentType // Apply upload options
req.ContentLanguage = putReq.ContentLanguage for _, option := range options {
req.ContentEncoding = putReq.ContentEncoding key, value := option.Header()
req.ContentDisposition = putReq.ContentDisposition lowerKey := strings.ToLower(key)
req.CacheControl = putReq.CacheControl switch lowerKey {
req.Metadata = metadataWithOpcPrefix(putReq.OpcMeta) case "":
req.OpcSseCustomerAlgorithm = putReq.OpcSseCustomerAlgorithm // ignore
req.OpcSseCustomerKey = putReq.OpcSseCustomerKey case "content-encoding":
req.OpcSseCustomerKeySha256 = putReq.OpcSseCustomerKeySha256 req.ContentEncoding = common.String(value)
req.OpcSseKmsKeyId = putReq.OpcSseKmsKeyId case "content-language":
} req.ContentLanguage = common.String(value)
case "content-type":
func (o *Object) applyPartUploadOptions(putReq *objectstorage.PutObjectRequest, req *objectstorage.UploadPartRequest) { req.ContentType = common.String(value)
req.OpcSseCustomerAlgorithm = putReq.OpcSseCustomerAlgorithm default:
req.OpcSseCustomerKey = putReq.OpcSseCustomerKey if strings.HasPrefix(lowerKey, ociMetaPrefix) {
req.OpcSseCustomerKeySha256 = putReq.OpcSseCustomerKeySha256 req.Metadata[lowerKey] = value
req.OpcSseKmsKeyId = putReq.OpcSseKmsKeyId } else {
fs.Errorf(o, "Don't know how to set key %q on upload", key)
}
}
}
} }
func metadataWithOpcPrefix(src map[string]string) map[string]string { func metadataWithOpcPrefix(src map[string]string) map[string]string {
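
Both the old Update and the new prepareUpload take the hex MD5 that src.Hash reports, decode it and re-encode it as base64, because Content-MD5 style headers expect the base64 form of the raw 16-byte digest rather than its hex spelling. The conversion on its own, as a small sketch:

package main

import (
	"crypto/md5"
	"encoding/base64"
	"encoding/hex"
	"fmt"
)

// hexMD5ToContentMD5 converts a hex-encoded MD5 (as most hashers report it)
// into the base64 form expected by Content-MD5 / opc-content-md5 headers.
func hexMD5ToContentMD5(hexSum string) (string, error) {
	raw, err := hex.DecodeString(hexSum)
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(raw), nil
}

func main() {
	sum := md5.Sum([]byte("hello"))
	hexSum := hex.EncodeToString(sum[:])
	b64, _ := hexMD5ToContentMD5(hexSum)
	fmt.Println(hexSum, "->", b64)
}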

View File

@@ -13,10 +13,9 @@ import (
const ( const (
maxSizeForCopy = 4768 * 1024 * 1024 maxSizeForCopy = 4768 * 1024 * 1024
maxUploadParts = 10000 minChunkSize = fs.SizeSuffix(1024 * 1024 * 5)
defaultUploadConcurrency = 10
minChunkSize = fs.SizeSuffix(5 * 1024 * 1024)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024) defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
defaultUploadConcurrency = 10
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024) maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
minSleep = 10 * time.Millisecond minSleep = 10 * time.Millisecond
defaultCopyTimeoutDuration = fs.Duration(time.Minute) defaultCopyTimeoutDuration = fs.Duration(time.Minute)
@@ -56,14 +55,12 @@ type Options struct {
ConfigProfile string `config:"config_profile"` ConfigProfile string `config:"config_profile"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"` ChunkSize fs.SizeSuffix `config:"chunk_size"`
MaxUploadParts int `config:"max_upload_parts"`
UploadConcurrency int `config:"upload_concurrency"` UploadConcurrency int `config:"upload_concurrency"`
DisableChecksum bool `config:"disable_checksum"` DisableChecksum bool `config:"disable_checksum"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"` CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
CopyTimeout fs.Duration `config:"copy_timeout"` CopyTimeout fs.Duration `config:"copy_timeout"`
StorageTier string `config:"storage_tier"` StorageTier string `config:"storage_tier"`
LeavePartsOnError bool `config:"leave_parts_on_error"` LeavePartsOnError bool `config:"leave_parts_on_error"`
AttemptResumeUpload bool `config:"attempt_resume_upload"`
NoCheckBucket bool `config:"no_check_bucket"` NoCheckBucket bool `config:"no_check_bucket"`
SSEKMSKeyID string `config:"sse_kms_key_id"` SSEKMSKeyID string `config:"sse_kms_key_id"`
SSECustomerAlgorithm string `config:"sse_customer_algorithm"` SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
@@ -160,8 +157,9 @@ The minimum is 0 and the maximum is 5 GiB.`,
Help: `Chunk size to use for uploading. Help: `Chunk size to use for uploading.
When uploading files larger than upload_cutoff or files with unknown When uploading files larger than upload_cutoff or files with unknown
size (e.g. from "rclone rcat" or uploaded with "rclone mount" they will be uploaded size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google
as multipart uploads using this chunk size. photos or google docs) they will be uploaded as multipart uploads
using this chunk size.
Note that "upload_concurrency" chunks of this size are buffered Note that "upload_concurrency" chunks of this size are buffered
in memory per transfer. in memory per transfer.
@@ -183,20 +181,6 @@ statistics displayed with "-P" flag.
`, `,
Default: minChunkSize, Default: minChunkSize,
Advanced: true, Advanced: true,
}, {
Name: "max_upload_parts",
Help: `Maximum number of parts in a multipart upload.
This option defines the maximum number of multipart chunks to use
when doing a multipart upload.
OCI has max parts limit of 10,000 chunks.
Rclone will automatically increase the chunk size when uploading a
large file of a known size to stay below this number of chunks limit.
`,
Default: maxUploadParts,
Advanced: true,
}, { }, {
Name: "upload_concurrency", Name: "upload_concurrency",
Help: `Concurrency for multipart uploads. Help: `Concurrency for multipart uploads.
@@ -254,24 +238,12 @@ to start uploading.`,
encoder.EncodeDot, encoder.EncodeDot,
}, { }, {
Name: "leave_parts_on_error", Name: "leave_parts_on_error",
Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery. Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.
It should be set to true for resuming uploads across different sessions. It should be set to true for resuming uploads across different sessions.
WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add
additional costs if not cleaned up. additional costs if not cleaned up.
`,
Default: false,
Advanced: true,
}, {
Name: "attempt_resume_upload",
Help: `If true attempt to resume previously started multipart upload for the object.
This will be helpful to speed up multipart transfers by resuming uploads from past session.
WARNING: If chunk size differs in resumed session from past incomplete session, then the resumed multipart upload is
aborted and a new multipart upload is started with the new chunk size.
The flag leave_parts_on_error must be true to resume and optimize to skip parts that were already uploaded successfully.
`, `,
Default: false, Default: false,
Advanced: true, Advanced: true,
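
The new max_upload_parts option and the chunk-size help above rest on the usual multipart arithmetic: with at most N parts the chunk size must be at least the file size divided by N, rounded up, and a streaming upload of unknown size tops out at chunk size times N (the 5 MiB default and the 10,000-part cap give the 48 GiB figure mentioned in the removed writer). A back-of-the-envelope helper, not rclone's chunksize.Calculator, which handles the real rounding:

package main

import "fmt"

const (
	mib            = int64(1 << 20)
	gib            = int64(1 << 30)
	maxUploadParts = 10000
	minChunk       = 5 * mib
)

// chunkSizeFor returns a chunk size that keeps a file of the given size
// within maxUploadParts parts, never dropping below minChunk.
func chunkSizeFor(size int64) int64 {
	chunk := (size + maxUploadParts - 1) / maxUploadParts // ceil(size / parts)
	if chunk < minChunk {
		chunk = minChunk
	}
	return chunk
}

func main() {
	fmt.Println(chunkSizeFor(1024*gib)/mib, "MiB chunks keep a 1 TiB file under 10,000 parts") // 104
	fmt.Println(minChunk*maxUploadParts/gib, "GiB ceiling for unknown-size streams at the 5 MiB default") // 48
}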

View File

@@ -318,6 +318,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
remote := *object.Name remote := *object.Name
remote = f.opt.Enc.ToStandardPath(remote) remote = f.opt.Enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) { if !strings.HasPrefix(remote, prefix) {
// fs.Debugf(f, "Odd name received %v", object.Name)
continue continue
} }
remote = remote[len(prefix):] remote = remote[len(prefix):]
@@ -557,15 +558,15 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
}) })
} }
func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID *string) (err error) { func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID string) (err error) {
if uploadID == nil || *uploadID == "" { if uploadID == "" {
return nil return nil
} }
request := objectstorage.AbortMultipartUploadRequest{ request := objectstorage.AbortMultipartUploadRequest{
NamespaceName: common.String(f.opt.Namespace), NamespaceName: common.String(f.opt.Namespace),
BucketName: bucketName, BucketName: common.String(bucketName),
ObjectName: bucketPath, ObjectName: common.String(bucketPath),
UploadId: uploadID, UploadId: common.String(uploadID),
} }
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.AbortMultipartUpload(ctx, request) resp, err := f.srv.AbortMultipartUpload(ctx, request)
@@ -588,7 +589,7 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Durat
if operations.SkipDestructive(ctx, what, "remove pending upload") { if operations.SkipDestructive(ctx, what, "remove pending upload") {
continue continue
} }
_ = f.abortMultiPartUpload(ctx, upload.Bucket, upload.Object, upload.UploadId) _ = f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
} }
} else { } else {
fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.") fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.")
@@ -683,13 +684,12 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// Check the interfaces are satisfied // Check the interfaces are satisfied
var ( var (
_ fs.Fs = &Fs{} _ fs.Fs = &Fs{}
_ fs.Copier = &Fs{} _ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{} _ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{} _ fs.ListRer = &Fs{}
_ fs.Commander = &Fs{} _ fs.Commander = &Fs{}
_ fs.CleanUpper = &Fs{} _ fs.CleanUpper = &Fs{}
_ fs.OpenChunkWriter = &Fs{}
_ fs.Object = &Object{} _ fs.Object = &Object{}
_ fs.MimeTyper = &Object{} _ fs.MimeTyper = &Object{}

View File

@@ -1215,7 +1215,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, sha1Str stri
return nil, fmt.Errorf("failed to upload: %w", err) return nil, fmt.Errorf("failed to upload: %w", err)
} }
// refresh uploaded file info // refresh uploaded file info
// Compared to `newfile.File` this upgrades several fields... // Compared to `newfile.File` this upgrades several feilds...
// audit, links, modified_time, phase, revision, and web_content_link // audit, links, modified_time, phase, revision, and web_content_link
return f.getFile(ctx, newfile.File.ID) return f.getFile(ctx, newfile.File.ID)
} }

View File

@@ -1,4 +1,3 @@
// Package protondrive implements the Proton Drive backend
package protondrive package protondrive
import ( import (
@@ -6,6 +5,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"log"
"path" "path"
"strings" "strings"
"time" "time"
@@ -38,15 +38,15 @@ const (
maxSleep = 2 * time.Second maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential decayConstant = 2 // bigger for slower decay, exponential
clientUIDKey = "client_uid" clientUIDKey = "clientUID"
clientAccessTokenKey = "client_access_token" clientAccessTokenKey = "clientAccessToken"
clientRefreshTokenKey = "client_refresh_token" clientRefreshTokenKey = "clientRefreshToken"
clientSaltedKeyPassKey = "client_salted_key_pass" clientSaltedKeyPassKey = "clientSaltedKeyPass"
) )
var ( var (
errCanNotUploadFileWithUnknownSize = errors.New("proton Drive can't upload files with unknown size") ErrCanNotUploadFileWithUnknownSize = errors.New("proton Drive can't upload files with unknown size")
errCanNotPurgeRootDirectory = errors.New("can't purge root directory") ErrCanNotPurgeRootDirectory = errors.New("can't purge root directory")
// for the auth/deauth handler // for the auth/deauth handler
_mapper configmap.Mapper _mapper configmap.Mapper
@@ -61,60 +61,19 @@ func init() {
NewFs: NewFs, NewFs: NewFs,
Options: []fs.Option{{ Options: []fs.Option{{
Name: "username", Name: "username",
Help: `The username of your proton account`, Help: `The username of your proton drive account`,
Required: true, Required: true,
}, { }, {
Name: "password", Name: "password",
Help: "The password of your proton account.", Help: "The password of your proton drive account.",
Required: true, Required: true,
IsPassword: true, IsPassword: true,
}, {
Name: "mailbox_password",
Help: `The mailbox password of your two-password proton account.
For more information regarding the mailbox password, please check the
following official knowledge base article:
https://proton.me/support/the-difference-between-the-mailbox-password-and-login-password
`,
IsPassword: true,
Advanced: true,
}, { }, {
Name: "2fa", Name: "2fa",
Help: `The 2FA code Help: `The 2FA code
The value can also be provided with --protondrive-2fa=000000
The 2FA code of your proton drive account if the account is set up with The 2FA code of your proton drive account if the account is set up with
two-factor authentication`, two-factor authentication`,
Required: false, Required: false,
}, {
Name: clientUIDKey,
Help: "Client uid key (internal use only)",
Required: false,
Advanced: true,
Sensitive: true,
Hide: fs.OptionHideBoth,
}, {
Name: clientAccessTokenKey,
Help: "Client access token key (internal use only)",
Required: false,
Advanced: true,
Sensitive: true,
Hide: fs.OptionHideBoth,
}, {
Name: clientRefreshTokenKey,
Help: "Client refresh token key (internal use only)",
Required: false,
Advanced: true,
Sensitive: true,
Hide: fs.OptionHideBoth,
}, {
Name: clientSaltedKeyPassKey,
Help: "Client salted key pass key (internal use only)",
Required: false,
Advanced: true,
Sensitive: true,
Hide: fs.OptionHideBoth,
}, { }, {
Name: config.ConfigEncoding, Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp, Help: config.ConfigEncodingHelp,
@@ -151,8 +110,6 @@ When a file upload is cancelled or failed before completion, a draft will be
created and the subsequent upload of the same file to the same location will be created and the subsequent upload of the same file to the same location will be
reported as a conflict. reported as a conflict.
The value can also be set by --protondrive-replace-existing-draft=true
If the option is set to true, the draft will be replaced and then the upload If the option is set to true, the draft will be replaced and then the upload
operation will restart. If there are other clients also uploading at the same operation will restart. If there are other clients also uploading at the same
file location at the same time, the behavior is currently unknown. Need to set file location at the same time, the behavior is currently unknown. Need to set
@@ -166,10 +123,6 @@ will be returned, and no upload will happen.`,
Name: "enable_caching", Name: "enable_caching",
Help: `Caches the files and folders metadata to reduce API calls Help: `Caches the files and folders metadata to reduce API calls
Notice: If you are mounting ProtonDrive as a VFS, please disable this feature,
as the current implementation doesn't update or clear the cache when there are
external changes.
The files and folders on ProtonDrive are represented as links with keyrings, The files and folders on ProtonDrive are represented as links with keyrings,
which can be cached to improve performance and be friendly to the API server. which can be cached to improve performance and be friendly to the API server.
@@ -187,10 +140,9 @@ then we might have a problem with caching the stale data.`,
// Options defines the configuration for this backend // Options defines the configuration for this backend
type Options struct { type Options struct {
Username string `config:"username"` Username string `config:"username"`
Password string `config:"password"` Password string `config:"password"`
MailboxPassword string `config:"mailbox_password"` TwoFA string `config:"2fa"`
TwoFA string `config:"2fa"`
// advanced // advanced
Enc encoder.MultiEncoder `config:"encoding"` Enc encoder.MultiEncoder `config:"encoding"`
@@ -302,12 +254,12 @@ func clearConfigMap(m configmap.Mapper) {
} }
func authHandler(auth proton.Auth) { func authHandler(auth proton.Auth) {
// fs.Debugf("authHandler called") // log.Println("authHandler called")
setConfigMap(_mapper, auth.UID, auth.AccessToken, auth.RefreshToken, _saltedKeyPass) setConfigMap(_mapper, auth.UID, auth.AccessToken, auth.RefreshToken, _saltedKeyPass)
} }
func deAuthHandler() { func deAuthHandler() {
// fs.Debugf("deAuthHandler called") // log.Println("deAuthHandler called")
clearConfigMap(_mapper) clearConfigMap(_mapper)
} }
@@ -352,7 +304,6 @@ func newProtonDrive(ctx context.Context, f *Fs, opt *Options, m configmap.Mapper
config.UseReusableLogin = false config.UseReusableLogin = false
config.FirstLoginCredential.Username = opt.Username config.FirstLoginCredential.Username = opt.Username
config.FirstLoginCredential.Password = opt.Password config.FirstLoginCredential.Password = opt.Password
config.FirstLoginCredential.MailboxPassword = opt.MailboxPassword
config.FirstLoginCredential.TwoFA = opt.TwoFA config.FirstLoginCredential.TwoFA = opt.TwoFA
protonDrive, auth, err := protonDriveAPI.NewProtonDrive(ctx, config, authHandler, deAuthHandler) protonDrive, auth, err := protonDriveAPI.NewProtonDrive(ctx, config, authHandler, deAuthHandler)
if err != nil { if err != nil {
@@ -384,14 +335,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
} }
} }
if opt.MailboxPassword != "" {
var err error
opt.MailboxPassword, err = obscure.Reveal(opt.MailboxPassword)
if err != nil {
return nil, fmt.Errorf("couldn't decrypt mailbox password: %w", err)
}
}
ci := fs.GetConfig(ctx) ci := fs.GetConfig(ctx)
root = strings.Trim(root, "/") root = strings.Trim(root, "/")
@@ -622,10 +565,12 @@ func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
return entries, nil return entries, nil
} }
// FindLeaf finds a directory of name leaf in the folder with ID pathID // DirCacher describes an interface for doing the low level directory work
// //
// This should be implemented by the backend and will be called by the // This should be implemented by the backend and will be called by the
// dircache package when appropriate. // dircache package when appropriate.
//
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (string, bool, error) { func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (string, bool, error) {
/* f.opt.Enc.FromStandardName(leaf) not required since the DirCache only process sanitized path */ /* f.opt.Enc.FromStandardName(leaf) not required since the DirCache only process sanitized path */
@@ -644,10 +589,12 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (string, bool, e
return link.LinkID, true, nil return link.LinkID, true, nil
} }
// CreateDir makes a directory with pathID as parent and name leaf // DirCacher describes an interface for doing the low level directory work
// //
// This should be implemented by the backend and will be called by the // This should be implemented by the backend and will be called by the
// dircache package when appropriate. // dircache package when appropriate.
//
// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (string, error) { func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (string, error) {
/* f.opt.Enc.FromStandardName(leaf) not required since the DirCache only process sanitized path */ /* f.opt.Enc.FromStandardName(leaf) not required since the DirCache only process sanitized path */
@@ -675,7 +622,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (string, error)
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
size := src.Size() size := src.Size()
if size < 0 { if size < 0 {
return nil, errCanNotUploadFileWithUnknownSize return nil, ErrCanNotUploadFileWithUnknownSize
} }
existingObj, err := f.NewObject(ctx, src.Remote()) existingObj, err := f.NewObject(ctx, src.Remote())
@@ -774,7 +721,7 @@ func (f *Fs) DirCacheFlush() {
f.protonDrive.ClearCache() f.protonDrive.ClearCache()
} }
// Hashes returns the supported hash types of the filesystem // Returns the supported hash types of the filesystem
func (f *Fs) Hashes() hash.Set { func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.SHA1) return hash.Set(hash.SHA1)
} }
@@ -833,7 +780,9 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
return *o.digests, nil return *o.digests, nil
} }
// sha1 not cached: we fetch and try to obtain the sha1 of the link // sha1 not cached
log.Println("sha1 not cached")
// we fetch and try to obtain the sha1 of the link
fileSystemAttrs, err := o.fs.protonDrive.GetActiveRevisionAttrsByID(ctx, o.ID()) fileSystemAttrs, err := o.fs.protonDrive.GetActiveRevisionAttrsByID(ctx, o.ID())
if err != nil { if err != nil {
return "", err return "", err
@@ -855,7 +804,7 @@ func (o *Object) Size() int64 {
return *o.originalSize return *o.originalSize
} }
fs.Debugf(o, "Original file size missing") fs.Errorf(o, "Original size should exist")
} }
return o.size return o.size
} }
@@ -934,7 +883,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
size := src.Size() size := src.Size()
if size < 0 { if size < 0 {
return errCanNotUploadFileWithUnknownSize return ErrCanNotUploadFileWithUnknownSize
} }
remote := o.Remote() remote := o.Remote()
@@ -992,7 +941,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
root := path.Join(f.root, dir) root := path.Join(f.root, dir)
if root == "" { if root == "" {
// we can't remove the root directory, but we can list the directory and delete every folder and file in here // we can't remove the root directory, but we can list the directory and delete every folder and file in here
return errCanNotPurgeRootDirectory return ErrCanNotPurgeRootDirectory
} }
folderLinkID, err := f.dirCache.FindDir(ctx, f.sanitizePath(dir), false) folderLinkID, err := f.dirCache.FindDir(ctx, f.sanitizePath(dir), false)
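The NewFs hunk above reveals the obscured mailbox password with obscure.Reveal before passing it to the Proton Drive library. A minimal standalone sketch of that obscure round trip is below; the password string and the main wrapper are illustrative only, and it assumes the fs/config/obscure helpers used in the hunk.

// Sketch only: shows the Obscure/Reveal round trip that IsPassword options rely on.
package main

import (
	"fmt"
	"log"

	"github.com/rclone/rclone/fs/config/obscure"
)

func main() {
	// Values entered for IsPassword options are stored obscured in the config file.
	stored, err := obscure.Obscure("my-mailbox-password")
	if err != nil {
		log.Fatal(err)
	}
	// Backends reveal them at runtime, as NewFs does for mailbox_password above.
	plain, err := obscure.Reveal(stored)
	if err != nil {
		log.Fatalf("couldn't decrypt mailbox password: %v", err)
	}
	fmt.Println(plain == "my-mailbox-password") // true
}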

View File

@@ -1,182 +0,0 @@
// Package api provides types used by the Quatrix API.
package api
import (
"strconv"
"time"
)
// OverwriteOnCopyMode is a conflict resolve mode during copy. Files with conflicting names will be overwritten
const OverwriteOnCopyMode = "overwrite"
// ProfileInfo is a profile info about quota
type ProfileInfo struct {
UserUsed int64 `json:"user_used"`
UserLimit int64 `json:"user_limit"`
AccUsed int64 `json:"acc_used"`
AccLimit int64 `json:"acc_limit"`
}
// IDList is a general object that contains list of ids
type IDList struct {
IDs []string `json:"ids"`
}
// DeleteParams is the request to delete object
type DeleteParams struct {
IDs []string `json:"ids"`
DeletePermanently bool `json:"delete_permanently"`
}
// FileInfoParams is the request to get object's (file or directory) info
type FileInfoParams struct {
ParentID string `json:"parent_id,omitempty"`
Path string `json:"path"`
}
// FileInfo is the response to get object's (file or directory) info
type FileInfo struct {
FileID string `json:"file_id"`
ParentID string `json:"parent_id"`
Src string `json:"src"`
Type string `json:"type"`
}
// IsFile returns true if object is a file
// false otherwise
func (fi *FileInfo) IsFile() bool {
if fi == nil {
return false
}
return fi.Type == "F"
}
// IsDir returns true if object is a directory
// false otherwise
func (fi *FileInfo) IsDir() bool {
if fi == nil {
return false
}
return fi.Type == "D" || fi.Type == "S" || fi.Type == "T"
}
// CreateDirParams is the request to create a directory
type CreateDirParams struct {
Target string `json:"target,omitempty"`
Name string `json:"name"`
Resolve bool `json:"resolve"`
}
// File represent metadata about object in Quatrix (file or directory)
type File struct {
ID string `json:"id"`
Created JSONTime `json:"created"`
Modified JSONTime `json:"modified"`
Name string `json:"name"`
ParentID string `json:"parent_id"`
Size int64 `json:"size"`
ModifiedMS JSONTime `json:"modified_ms"`
Type string `json:"type"`
Operations int `json:"operations"`
SubType string `json:"sub_type"`
Content []File `json:"content"`
}
// IsFile returns true if object is a file
// false otherwise
func (f *File) IsFile() bool {
if f == nil {
return false
}
return f.Type == "F"
}
// IsDir returns true if object is a directory
// false otherwise
func (f *File) IsDir() bool {
if f == nil {
return false
}
return f.Type == "D" || f.Type == "S" || f.Type == "T"
}
// SetMTimeParams is the request to set modification time for object
type SetMTimeParams struct {
ID string `json:"id,omitempty"`
MTime JSONTime `json:"mtime"`
}
// JSONTime provides methods to marshal/unmarshal time.Time as Unix time
type JSONTime time.Time
// MarshalJSON returns time representation in Unix time
func (u JSONTime) MarshalJSON() ([]byte, error) {
return []byte(strconv.FormatFloat(float64(time.Time(u).UTC().UnixNano())/1e9, 'f', 6, 64)), nil
}
// UnmarshalJSON sets time from Unix time representation
func (u *JSONTime) UnmarshalJSON(data []byte) error {
f, err := strconv.ParseFloat(string(data), 64)
if err != nil {
return err
}
t := JSONTime(time.Unix(0, int64(f*1e9)))
*u = t
return nil
}
// String returns Unix time representation of time as string
func (u JSONTime) String() string {
return strconv.FormatInt(time.Time(u).UTC().Unix(), 10)
}
// DownloadLinkResponse is the response to download-link request
type DownloadLinkResponse struct {
ID string `json:"id"`
}
// UploadLinkParams is the request to get upload-link
type UploadLinkParams struct {
Name string `json:"name"`
ParentID string `json:"parent_id"`
Resolve bool `json:"resolve"`
}
// UploadLinkResponse is the response to upload-link request
type UploadLinkResponse struct {
Name string `json:"name"`
FileID string `json:"file_id"`
ParentID string `json:"parent_id"`
UploadKey string `json:"upload_key"`
}
// UploadFinalizeResponse is the response to finalize file method
type UploadFinalizeResponse struct {
FileID string `json:"id"`
ParentID string `json:"parent_id"`
Modified int64 `json:"modified"`
FileSize int64 `json:"size"`
}
// FileModifyParams is the request to get modify file link
type FileModifyParams struct {
ID string `json:"id"`
Truncate int64 `json:"truncate"`
}
// FileCopyMoveOneParams is the request to do server-side copy and move
// can be used for file or directory
type FileCopyMoveOneParams struct {
ID string `json:"file_id"`
Target string `json:"target_id"`
Name string `json:"name"`
MTime JSONTime `json:"mtime"`
Resolve bool `json:"resolve"`
ResolveMode string `json:"resolve_mode"`
}
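The JSONTime type above marshals timestamps as fractional Unix seconds with microsecond precision and parses them back from a JSON number. The following is a self-contained sketch of that same encoding; the type name unixTime and the sample date are illustrative, not part of the Quatrix API package.

// Sketch of the fractional-Unix-seconds encoding used by api.JSONTime above.
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"time"
)

type unixTime time.Time

func (u unixTime) MarshalJSON() ([]byte, error) {
	// Seconds since the epoch as a float with 6 decimal places.
	return []byte(strconv.FormatFloat(float64(time.Time(u).UTC().UnixNano())/1e9, 'f', 6, 64)), nil
}

func (u *unixTime) UnmarshalJSON(data []byte) error {
	f, err := strconv.ParseFloat(string(data), 64)
	if err != nil {
		return err
	}
	*u = unixTime(time.Unix(0, int64(f*1e9)))
	return nil
}

func main() {
	in := unixTime(time.Date(2023, 8, 1, 12, 0, 0, 500000000, time.UTC))
	b, _ := json.Marshal(in)
	fmt.Println(string(b)) // 1690891200.500000

	var out unixTime
	_ = json.Unmarshal(b, &out)
	fmt.Println(time.Time(out).UTC()) // 2023-08-01 12:00:00.5 +0000 UTC
}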

File diff suppressed because it is too large

View File

@@ -1,17 +0,0 @@
// Test Quatrix filesystem interface
package quatrix_test
import (
"testing"
"github.com/rclone/rclone/backend/quatrix"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestQuatrix:",
NilObject: (*quatrix.Object)(nil),
})
}

View File

@@ -1,108 +0,0 @@
package quatrix
import (
"sync"
"time"
"github.com/rclone/rclone/fs"
)
// UploadMemoryManager dynamically calculates every chunk size for the transfer and increases or decreases it
// depending on the upload speed. This makes general upload time smaller, because transfers that are faster
// does not have to wait for the slower ones until they finish upload.
type UploadMemoryManager struct {
m sync.Mutex
useDynamicSize bool
shared int64
reserved int64
effectiveTime time.Duration
fileUsage map[string]int64
}
// NewUploadMemoryManager is a constructor for UploadMemoryManager
func NewUploadMemoryManager(ci *fs.ConfigInfo, opt *Options) *UploadMemoryManager {
useDynamicSize := true
sharedMemory := int64(opt.MaximalSummaryChunkSize) - int64(opt.MinimalChunkSize)*int64(ci.Transfers)
if sharedMemory <= 0 {
sharedMemory = 0
useDynamicSize = false
}
return &UploadMemoryManager{
useDynamicSize: useDynamicSize,
shared: sharedMemory,
reserved: int64(opt.MinimalChunkSize),
effectiveTime: time.Duration(opt.EffectiveUploadTime),
fileUsage: map[string]int64{},
}
}
// Consume -- decide amount of memory to consume
func (u *UploadMemoryManager) Consume(fileID string, neededMemory int64, speed float64) int64 {
if !u.useDynamicSize {
if neededMemory < u.reserved {
return neededMemory
}
return u.reserved
}
u.m.Lock()
defer u.m.Unlock()
borrowed, found := u.fileUsage[fileID]
if found {
u.shared += borrowed
borrowed = 0
}
defer func() { u.fileUsage[fileID] = borrowed }()
effectiveChunkSize := int64(speed * u.effectiveTime.Seconds())
if effectiveChunkSize < u.reserved {
effectiveChunkSize = u.reserved
}
if neededMemory < effectiveChunkSize {
effectiveChunkSize = neededMemory
}
if effectiveChunkSize <= u.reserved {
return effectiveChunkSize
}
toBorrow := effectiveChunkSize - u.reserved
if toBorrow <= u.shared {
u.shared -= toBorrow
borrowed = toBorrow
return effectiveChunkSize
}
borrowed = u.shared
u.shared = 0
return borrowed + u.reserved
}
// Return returns consumed memory for the previous chunk upload to the memory pool
func (u *UploadMemoryManager) Return(fileID string) {
if !u.useDynamicSize {
return
}
u.m.Lock()
defer u.m.Unlock()
borrowed, found := u.fileUsage[fileID]
if !found {
return
}
u.shared += borrowed
delete(u.fileUsage, fileID)
}
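The UploadMemoryManager comment above describes the idea: each transfer keeps a reserved minimal chunk, and faster transfers borrow extra memory from a shared pool sized from the summary budget. Below is a worked example of that arithmetic as a small standalone program; the sizes, speed, and transfer count are illustrative and not the backend's defaults.

// Worked example of the Consume() arithmetic described above (illustrative numbers).
package main

import "fmt"

func main() {
	const (
		maximalSummaryChunkSize = int64(96 << 20) // 96 MiB budget across all transfers
		minimalChunkSize        = int64(8 << 20)  // 8 MiB reserved per transfer
		transfers               = 4
		effectiveUploadTime     = 5.0 // seconds, like opt.EffectiveUploadTime
	)
	shared := maximalSummaryChunkSize - minimalChunkSize*transfers // 64 MiB shared pool

	speed := 3.0 * (1 << 20)                        // measured 3 MiB/s for this file
	effective := int64(speed * effectiveUploadTime) // want ~5s of upload per chunk = 15 MiB
	borrow := effective - minimalChunkSize          // 7 MiB borrowed from the pool
	shared -= borrow

	fmt.Printf("chunk=%d MiB, pool left=%d MiB\n", effective>>20, shared>>20)
	// Return() gives the borrowed 7 MiB back to the pool once the chunk is uploaded.
}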

File diff suppressed because it is too large

View File

@@ -17,17 +17,17 @@ func TestShouldAllowShutdownTwice(t *testing.T) {
} }
func TestRenewalInTimeLimit(t *testing.T) { func TestRenewalInTimeLimit(t *testing.T) {
var count atomic.Int64 var count int64
renew := NewRenew(100*time.Millisecond, func() error { renew := NewRenew(100*time.Millisecond, func() error {
count.Add(1) atomic.AddInt64(&count, 1)
return nil return nil
}) })
time.Sleep(time.Second) time.Sleep(time.Second)
renew.Shutdown() renew.Shutdown()
// there's no guarantee the CI agent can handle a simple goroutine // there's no guarantee the CI agent can handle a simple goroutine
renewCount := count.Load() renewCount := atomic.LoadInt64(&count)
t.Logf("renew count = %d", renewCount) t.Logf("renew count = %d", renewCount)
assert.Greater(t, renewCount, int64(0)) assert.Greater(t, renewCount, int64(0))
assert.Less(t, renewCount, int64(11)) assert.Less(t, renewCount, int64(11))
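The hunk above swaps a plain int64 counter driven by atomic.AddInt64/LoadInt64 for the atomic.Int64 type added in Go 1.19, which carries Add and Load as methods. A minimal standalone sketch of both styles side by side, with a hypothetical counter loop:

// Sketch: pre-Go 1.19 atomic counter vs the atomic.Int64 type adopted above.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var old int64             // old style: package-level functions on a plain int64
	var counter atomic.Int64  // new style: the type owns its Add/Load/Store methods

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			atomic.AddInt64(&old, 1)
			counter.Add(1)
		}()
	}
	wg.Wait()
	fmt.Println(atomic.LoadInt64(&old), counter.Load()) // 100 100
}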

View File

@@ -232,16 +232,7 @@ E.g. the second example above should be rewritten as:
Default: "", Default: "",
Help: `Specifies the path or command to run a sftp server on the remote host. Help: `Specifies the path or command to run a sftp server on the remote host.
The subsystem option is ignored when server_command is defined. The subsystem option is ignored when server_command is defined.`,
If adding server_command to the configuration file please note that
it should not be enclosed in quotes, since that will make rclone fail.
A working example is:
[remote_name]
type = sftp
server_command = sudo /usr/libexec/openssh/sftp-server`,
Advanced: true, Advanced: true,
}, { }, {
Name: "use_fstat", Name: "use_fstat",
@@ -512,7 +503,7 @@ type Fs struct {
drain *time.Timer // used to drain the pool when we stop using the connections drain *time.Timer // used to drain the pool when we stop using the connections
pacer *fs.Pacer // pacer for operations pacer *fs.Pacer // pacer for operations
savedpswd string savedpswd string
sessions atomic.Int32 // count in use sessions sessions int32 // count in use sessions
} }
// Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading) // Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -580,17 +571,17 @@ func (c *conn) closed() error {
// //
// Call removeSession() when done // Call removeSession() when done
func (f *Fs) addSession() { func (f *Fs) addSession() {
f.sessions.Add(1) atomic.AddInt32(&f.sessions, 1)
} }
// Show the ssh session is no longer in use // Show the ssh session is no longer in use
func (f *Fs) removeSession() { func (f *Fs) removeSession() {
f.sessions.Add(-1) atomic.AddInt32(&f.sessions, -1)
} }
// getSessions shows whether there are any sessions in use // getSessions shows whether there are any sessions in use
func (f *Fs) getSessions() int32 { func (f *Fs) getSessions() int32 {
return f.sessions.Load() return atomic.LoadInt32(&f.sessions)
} }
// Open a new connection to the SFTP server. // Open a new connection to the SFTP server.
@@ -789,7 +780,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil { if err != nil {
return nil, err return nil, err
} }
if len(opt.SSH) != 0 && ((opt.User != currentUser && opt.User != "") || opt.Host != "" || (opt.Port != "22" && opt.Port != "")) { if len(opt.SSH) != 0 && (opt.User != "" || opt.Host != "" || opt.Port != "") {
fs.Logf(name, "--sftp-ssh is in use - ignoring user/host/port from config - set in the parameters to --sftp-ssh (remove them from the config to silence this warning)") fs.Logf(name, "--sftp-ssh is in use - ignoring user/host/port from config - set in the parameters to --sftp-ssh (remove them from the config to silence this warning)")
} }
@@ -845,7 +836,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
pubkeyFile := env.ShellExpand(opt.PubKeyFile) pubkeyFile := env.ShellExpand(opt.PubKeyFile)
//keyPem := env.ShellExpand(opt.KeyPem) //keyPem := env.ShellExpand(opt.KeyPem)
// Add ssh agent-auth if no password or file or key PEM specified // Add ssh agent-auth if no password or file or key PEM specified
if (len(opt.SSH) == 0 && opt.Pass == "" && keyFile == "" && !opt.AskPassword && opt.KeyPem == "") || opt.KeyUseAgent { if (opt.Pass == "" && keyFile == "" && !opt.AskPassword && opt.KeyPem == "") || opt.KeyUseAgent {
sshAgentClient, _, err := sshagent.New() sshAgentClient, _, err := sshagent.New()
if err != nil { if err != nil {
return nil, fmt.Errorf("couldn't connect to ssh-agent: %w", err) return nil, fmt.Errorf("couldn't connect to ssh-agent: %w", err)
@@ -1014,7 +1005,7 @@ func (f *Fs) keyboardInteractiveReponse(user, instruction string, questions []st
// save it so on reconnection we give back the previous string. // save it so on reconnection we give back the previous string.
// This removes the ability to let the user correct a mistaken entry, // This removes the ability to let the user correct a mistaken entry,
// but means that reconnects are transparent. // but means that reconnects are transparent.
// We'll reuse config.Pass for this, 'cos we know it's not been // We'll re-use config.Pass for this, 'cos we know it's not been
// specified. // specified.
func (f *Fs) getPass() (string, error) { func (f *Fs) getPass() (string, error) {
for f.savedpswd == "" { for f.savedpswd == "" {
@@ -1602,7 +1593,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
fs.Debugf(f, "About path %q", aboutPath) fs.Debugf(f, "About path %q", aboutPath)
vfsStats, err = c.sftpClient.StatVFS(aboutPath) vfsStats, err = c.sftpClient.StatVFS(aboutPath)
} }
f.putSftpConnection(&c, err) // Return to pool asap, if running shell command below it will be reused f.putSftpConnection(&c, err) // Return to pool asap, if running shell command below it will be re-used
if vfsStats != nil { if vfsStats != nil {
total := vfsStats.TotalSpace() total := vfsStats.TotalSpace()
free := vfsStats.FreeSpace() free := vfsStats.FreeSpace()
@@ -2044,7 +2035,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil { if err != nil {
return fmt.Errorf("Update: %w", err) return fmt.Errorf("Update: %w", err)
} }
// Hang on to the connection for the whole upload so it doesn't get reused while we are uploading // Hang on to the connection for the whole upload so it doesn't get re-used while we are uploading
file, err := c.sftpClient.OpenFile(o.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC) file, err := c.sftpClient.OpenFile(o.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
if err != nil { if err != nil {
o.fs.putSftpConnection(&c, err) o.fs.putSftpConnection(&c, err)

View File

@@ -47,7 +47,7 @@ func (s *sshClientExternal) Close() error {
// NewSession makes a new external SSH connection // NewSession makes a new external SSH connection
func (s *sshClientExternal) NewSession() (sshSession, error) { func (s *sshClientExternal) NewSession() (sshSession, error) {
session := s.f.newSSHSessionExternal() session := s.f.newSshSessionExternal()
if s.session == nil { if s.session == nil {
fs.Debugf(s.f, "ssh external: creating additional session") fs.Debugf(s.f, "ssh external: creating additional session")
} }
@@ -77,7 +77,7 @@ type sshSessionExternal struct {
runningSFTP bool runningSFTP bool
} }
func (f *Fs) newSSHSessionExternal() *sshSessionExternal { func (f *Fs) newSshSessionExternal() *sshSessionExternal {
s := &sshSessionExternal{ s := &sshSessionExternal{
f: f, f: f,
} }

View File

@@ -4,6 +4,7 @@ import (
"context" "context"
"fmt" "fmt"
"net" "net"
"sync/atomic"
"time" "time"
smb2 "github.com/hirochachacha/go-smb2" smb2 "github.com/hirochachacha/go-smb2"
@@ -88,17 +89,17 @@ func (c *conn) closed() bool {
// //
// Call removeSession() when done // Call removeSession() when done
func (f *Fs) addSession() { func (f *Fs) addSession() {
f.sessions.Add(1) atomic.AddInt32(&f.sessions, 1)
} }
// Show the SMB session is no longer in use // Show the SMB session is no longer in use
func (f *Fs) removeSession() { func (f *Fs) removeSession() {
f.sessions.Add(-1) atomic.AddInt32(&f.sessions, -1)
} }
// getSessions shows whether there are any sessions in use // getSessions shows whether there are any sessions in use
func (f *Fs) getSessions() int32 { func (f *Fs) getSessions() int32 {
return f.sessions.Load() return atomic.LoadInt32(&f.sessions)
} }
// Open a new connection to the SMB server. // Open a new connection to the SMB server.

View File

@@ -9,7 +9,6 @@ import (
"path" "path"
"strings" "strings"
"sync" "sync"
"sync/atomic"
"time" "time"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
@@ -141,7 +140,7 @@ type Fs struct {
features *fs.Features // optional features features *fs.Features // optional features
pacer *fs.Pacer // pacer for operations pacer *fs.Pacer // pacer for operations
sessions atomic.Int32 sessions int32
poolMu sync.Mutex poolMu sync.Mutex
pool []*conn pool []*conn
drain *time.Timer // used to drain the pool when we stop using the connections drain *time.Timer // used to drain the pool when we stop using the connections

View File

@@ -561,7 +561,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *swift.O
// returned as 0 bytes in the listing. Correct this here by // returned as 0 bytes in the listing. Correct this here by
// making sure we read the full metadata for all 0 byte files. // making sure we read the full metadata for all 0 byte files.
// We don't read the metadata for directory marker objects. // We don't read the metadata for directory marker objects.
if info != nil && info.Bytes == 0 && info.ContentType != "application/directory" && !o.fs.opt.NoLargeObjects { if info != nil && info.Bytes == 0 && info.ContentType != "application/directory" {
err := o.readMetaData(ctx) // reads info and headers, returning an error err := o.readMetaData(ctx) // reads info and headers, returning an error
if err == fs.ErrorObjectNotFound { if err == fs.ErrorObjectNotFound {
// We have a dangling large object here so just return the original metadata // We have a dangling large object here so just return the original metadata

View File

@@ -17,9 +17,8 @@ import (
// This is a wrapped object which returns the Union Fs as its parent // This is a wrapped object which returns the Union Fs as its parent
type Object struct { type Object struct {
*upstream.Object *upstream.Object
fs *Fs // what this object is part of fs *Fs // what this object is part of
co []upstream.Entry co []upstream.Entry
writebackMu sync.Mutex
} }
// Directory describes a union Directory // Directory describes a union Directory
@@ -35,13 +34,6 @@ type entry interface {
candidates() []upstream.Entry candidates() []upstream.Entry
} }
// Update o with the contents of newO excluding the lock
func (o *Object) update(newO *Object) {
o.Object = newO.Object
o.fs = newO.fs
o.co = newO.co
}
// UnWrapUpstream returns the upstream Object that this Object is wrapping // UnWrapUpstream returns the upstream Object that this Object is wrapping
func (o *Object) UnWrapUpstream() *upstream.Object { func (o *Object) UnWrapUpstream() *upstream.Object {
return o.Object return o.Object
@@ -75,7 +67,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err return err
} }
// Update current object // Update current object
o.update(newO.(*Object)) *o = *newO.(*Object)
return nil return nil
} else if err != nil { } else if err != nil {
return err return err
@@ -183,25 +175,6 @@ func (o *Object) SetTier(tier string) error {
return do.SetTier(tier) return do.SetTier(tier)
} }
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
// Need some sort of locking to prevent multiple downloads
o.writebackMu.Lock()
defer o.writebackMu.Unlock()
// FIXME what if correct object is already in o.co
newObj, err := o.Object.Writeback(ctx)
if err != nil {
return nil, err
}
if newObj != nil {
o.Object = newObj
o.co = append(o.co, newObj) // FIXME should this append or overwrite or update?
}
return o.Object.Object.Open(ctx, options...)
}
// ModTime returns the modification date of the directory // ModTime returns the modification date of the directory
// It returns the latest ModTime of all candidates // It returns the latest ModTime of all candidates
func (d *Directory) ModTime(ctx context.Context) (t time.Time) { func (d *Directory) ModTime(ctx context.Context) (t time.Time) {

View File

@@ -877,10 +877,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
opt: *opt, opt: *opt,
upstreams: usedUpstreams, upstreams: usedUpstreams,
} }
err = upstream.Prepare(f.upstreams)
if err != nil {
return nil, err
}
f.actionPolicy, err = policy.Get(opt.ActionPolicy) f.actionPolicy, err = policy.Get(opt.ActionPolicy)
if err != nil { if err != nil {
return nil, err return nil, err

View File

@@ -12,7 +12,7 @@ import (
) )
var ( var (
unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "PublicLink", "PutUnchecked", "MergeDirs", "OpenWriterAt", "OpenChunkWriter"} unimplementableFsMethods = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "PublicLink", "PutUnchecked", "MergeDirs", "OpenWriterAt"}
unimplementableObjectMethods = []string{} unimplementableObjectMethods = []string{}
) )

View File

@@ -16,7 +16,6 @@ import (
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/operations"
) )
var ( var (
@@ -26,6 +25,10 @@ var (
// Fs is a wrap of any fs and its configs // Fs is a wrap of any fs and its configs
type Fs struct { type Fs struct {
// In order to ensure memory alignment on 32-bit architectures
// when this field is accessed through sync/atomic functions,
// it must be the first entry in the struct
cacheExpiry int64 // usage cache expiry time
fs.Fs fs.Fs
RootFs fs.Fs RootFs fs.Fs
RootPath string RootPath string
@@ -34,12 +37,9 @@ type Fs struct {
creatable bool creatable bool
usage *fs.Usage // Cache the usage usage *fs.Usage // Cache the usage
cacheTime time.Duration // cache duration cacheTime time.Duration // cache duration
cacheExpiry atomic.Int64 // usage cache expiry time
cacheMutex sync.RWMutex cacheMutex sync.RWMutex
cacheOnce sync.Once cacheOnce sync.Once
cacheUpdate bool // if the cache is updating cacheUpdate bool // if the cache is updating
writeback bool // writeback to this upstream
writebackFs *Fs // if non zero, writeback to this upstream
} }
// Directory describes a wrapped Directory // Directory describes a wrapped Directory
@@ -73,14 +73,14 @@ func New(ctx context.Context, remote, root string, opt *common.Options) (*Fs, er
return nil, err return nil, err
} }
f := &Fs{ f := &Fs{
RootPath: strings.TrimRight(root, "/"), RootPath: strings.TrimRight(root, "/"),
Opt: opt, Opt: opt,
writable: true, writable: true,
creatable: true, creatable: true,
cacheTime: time.Duration(opt.CacheTime) * time.Second, cacheExpiry: time.Now().Unix(),
usage: &fs.Usage{}, cacheTime: time.Duration(opt.CacheTime) * time.Second,
usage: &fs.Usage{},
} }
f.cacheExpiry.Store(time.Now().Unix())
if strings.HasSuffix(fsPath, ":ro") { if strings.HasSuffix(fsPath, ":ro") {
f.writable = false f.writable = false
f.creatable = false f.creatable = false
@@ -89,9 +89,6 @@ func New(ctx context.Context, remote, root string, opt *common.Options) (*Fs, er
f.writable = true f.writable = true
f.creatable = false f.creatable = false
fsPath = fsPath[0 : len(fsPath)-3] fsPath = fsPath[0 : len(fsPath)-3]
} else if strings.HasSuffix(fsPath, ":writeback") {
f.writeback = true
fsPath = fsPath[0 : len(fsPath)-len(":writeback")]
} }
remote = configName + fsPath remote = configName + fsPath
rFs, err := cache.Get(ctx, remote) rFs, err := cache.Get(ctx, remote)
@@ -109,29 +106,6 @@ func New(ctx context.Context, remote, root string, opt *common.Options) (*Fs, er
return f, err return f, err
} }
// Prepare the configured upstreams as a group
func Prepare(fses []*Fs) error {
writebacks := 0
var writebackFs *Fs
for _, f := range fses {
if f.writeback {
writebackFs = f
writebacks++
}
}
if writebacks == 0 {
return nil
} else if writebacks > 1 {
return fmt.Errorf("can only have 1 :writeback not %d", writebacks)
}
for _, f := range fses {
if !f.writeback {
f.writebackFs = writebackFs
}
}
return nil
}
// WrapDirectory wraps an fs.Directory to include the info // WrapDirectory wraps an fs.Directory to include the info
// of the upstream Fs // of the upstream Fs
func (f *Fs) WrapDirectory(e fs.Directory) *Directory { func (f *Fs) WrapDirectory(e fs.Directory) *Directory {
@@ -322,31 +296,9 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
return do.Metadata(ctx) return do.Metadata(ctx)
} }
// Writeback writes the object back and returns a new object
//
// If it returns nil, nil then the original object is OK
func (o *Object) Writeback(ctx context.Context) (*Object, error) {
if o.f.writebackFs == nil {
return nil, nil
}
newObj, err := operations.Copy(ctx, o.f.writebackFs.Fs, nil, o.Object.Remote(), o.Object)
if err != nil {
return nil, err
}
// newObj could be nil here
if newObj == nil {
fs.Errorf(o, "nil Object returned from operations.Copy")
return nil, nil
}
return &Object{
Object: newObj,
f: o.f,
}, err
}
// About gets quota information from the Fs // About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if f.cacheExpiry.Load() <= time.Now().Unix() { if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
err := f.updateUsage() err := f.updateUsage()
if err != nil { if err != nil {
return nil, ErrUsageFieldNotSupported return nil, ErrUsageFieldNotSupported
@@ -361,7 +313,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
// //
// This is returned as 0..math.MaxInt64-1 leaving math.MaxInt64 as a sentinel // This is returned as 0..math.MaxInt64-1 leaving math.MaxInt64 as a sentinel
func (f *Fs) GetFreeSpace() (int64, error) { func (f *Fs) GetFreeSpace() (int64, error) {
if f.cacheExpiry.Load() <= time.Now().Unix() { if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
err := f.updateUsage() err := f.updateUsage()
if err != nil { if err != nil {
return math.MaxInt64 - 1, ErrUsageFieldNotSupported return math.MaxInt64 - 1, ErrUsageFieldNotSupported
@@ -379,7 +331,7 @@ func (f *Fs) GetFreeSpace() (int64, error) {
// //
// This is returned as 0..math.MaxInt64-1 leaving math.MaxInt64 as a sentinel // This is returned as 0..math.MaxInt64-1 leaving math.MaxInt64 as a sentinel
func (f *Fs) GetUsedSpace() (int64, error) { func (f *Fs) GetUsedSpace() (int64, error) {
if f.cacheExpiry.Load() <= time.Now().Unix() { if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
err := f.updateUsage() err := f.updateUsage()
if err != nil { if err != nil {
return 0, ErrUsageFieldNotSupported return 0, ErrUsageFieldNotSupported
@@ -395,7 +347,7 @@ func (f *Fs) GetUsedSpace() (int64, error) {
// GetNumObjects get the number of objects of the fs // GetNumObjects get the number of objects of the fs
func (f *Fs) GetNumObjects() (int64, error) { func (f *Fs) GetNumObjects() (int64, error) {
if f.cacheExpiry.Load() <= time.Now().Unix() { if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
err := f.updateUsage() err := f.updateUsage()
if err != nil { if err != nil {
return 0, ErrUsageFieldNotSupported return 0, ErrUsageFieldNotSupported
@@ -450,7 +402,7 @@ func (f *Fs) updateUsageCore(lock bool) error {
defer f.cacheMutex.Unlock() defer f.cacheMutex.Unlock()
} }
// Store usage // Store usage
f.cacheExpiry.Store(time.Now().Add(f.cacheTime).Unix()) atomic.StoreInt64(&f.cacheExpiry, time.Now().Add(f.cacheTime).Unix())
f.usage = usage f.usage = usage
return nil return nil
} }
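The removed comment above explains why cacheExpiry used to be the first field of the struct: 64-bit atomics need 64-bit alignment, which 32-bit platforms only guarantee for the first word of an allocated struct. The atomic.Int64 type carries that alignment guarantee itself, so the field can now sit anywhere. A compact sketch with hypothetical struct names:

// Sketch of the alignment constraint the removed comment describes.
package main

import (
	"fmt"
	"sync/atomic"
)

type oldFs struct {
	cacheExpiry int64 // had to stay first so atomic.LoadInt64 is safe on 386/arm
	name        string
}

type newFs struct {
	name        string
	cacheExpiry atomic.Int64 // safe anywhere: the type guarantees 64-bit alignment
}

func main() {
	o, n := &oldFs{}, &newFs{}
	atomic.StoreInt64(&o.cacheExpiry, 42)
	n.cacheExpiry.Store(42)
	fmt.Println(atomic.LoadInt64(&o.cacheExpiry), n.cacheExpiry.Load()) // 42 42
}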

View File

@@ -121,7 +121,7 @@ func (o *Object) uploadChunks(ctx context.Context, in0 io.Reader, size int64, pa
getBody := func() (io.ReadCloser, error) { getBody := func() (io.ReadCloser, error) {
// RepeatableReader{} plays well with accounting so rewinding doesn't make the progress buggy // RepeatableReader{} plays well with accounting so rewinding doesn't make the progress buggy
if _, err := in.Seek(0, io.SeekStart); err != nil { if _, err := in.Seek(0, io.SeekStart); err == nil {
return nil, err return nil, err
} }
@@ -203,7 +203,7 @@ func (o *Object) purgeUploadedChunks(ctx context.Context, uploadDir string) erro
resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil) resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)
// directory doesn't exist, no need to purge // directory doesn't exist, no need to purge
if resp != nil && resp.StatusCode == http.StatusNotFound { if resp.StatusCode == http.StatusNotFound {
return false, nil return false, nil
} }

View File

@@ -17,6 +17,7 @@ import (
"github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/backend/zoho/api" "github.com/rclone/rclone/backend/zoho/api"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
@@ -1168,8 +1169,31 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if o.id == "" { if o.id == "" {
return nil, errors.New("can't download - no id") return nil, errors.New("can't download - no id")
} }
var start, end int64 = 0, o.size
partialContent := false
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
start = x.Offset
partialContent = true
case *fs.RangeOption:
if x.Start >= 0 {
start = x.Start
if x.End > 0 && x.End < o.size {
end = x.End + 1
}
} else {
// {-1, 20} should load the last 20 characters [len-20:len]
start = o.size - x.End
}
partialContent = true
default:
if option.Mandatory() {
fs.Logf(nil, "Unsupported mandatory option: %v", option)
}
}
}
var resp *http.Response var resp *http.Response
fs.FixRangeOption(options, o.size)
opts := rest.Opts{ opts := rest.Opts{
Method: "GET", Method: "GET",
Path: "/download/" + o.id, Path: "/download/" + o.id,
@@ -1182,6 +1206,20 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil { if err != nil {
return nil, err return nil, err
} }
if partialContent && resp.StatusCode == 200 && resp.Header.Get("Content-Range") == "" {
if start > 0 {
// We need to read and discard the beginning of the data...
_, err = io.CopyN(io.Discard, resp.Body, start)
if err != nil {
if resp != nil {
_ = resp.Body.Close()
}
return nil, err
}
}
// ... and return a limited reader for the remaining of the data
return readers.NewLimitedReadCloser(resp.Body, end-start), nil
}
return resp.Body, nil return resp.Body, nil
} }
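The Open hunk above parses SeekOption/RangeOption into a start/end window and, when the server answers 200 without a Content-Range header, emulates the range client-side by discarding the prefix and capping the remainder. The sketch below shows that fallback pattern with the standard library only; the URL, function name, and byte window are hypothetical, and it is not the backend's actual code.

// Sketch of the range-fallback pattern used in the Zoho Open hunk above.
package main

import (
	"fmt"
	"io"
	"net/http"
)

// openRange asks for bytes [start, end) and emulates the range when the
// server ignores the Range header.
func openRange(url string, start, end int64) (io.ReadCloser, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end-1))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode == http.StatusPartialContent || resp.Header.Get("Content-Range") != "" {
		return resp.Body, nil // server honoured the range
	}
	// Full body returned: skip the prefix and limit the rest, mirroring the
	// io.CopyN + limited reader approach in the hunk above.
	if _, err := io.CopyN(io.Discard, resp.Body, start); err != nil {
		_ = resp.Body.Close()
		return nil, err
	}
	return struct {
		io.Reader
		io.Closer
	}{io.LimitReader(resp.Body, end-start), resp.Body}, nil
}

func main() {
	rc, err := openRange("https://example.com/file.bin", 100, 200)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer rc.Close()
	n, _ := io.Copy(io.Discard, rc)
	fmt.Println("read", n, "bytes")
}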

View File

@@ -6,4 +6,3 @@
<abhi18av@users.noreply.github.com> <abhi18av@users.noreply.github.com>
<ankur0493@gmail.com> <ankur0493@gmail.com>
<agupta@egnyte.com> <agupta@egnyte.com>
<ricci@disroot.org>

View File

@@ -25,7 +25,6 @@ docs = [
"flags.md", "flags.md",
"docker.md", "docker.md",
"bisync.md", "bisync.md",
"release_signing.md",
# Keep these alphabetical by full name # Keep these alphabetical by full name
"fichier.md", "fichier.md",
@@ -62,7 +61,6 @@ docs = [
"opendrive.md", "opendrive.md",
"oracleobjectstorage.md", "oracleobjectstorage.md",
"qingstor.md", "qingstor.md",
"quatrix.md",
"sia.md", "sia.md",
"swift.md", "swift.md",
"pcloud.md", "pcloud.md",
@@ -70,7 +68,6 @@ docs = [
"premiumizeme.md", "premiumizeme.md",
"protondrive.md", "protondrive.md",
"putio.md", "putio.md",
"protondrive.md",
"seafile.md", "seafile.md",
"sftp.md", "sftp.md",
"smb.md", "smb.md",

View File

@@ -22,8 +22,8 @@ var (
func init() { func init() {
cmd.Root.AddCommand(commandDefinition) cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags() cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &jsonOutput, "json", "", false, "Format output as JSON", "") flags.BoolVarP(cmdFlags, &jsonOutput, "json", "", false, "Format output as JSON")
flags.BoolVarP(cmdFlags, &fullOutput, "full", "", false, "Full numbers instead of human-readable", "") flags.BoolVarP(cmdFlags, &fullOutput, "full", "", false, "Full numbers instead of human-readable")
} }
// printValue formats uv to be output // printValue formats uv to be output
@@ -95,7 +95,6 @@ see complete list in [documentation](https://rclone.org/overview/#optional-featu
`, `,
Annotations: map[string]string{ Annotations: map[string]string{
"versionIntroduced": "v1.41", "versionIntroduced": "v1.41",
// "groups": "",
}, },
Run: func(command *cobra.Command, args []string) { Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args) cmd.CheckArgs(1, 1, command, args)

View File

@@ -18,8 +18,8 @@ var (
func init() { func init() {
cmd.Root.AddCommand(commandDefinition) cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags() cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &noAutoBrowser, "auth-no-open-browser", "", false, "Do not automatically open auth link in default browser", "") flags.BoolVarP(cmdFlags, &noAutoBrowser, "auth-no-open-browser", "", false, "Do not automatically open auth link in default browser")
flags.StringVarP(cmdFlags, &template, "template", "", "", "The path to a custom Go template for generating HTML responses", "") flags.StringVarP(cmdFlags, &template, "template", "", "", "The path to a custom Go template for generating HTML responses")
} }
var commandDefinition = &cobra.Command{ var commandDefinition = &cobra.Command{
@@ -36,7 +36,6 @@ link in default browser automatically.
Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.`, Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.`,
Annotations: map[string]string{ Annotations: map[string]string{
"versionIntroduced": "v1.27", "versionIntroduced": "v1.27",
// "groups": "",
}, },
RunE: func(command *cobra.Command, args []string) error { RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 3, command, args) cmd.CheckArgs(1, 3, command, args)

View File

@@ -24,8 +24,8 @@ var (
func init() { func init() {
cmd.Root.AddCommand(commandDefinition) cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags() cmdFlags := commandDefinition.Flags()
flags.StringArrayVarP(cmdFlags, &options, "option", "o", options, "Option in the form name=value or name", "") flags.StringArrayVarP(cmdFlags, &options, "option", "o", options, "Option in the form name=value or name")
flags.BoolVarP(cmdFlags, &useJSON, "json", "", useJSON, "Always output in JSON format", "") flags.BoolVarP(cmdFlags, &useJSON, "json", "", useJSON, "Always output in JSON format")
} }
var commandDefinition = &cobra.Command{ var commandDefinition = &cobra.Command{
@@ -60,7 +60,6 @@ Note to run these commands on a running backend then see
`, `,
Annotations: map[string]string{ Annotations: map[string]string{
"versionIntroduced": "v1.52", "versionIntroduced": "v1.52",
"groups": "Important",
}, },
RunE: func(command *cobra.Command, args []string) error { RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(2, 1e6, command, args) cmd.CheckArgs(2, 1e6, command, args)

View File

@@ -614,8 +614,6 @@ func (b *bisyncTest) runBisync(ctx context.Context, args []string) (err error) {
opt.DryRun = true opt.DryRun = true
case "force": case "force":
opt.Force = true opt.Force = true
case "create-empty-src-dirs":
opt.CreateEmptySrcDirs = true
case "remove-empty-dirs": case "remove-empty-dirs":
opt.RemoveEmptyDirs = true opt.RemoveEmptyDirs = true
case "check-sync-only": case "check-sync-only":
@@ -1165,10 +1163,6 @@ func (b *bisyncTest) newReplacer(mangle bool) *strings.Replacer {
b.workDir + slash, "{workdir/}", b.workDir + slash, "{workdir/}",
b.path1, "{path1/}", b.path1, "{path1/}",
b.path2, "{path2/}", b.path2, "{path2/}",
"//?/" + strings.TrimSuffix(strings.Replace(b.path1, slash, "/", -1), "/"), "{path1}", // fix windows-specific issue
"//?/" + strings.TrimSuffix(strings.Replace(b.path2, slash, "/", -1), "/"), "{path2}",
strings.TrimSuffix(b.path1, slash), "{path1}", // ensure it's still recognized without trailing slash
strings.TrimSuffix(b.path2, slash), "{path2}",
b.sessionName, "{session}", b.sessionName, "{session}",
} }
if fixSlash { if fixSlash {

View File

@@ -27,21 +27,18 @@ import (
// Options keep bisync options // Options keep bisync options
type Options struct { type Options struct {
Resync bool Resync bool
CheckAccess bool CheckAccess bool
CheckFilename string CheckFilename string
CheckSync CheckSyncMode CheckSync CheckSyncMode
CreateEmptySrcDirs bool RemoveEmptyDirs bool
RemoveEmptyDirs bool MaxDelete int // percentage from 0 to 100
MaxDelete int // percentage from 0 to 100 Force bool
Force bool FiltersFile string
FiltersFile string Workdir string
Workdir string DryRun bool
DryRun bool NoCleanup bool
NoCleanup bool SaveQueues bool // save extra debugging files (test only flag)
SaveQueues bool // save extra debugging files (test only flag)
IgnoreListingChecksum bool
Resilient bool
} }
// Default values // Default values
@@ -101,19 +98,16 @@ var Opt Options
func init() { func init() {
cmd.Root.AddCommand(commandDefinition) cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags() cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &Opt.Resync, "resync", "1", Opt.Resync, "Performs the resync run. Path1 files may overwrite Path2 versions. Consider using --verbose or --dry-run first.", "") flags.BoolVarP(cmdFlags, &Opt.Resync, "resync", "1", Opt.Resync, "Performs the resync run. Path1 files may overwrite Path2 versions. Consider using --verbose or --dry-run first.")
flags.BoolVarP(cmdFlags, &Opt.CheckAccess, "check-access", "", Opt.CheckAccess, makeHelp("Ensure expected {CHECKFILE} files are found on both Path1 and Path2 filesystems, else abort."), "") flags.BoolVarP(cmdFlags, &Opt.CheckAccess, "check-access", "", Opt.CheckAccess, makeHelp("Ensure expected {CHECKFILE} files are found on both Path1 and Path2 filesystems, else abort."))
flags.StringVarP(cmdFlags, &Opt.CheckFilename, "check-filename", "", Opt.CheckFilename, makeHelp("Filename for --check-access (default: {CHECKFILE})"), "") flags.StringVarP(cmdFlags, &Opt.CheckFilename, "check-filename", "", Opt.CheckFilename, makeHelp("Filename for --check-access (default: {CHECKFILE})"))
flags.BoolVarP(cmdFlags, &Opt.Force, "force", "", Opt.Force, "Bypass --max-delete safety check and run the sync. Consider using with --verbose", "") flags.BoolVarP(cmdFlags, &Opt.Force, "force", "", Opt.Force, "Bypass --max-delete safety check and run the sync. Consider using with --verbose")
flags.FVarP(cmdFlags, &Opt.CheckSync, "check-sync", "", "Controls comparison of final listings: true|false|only (default: true)", "") flags.FVarP(cmdFlags, &Opt.CheckSync, "check-sync", "", "Controls comparison of final listings: true|false|only (default: true)")
flags.BoolVarP(cmdFlags, &Opt.CreateEmptySrcDirs, "create-empty-src-dirs", "", Opt.CreateEmptySrcDirs, "Sync creation and deletion of empty directories. (Not compatible with --remove-empty-dirs)", "") flags.BoolVarP(cmdFlags, &Opt.RemoveEmptyDirs, "remove-empty-dirs", "", Opt.RemoveEmptyDirs, "Remove empty directories at the final cleanup step.")
flags.BoolVarP(cmdFlags, &Opt.RemoveEmptyDirs, "remove-empty-dirs", "", Opt.RemoveEmptyDirs, "Remove ALL empty directories at the final cleanup step.", "") flags.StringVarP(cmdFlags, &Opt.FiltersFile, "filters-file", "", Opt.FiltersFile, "Read filtering patterns from a file")
flags.StringVarP(cmdFlags, &Opt.FiltersFile, "filters-file", "", Opt.FiltersFile, "Read filtering patterns from a file", "") flags.StringVarP(cmdFlags, &Opt.Workdir, "workdir", "", Opt.Workdir, makeHelp("Use custom working dir - useful for testing. (default: {WORKDIR})"))
flags.StringVarP(cmdFlags, &Opt.Workdir, "workdir", "", Opt.Workdir, makeHelp("Use custom working dir - useful for testing. (default: {WORKDIR})"), "") flags.BoolVarP(cmdFlags, &tzLocal, "localtime", "", tzLocal, "Use local time in listings (default: UTC)")
flags.BoolVarP(cmdFlags, &tzLocal, "localtime", "", tzLocal, "Use local time in listings (default: UTC)", "") flags.BoolVarP(cmdFlags, &Opt.NoCleanup, "no-cleanup", "", Opt.NoCleanup, "Retain working files (useful for troubleshooting and testing).")
flags.BoolVarP(cmdFlags, &Opt.NoCleanup, "no-cleanup", "", Opt.NoCleanup, "Retain working files (useful for troubleshooting and testing).", "")
flags.BoolVarP(cmdFlags, &Opt.IgnoreListingChecksum, "ignore-listing-checksum", "", Opt.IgnoreListingChecksum, "Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)", "")
flags.BoolVarP(cmdFlags, &Opt.Resilient, "resilient", "", Opt.Resilient, "Allow future runs to retry after certain less-serious errors, instead of requiring --resync. Use at your own risk!", "")
} }
// bisync command definition // bisync command definition
@@ -123,7 +117,6 @@ var commandDefinition = &cobra.Command{
Long: longHelp, Long: longHelp,
Annotations: map[string]string{ Annotations: map[string]string{
"versionIntroduced": "v1.58", "versionIntroduced": "v1.58",
"groups": "Filter,Copy,Important",
}, },
RunE: func(command *cobra.Command, args []string) error { RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(2, 2, command, args) cmd.CheckArgs(2, 2, command, args)
@@ -216,13 +209,9 @@ func (opt *Options) applyFilters(ctx context.Context) (context.Context, error) {
} }
if opt.Resync { if opt.Resync {
if opt.DryRun { fs.Infof(nil, "Storing filters file hash to %s", hashFile)
fs.Infof(nil, "Skipped storing filters file hash to %s as --dry-run is set", hashFile) if err := os.WriteFile(hashFile, []byte(gotHash), bilib.PermSecure); err != nil {
} else { return ctx, err
fs.Infof(nil, "Storing filters file hash to %s", hashFile)
if err := os.WriteFile(hashFile, []byte(gotHash), bilib.PermSecure); err != nil {
return ctx, err
}
} }
} }

View File

@@ -3,18 +3,13 @@
package bisync package bisync
import ( import (
"bytes"
"context" "context"
"fmt" "fmt"
"path/filepath" "path/filepath"
"sort" "sort"
"strings"
"github.com/rclone/rclone/cmd/bisync/bilib" "github.com/rclone/rclone/cmd/bisync/bilib"
"github.com/rclone/rclone/cmd/check"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/operations"
) )
@@ -95,47 +90,6 @@ func (ds *deltaSet) printStats() {
ds.msg, nAll, nNew, nNewer, nOlder, nDeleted) ds.msg, nAll, nNew, nNewer, nOlder, nDeleted)
} }
// check potential conflicts (to avoid renaming if already identical)
func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter.Filter, fs1, fs2 fs.Fs) (bilib.Names, error) {
matches := bilib.Names{}
if filterCheck.HaveFilesFrom() {
fs.Debugf(nil, "There are potential conflicts to check.")
opt, close, checkopterr := check.GetCheckOpt(b.fs1, b.fs2)
if checkopterr != nil {
b.critical = true
b.retryable = true
fs.Debugf(nil, "GetCheckOpt error: %v", checkopterr)
return matches, checkopterr
}
defer close()
opt.Match = new(bytes.Buffer)
// TODO: consider using custom CheckFn to act like cryptcheck, if either fs is a crypt remote and -c has been passed
// note that cryptCheck() is not currently exported
fs.Infof(nil, "Checking potential conflicts...")
check := operations.Check(ctxCheck, opt)
fs.Infof(nil, "Finished checking the potential conflicts. %s", check)
//reset error count, because we don't want to count check errors as bisync errors
accounting.Stats(ctxCheck).ResetErrors()
//return the list of identical files to check against later
if len(fmt.Sprint(opt.Match)) > 0 {
matches = bilib.ToNames(strings.Split(fmt.Sprint(opt.Match), "\n"))
}
if matches.NotEmpty() {
fs.Debugf(nil, "The following potential conflicts were determined to be identical. %v", matches)
} else {
fs.Debugf(nil, "None of the conflicts were determined to be identical.")
}
}
return matches, nil
}
// findDeltas // findDeltas
func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing, newListing, msg string) (ds *deltaSet, err error) { func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing, newListing, msg string) (ds *deltaSet, err error) {
var old, now *fileList var old, now *fileList
@@ -229,52 +183,6 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
ctxMove := b.opt.setDryRun(ctx) ctxMove := b.opt.setDryRun(ctx)
// efficient isDir check
// we load the listing just once and store only the dirs
dirs1, dirs1Err := b.listDirsOnly(1)
if dirs1Err != nil {
b.critical = true
b.retryable = true
fs.Debugf(nil, "Error generating dirsonly list for path1: %v", dirs1Err)
return
}
dirs2, dirs2Err := b.listDirsOnly(2)
if dirs2Err != nil {
b.critical = true
b.retryable = true
fs.Debugf(nil, "Error generating dirsonly list for path2: %v", dirs2Err)
return
}
// build a list of only the "deltaOther"s so we don't have to check more files than necessary
// this is essentially the same as running rclone check with a --files-from filter, then exempting the --match results from being renamed
// we therefore avoid having to list the same directory more than once.
// we are intentionally overriding DryRun here because we need to perform the check, even during a dry run, or the results would be inaccurate.
// check is a read-only operation by its nature, so it's already "dry" in that sense.
ctxNew, ciCheck := fs.AddConfig(ctx)
ciCheck.DryRun = false
ctxCheck, filterCheck := filter.AddConfig(ctxNew)
for _, file := range ds1.sort() {
d1 := ds1.deltas[file]
if d1.is(deltaOther) {
d2 := ds2.deltas[file]
if d2.is(deltaOther) {
if err := filterCheck.AddFile(file); err != nil {
fs.Debugf(nil, "Non-critical error adding file to list of potential conflicts to check: %s", err)
} else {
fs.Debugf(nil, "Added file to list of potential conflicts to check: %s", file)
}
}
}
}
//if there are potential conflicts to check, check them all here (outside the loop) in one fell swoop
matches, err := b.checkconflicts(ctxCheck, filterCheck, b.fs1, b.fs2)
for _, file := range ds1.sort() { for _, file := range ds1.sort() {
p1 := path1 + file p1 := path1 + file
p2 := path2 + file p2 := path2 + file
@@ -291,34 +199,22 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
handled.Add(file) handled.Add(file)
} else if d2.is(deltaOther) { } else if d2.is(deltaOther) {
b.indent("!WARNING", file, "New or changed in both paths") b.indent("!WARNING", file, "New or changed in both paths")
b.indent("!Path1", p1+"..path1", "Renaming Path1 copy")
//if files are identical, leave them alone instead of renaming if err = operations.MoveFile(ctxMove, b.fs1, b.fs1, file+"..path1", file); err != nil {
if dirs1.has(file) && dirs2.has(file) { err = fmt.Errorf("path1 rename failed for %s: %w", p1, err)
fs.Debugf(nil, "This is a directory, not a file. Skipping equality check and will not rename: %s", file) b.critical = true
} else { return
equal := matches.Has(file)
if equal {
fs.Infof(nil, "Files are equal! Skipping: %s", file)
} else {
fs.Debugf(nil, "Files are NOT equal: %s", file)
b.indent("!Path1", p1+"..path1", "Renaming Path1 copy")
if err = operations.MoveFile(ctxMove, b.fs1, b.fs1, file+"..path1", file); err != nil {
err = fmt.Errorf("path1 rename failed for %s: %w", p1, err)
b.critical = true
return
}
b.indent("!Path1", p2+"..path1", "Queue copy to Path2")
copy1to2.Add(file + "..path1")
b.indent("!Path2", p2+"..path2", "Renaming Path2 copy")
if err = operations.MoveFile(ctxMove, b.fs2, b.fs2, file+"..path2", file); err != nil {
err = fmt.Errorf("path2 rename failed for %s: %w", file, err)
return
}
b.indent("!Path2", p1+"..path2", "Queue copy to Path1")
copy2to1.Add(file + "..path2")
}
} }
b.indent("!Path1", p2+"..path1", "Queue copy to Path2")
copy1to2.Add(file + "..path1")
b.indent("!Path2", p2+"..path2", "Renaming Path2 copy")
if err = operations.MoveFile(ctxMove, b.fs2, b.fs2, file+"..path2", file); err != nil {
err = fmt.Errorf("path2 rename failed for %s: %w", file, err)
return
}
b.indent("!Path2", p1+"..path2", "Queue copy to Path1")
copy2to1.Add(file + "..path2")
handled.Add(file) handled.Add(file)
} }
} else { } else {
@@ -362,9 +258,6 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
if err != nil { if err != nil {
return return
} }
//copy empty dirs from path2 to path1 (if --create-empty-src-dirs)
b.syncEmptyDirs(ctx, b.fs1, copy2to1, dirs2, "make")
} }
if copy1to2.NotEmpty() { if copy1to2.NotEmpty() {
@@ -374,9 +267,6 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
if err != nil { if err != nil {
return return
} }
//copy empty dirs from path1 to path2 (if --create-empty-src-dirs)
b.syncEmptyDirs(ctx, b.fs2, copy1to2, dirs1, "make")
} }
if delete1.NotEmpty() { if delete1.NotEmpty() {
@@ -386,9 +276,6 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
if err != nil { if err != nil {
return return
} }
//propagate deletions of empty dirs from path2 to path1 (if --create-empty-src-dirs)
b.syncEmptyDirs(ctx, b.fs1, delete1, dirs1, "remove")
} }
if delete2.NotEmpty() { if delete2.NotEmpty() {
@@ -398,9 +285,6 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
if err != nil { if err != nil {
return return
} }
//propagate deletions of empty dirs from path1 to path2 (if --create-empty-src-dirs)
b.syncEmptyDirs(ctx, b.fs2, delete2, dirs2, "remove")
} }
return return
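
The hunk above (removed when comparing back to the older commit) only renames a file that changed on both sides after an up-front equality check: directories and identical files are left alone, everything else gets the ..path1/..path2 rename-and-cross-copy treatment. A minimal, self-contained sketch of that decision flow, using invented stand-in types rather than rclone's bisyncRun/deltaSet machinery:

package main

import "fmt"

// conflict is a stand-in for a file that changed on both paths.
type conflict struct {
    name  string
    equal bool // result of the up-front content check
    isDir bool
}

// resolve mirrors the removed branching: directories and equal files are left
// alone; genuinely different files are renamed and queued for cross-copy.
func resolve(c conflict) (copy1to2, copy2to1 []string) {
    switch {
    case c.isDir:
        fmt.Printf("%s: directory, skipping equality check\n", c.name)
    case c.equal:
        fmt.Printf("%s: files are equal, skipping\n", c.name)
    default:
        copy1to2 = append(copy1to2, c.name+"..path1") // renamed Path1 copy goes to Path2
        copy2to1 = append(copy2to1, c.name+"..path2") // renamed Path2 copy goes to Path1
    }
    return copy1to2, copy2to1
}

func main() {
    for _, c := range []conflict{
        {name: "file5.txt", equal: false},
        {name: "file6.txt", equal: true},
        {name: "subdir", isDir: true},
    } {
        to2, to1 := resolve(c)
        fmt.Println(c.name, to2, to1)
    }
}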


@@ -27,16 +27,11 @@ var rcHelp = makeHelp(`This takes the following parameters
- checkFilename - file name for checkAccess (default: {CHECKFILE}) - checkFilename - file name for checkAccess (default: {CHECKFILE})
- maxDelete - abort sync if percentage of deleted files is above - maxDelete - abort sync if percentage of deleted files is above
this threshold (default: {MAXDELETE}) this threshold (default: {MAXDELETE})
- force - Bypass maxDelete safety check and run the sync - force - Bypass maxDelete safety check and run the sync
- checkSync - |true| by default, |false| disables comparison of final listings, - checkSync - |true| by default, |false| disables comparison of final listings,
|only| will skip sync, only compare listings from the last run |only| will skip sync, only compare listings from the last run
- createEmptySrcDirs - Sync creation and deletion of empty directories.
(Not compatible with --remove-empty-dirs)
- removeEmptyDirs - remove empty directories at the final cleanup step - removeEmptyDirs - remove empty directories at the final cleanup step
- filtersFile - read filtering patterns from a file - filtersFile - read filtering patterns from a file
- ignoreListingChecksum - Do not use checksums for listings
- resilient - Allow future runs to retry after certain less-serious errors, instead of requiring resync.
Use at your own risk!
- workdir - server directory for history files (default: {WORKDIR}) - workdir - server directory for history files (default: {WORKDIR})
- noCleanup - retain working files - noCleanup - retain working files
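
For the maxDelete parameter above, a back-of-the-envelope sketch of the documented safety check (abort when the share of queued deletes exceeds the threshold). The helper name and the integer rounding are assumptions for illustration, not bisync's actual code:

package main

import "fmt"

// exceedsMaxDelete sketches the documented maxDelete guard: abort the sync
// when the percentage of files queued for deletion is above the threshold.
func exceedsMaxDelete(deletes, total, maxDeletePct int) bool {
    if total == 0 {
        return false
    }
    return deletes*100/total > maxDeletePct
}

func main() {
    fmt.Println(exceedsMaxDelete(3, 10, 50)) // false: 30% is within a 50% limit
    fmt.Println(exceedsMaxDelete(8, 10, 50)) // true: 80% trips the guard
}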


@@ -43,11 +43,10 @@ var tzLocal = false
// fileInfo describes a file // fileInfo describes a file
type fileInfo struct { type fileInfo struct {
size int64 size int64
time time.Time time time.Time
hash string hash string
id string id string
flags string
} }
// fileList represents a listing // fileList represents a listing
@@ -77,18 +76,17 @@ func (ls *fileList) get(file string) *fileInfo {
return ls.info[file] return ls.info[file]
} }
func (ls *fileList) put(file string, size int64, time time.Time, hash, id string, flags string) { func (ls *fileList) put(file string, size int64, time time.Time, hash, id string) {
fi := ls.get(file) fi := ls.get(file)
if fi != nil { if fi != nil {
fi.size = size fi.size = size
fi.time = time fi.time = time
} else { } else {
fi = &fileInfo{ fi = &fileInfo{
size: size, size: size,
time: time, time: time,
hash: hash, hash: hash,
id: id, id: id,
flags: flags,
} }
ls.info[file] = fi ls.info[file] = fi
ls.list = append(ls.list, file) ls.list = append(ls.list, file)
@@ -154,11 +152,7 @@ func (ls *fileList) save(ctx context.Context, listing string) error {
id = "-" id = "-"
} }
flags := fi.flags flags := "-"
if flags == "" {
flags = "-"
}
_, err = fmt.Fprintf(file, lineFormat, flags, fi.size, hash, id, time, remote) _, err = fmt.Fprintf(file, lineFormat, flags, fi.size, hash, id, time, remote)
if err != nil { if err != nil {
_ = file.Close() _ = file.Close()
@@ -223,7 +217,7 @@ func (b *bisyncRun) loadListing(listing string) (*fileList, error) {
} }
} }
if (flags != "-" && flags != "d") || id != "-" || sizeErr != nil || timeErr != nil || hashErr != nil || nameErr != nil { if flags != "-" || id != "-" || sizeErr != nil || timeErr != nil || hashErr != nil || nameErr != nil {
fs.Logf(listing, "Ignoring incorrect line: %q", line) fs.Logf(listing, "Ignoring incorrect line: %q", line)
continue continue
} }
@@ -235,7 +229,7 @@ func (b *bisyncRun) loadListing(listing string) (*fileList, error) {
} }
} }
ls.put(nameVal, sizeVal, timeVal.In(TZ), hashVal, id, flags) ls.put(nameVal, sizeVal, timeVal.In(TZ), hashVal, id)
} }
return ls, nil return ls, nil
@@ -259,20 +253,15 @@ func (b *bisyncRun) makeListing(ctx context.Context, f fs.Fs, listing string) (l
ci := fs.GetConfig(ctx) ci := fs.GetConfig(ctx)
depth := ci.MaxDepth depth := ci.MaxDepth
hashType := hash.None hashType := hash.None
if !b.opt.IgnoreListingChecksum { if !ci.IgnoreChecksum {
// Currently bisync just honors --ignore-listing-checksum // Currently bisync just honors --ignore-checksum
// (note that this is different from --ignore-checksum)
// TODO add full support for checksums and related flags // TODO add full support for checksums and related flags
hashType = f.Hashes().GetOne() hashType = f.Hashes().GetOne()
} }
ls = newFileList() ls = newFileList()
ls.hash = hashType ls.hash = hashType
var lock sync.Mutex var lock sync.Mutex
listType := walk.ListObjects
if b.opt.CreateEmptySrcDirs {
listType = walk.ListAll
}
err = walk.ListR(ctx, f, "", false, depth, listType, func(entries fs.DirEntries) error { err = walk.ListR(ctx, f, "", false, depth, walk.ListObjects, func(entries fs.DirEntries) error {
var firstErr error var firstErr error
entries.ForObject(func(o fs.Object) { entries.ForObject(func(o fs.Object) {
//tr := accounting.Stats(ctx).NewCheckingTransfer(o) // TODO //tr := accounting.Stats(ctx).NewCheckingTransfer(o) // TODO
@@ -287,27 +276,12 @@ func (b *bisyncRun) makeListing(ctx context.Context, f fs.Fs, listing string) (l
} }
} }
time := o.ModTime(ctx).In(TZ) time := o.ModTime(ctx).In(TZ)
id := "" // TODO id := "" // TODO
flags := "-" // "-" for a file and "d" for a directory
lock.Lock() lock.Lock()
ls.put(o.Remote(), o.Size(), time, hashVal, id, flags) ls.put(o.Remote(), o.Size(), time, hashVal, id)
lock.Unlock() lock.Unlock()
//tr.Done(ctx, nil) // TODO //tr.Done(ctx, nil) // TODO
}) })
if b.opt.CreateEmptySrcDirs {
entries.ForDir(func(o fs.Directory) {
var (
hashVal string
)
time := o.ModTime(ctx).In(TZ)
id := "" // TODO
flags := "d" // "-" for a file and "d" for a directory
lock.Lock()
//record size as 0 instead of -1, so bisync doesn't think it's a google doc
ls.put(o.Remote(), 0, time, hashVal, id, flags)
lock.Unlock()
})
}
return firstErr return firstErr
}) })
if err == nil { if err == nil {
@@ -326,53 +300,5 @@ func (b *bisyncRun) checkListing(ls *fileList, listing, msg string) error {
} }
fs.Errorf(nil, "Empty %s listing. Cannot sync to an empty directory: %s", msg, listing) fs.Errorf(nil, "Empty %s listing. Cannot sync to an empty directory: %s", msg, listing)
b.critical = true b.critical = true
b.retryable = true
return fmt.Errorf("empty %s listing: %s", msg, listing) return fmt.Errorf("empty %s listing: %s", msg, listing)
} }
// listingNum should be 1 for path1 or 2 for path2
func (b *bisyncRun) loadListingNum(listingNum int) (*fileList, error) {
listingpath := b.basePath + ".path1.lst-new"
if listingNum == 2 {
listingpath = b.basePath + ".path2.lst-new"
}
if b.opt.DryRun {
listingpath = strings.Replace(listingpath, ".lst-", ".lst-dry-", 1)
}
fs.Debugf(nil, "loading listing for path %d at: %s", listingNum, listingpath)
return b.loadListing(listingpath)
}
func (b *bisyncRun) listDirsOnly(listingNum int) (*fileList, error) {
var fulllisting *fileList
var dirsonly = newFileList()
var err error
if !b.opt.CreateEmptySrcDirs {
return dirsonly, err
}
fulllisting, err = b.loadListingNum(listingNum)
if err != nil {
b.critical = true
b.retryable = true
fs.Debugf(nil, "Error loading listing to generate dirsonly list: %v", err)
return dirsonly, err
}
for _, obj := range fulllisting.list {
info := fulllisting.get(obj)
if info.flags == "d" {
fs.Debugf(nil, "found a dir: %s", obj)
dirsonly.put(obj, info.size, info.time, info.hash, info.id, info.flags)
} else {
fs.Debugf(nil, "not a dir: %s", obj)
}
}
return dirsonly, err
}
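
The listing code above writes one row per entry: flags ("-" for a file, "d" for a directory), size, hash, id, timestamp and the quoted name, matching the golden listings shown later in this diff. A rough stand-alone sketch of serializing such a row; the exact lineFormat string and column widths are assumed rather than copied from listing.go:

package main

import (
    "fmt"
    "os"
    "time"
)

// row is a simplified stand-in for one listing entry.
type row struct {
    flags string // "-" for a file, "d" for a directory, per the removed code
    size  int64
    hash  string
    id    string
    time  time.Time
    name  string
}

func main() {
    // Assumed layout, reverse-engineered from the golden listings in this diff:
    // flags, size, hash, id, timestamp, quoted file name.
    const lineFormat = "%s %8d %s %s %s %q\n"
    r := row{
        flags: "-",
        size:  19,
        hash:  "md5:7fe98ed88552b828777d8630900346b8",
        id:    "-",
        time:  time.Date(2001, 1, 2, 0, 0, 0, 0, time.UTC),
        name:  "file10.txt",
    }
    fmt.Fprintf(os.Stdout, lineFormat, r.flags, r.size, r.hash, r.id,
        r.time.Format("2006-01-02T15:04:05.000000000-0700"), r.name)
}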


@@ -25,14 +25,13 @@ var ErrBisyncAborted = errors.New("bisync aborted")
// bisyncRun keeps bisync runtime state // bisyncRun keeps bisync runtime state
type bisyncRun struct { type bisyncRun struct {
fs1 fs.Fs fs1 fs.Fs
fs2 fs.Fs fs2 fs.Fs
abort bool abort bool
critical bool critical bool
retryable bool
basePath string basePath string
workDir string workDir string
opt *Options opt *Options
} }
// Bisync handles lock file, performs bisync run and checks exit status // Bisync handles lock file, performs bisync run and checks exit status
@@ -124,19 +123,14 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
} }
if b.critical { if b.critical {
-    if b.retryable && b.opt.Resilient {
-        fs.Errorf(nil, "Bisync critical error: %v", err)
-        fs.Errorf(nil, "Bisync aborted. Error is retryable without --resync due to --resilient mode.")
-    } else {
-        if bilib.FileExists(listing1) {
-            _ = os.Rename(listing1, listing1+"-err")
-        }
-        if bilib.FileExists(listing2) {
-            _ = os.Rename(listing2, listing2+"-err")
-        }
-        fs.Errorf(nil, "Bisync critical error: %v", err)
-        fs.Errorf(nil, "Bisync aborted. Must run --resync to recover.")
-    }
+    if bilib.FileExists(listing1) {
+        _ = os.Rename(listing1, listing1+"-err")
+    }
+    if bilib.FileExists(listing2) {
+        _ = os.Rename(listing2, listing2+"-err")
+    }
+    fs.Errorf(nil, "Bisync critical error: %v", err)
+    fs.Errorf(nil, "Bisync aborted. Must run --resync to recover.")
return ErrBisyncAborted return ErrBisyncAborted
} }
if b.abort { if b.abort {
@@ -158,7 +152,6 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
fs.Infof(nil, "Validating listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2)) fs.Infof(nil, "Validating listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2))
if err = b.checkSync(listing1, listing2); err != nil { if err = b.checkSync(listing1, listing2); err != nil {
b.critical = true b.critical = true
b.retryable = true
} }
return err return err
} }
@@ -183,7 +176,6 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
var fctx context.Context var fctx context.Context
if fctx, err = b.opt.applyFilters(octx); err != nil { if fctx, err = b.opt.applyFilters(octx); err != nil {
b.critical = true b.critical = true
b.retryable = true
return return
} }
@@ -196,7 +188,6 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
if !bilib.FileExists(listing1) || !bilib.FileExists(listing2) { if !bilib.FileExists(listing1) || !bilib.FileExists(listing2) {
// On prior critical error abort, the prior listings are renamed to .lst-err to lock out further runs // On prior critical error abort, the prior listings are renamed to .lst-err to lock out further runs
b.critical = true b.critical = true
b.retryable = true
return errors.New("cannot find prior Path1 or Path2 listings, likely due to critical error on prior run") return errors.New("cannot find prior Path1 or Path2 listings, likely due to critical error on prior run")
} }
@@ -224,7 +215,6 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
err = b.checkAccess(ds1.checkFiles, ds2.checkFiles) err = b.checkAccess(ds1.checkFiles, ds2.checkFiles)
if err != nil { if err != nil {
b.critical = true b.critical = true
b.retryable = true
return return
} }
} }
@@ -265,7 +255,6 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
changes1, changes2, err = b.applyDeltas(octx, ds1, ds2) changes1, changes2, err = b.applyDeltas(octx, ds1, ds2)
if err != nil { if err != nil {
b.critical = true b.critical = true
// b.retryable = true // not sure about this one
return err return err
} }
} }
@@ -294,7 +283,6 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
} }
if err != nil { if err != nil {
b.critical = true b.critical = true
b.retryable = true
return err return err
} }
@@ -322,7 +310,6 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
} }
if err != nil { if err != nil {
b.critical = true b.critical = true
b.retryable = true
return err return err
} }
} }
@@ -354,39 +341,6 @@ func (b *bisyncRun) resync(octx, fctx context.Context, listing1, listing2 string
return err return err
} }
// Check access health on the Path1 and Path2 filesystems
// enforce even though this is --resync
if b.opt.CheckAccess {
fs.Infof(nil, "Checking access health")
ds1 := &deltaSet{
checkFiles: bilib.Names{},
}
ds2 := &deltaSet{
checkFiles: bilib.Names{},
}
for _, file := range filesNow1.list {
if filepath.Base(file) == b.opt.CheckFilename {
ds1.checkFiles.Add(file)
}
}
for _, file := range filesNow2.list {
if filepath.Base(file) == b.opt.CheckFilename {
ds2.checkFiles.Add(file)
}
}
err = b.checkAccess(ds1.checkFiles, ds2.checkFiles)
if err != nil {
b.critical = true
b.retryable = true
return err
}
}
copy2to1 := []string{} copy2to1 := []string{}
for _, file := range filesNow2.list { for _, file := range filesNow2.list {
if !filesNow1.has(file) { if !filesNow1.has(file) {
@@ -413,34 +367,11 @@ func (b *bisyncRun) resync(octx, fctx context.Context, listing1, listing2 string
// prevent overwriting Google Doc files (their size is -1) // prevent overwriting Google Doc files (their size is -1)
filterSync.Opt.MinSize = 0 filterSync.Opt.MinSize = 0
} }
if err = sync.CopyDir(ctxSync, b.fs2, b.fs1, b.opt.CreateEmptySrcDirs); err != nil { if err = sync.Sync(ctxSync, b.fs2, b.fs1, false); err != nil {
b.critical = true b.critical = true
return err return err
} }
if b.opt.CreateEmptySrcDirs {
// copy Path2 back to Path1, for empty dirs
// the fastCopy above cannot include directories, because it relies on --files-from for filtering,
// so instead we'll copy them here, relying on fctx for our filtering.
// This preserves the original resync order for backward compatibility. It is essentially:
// rclone copy Path2 Path1 --ignore-existing
// rclone copy Path1 Path2 --create-empty-src-dirs
// rclone copy Path2 Path1 --create-empty-src-dirs
// although if we were starting from scratch, it might be cleaner and faster to just do:
// rclone copy Path2 Path1 --create-empty-src-dirs
// rclone copy Path1 Path2 --create-empty-src-dirs
fs.Infof(nil, "Resynching Path2 to Path1 (for empty dirs)")
//note copy (not sync) and dst comes before src
if err = sync.CopyDir(ctxSync, b.fs1, b.fs2, b.opt.CreateEmptySrcDirs); err != nil {
b.critical = true
return err
}
}
fs.Infof(nil, "Resync updating listings") fs.Infof(nil, "Resync updating listings")
if _, err = b.makeListing(fctx, b.fs1, listing1); err != nil { if _, err = b.makeListing(fctx, b.fs1, listing1); err != nil {
b.critical = true b.critical = true
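
The removed error handling above distinguishes retryable critical errors (listings kept so a later run can retry under --resilient) from hard failures (listings renamed to *-err, forcing --resync). A simplified, stdlib-only sketch of that branching; handleCritical and its parameters are invented names, not the bisyncRun method itself:

package main

import (
    "errors"
    "fmt"
    "os"
)

var errAborted = errors.New("bisync aborted")

// handleCritical sketches the branching shown above: in resilient mode a
// retryable critical error leaves the listings alone so the next run can
// retry; otherwise both listings are renamed to *-err, forcing a --resync.
func handleCritical(listing1, listing2 string, retryable, resilient bool) error {
    if retryable && resilient {
        fmt.Println("retryable error: listings kept, retry without --resync")
        return errAborted
    }
    for _, l := range []string{listing1, listing2} {
        if _, err := os.Stat(l); err == nil {
            _ = os.Rename(l, l+"-err")
        }
    }
    fmt.Println("critical error: must run --resync to recover")
    return errAborted
}

func main() {
    _ = handleCritical("/tmp/path1.lst", "/tmp/path2.lst", true, true)
}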


@@ -3,7 +3,6 @@ package bisync
import ( import (
"context" "context"
"fmt" "fmt"
"sort"
"github.com/rclone/rclone/cmd/bisync/bilib" "github.com/rclone/rclone/cmd/bisync/bilib"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
@@ -24,7 +23,7 @@ func (b *bisyncRun) fastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.
} }
} }
return sync.CopyDir(ctxCopy, fdst, fsrc, b.opt.CreateEmptySrcDirs) return sync.CopyDir(ctxCopy, fdst, fsrc, false)
} }
func (b *bisyncRun) fastDelete(ctx context.Context, f fs.Fs, files bilib.Names, queueName string) error { func (b *bisyncRun) fastDelete(ctx context.Context, f fs.Fs, files bilib.Names, queueName string) error {
@@ -33,14 +32,7 @@ func (b *bisyncRun) fastDelete(ctx context.Context, f fs.Fs, files bilib.Names,
} }
transfers := fs.GetConfig(ctx).Transfers transfers := fs.GetConfig(ctx).Transfers
ctxRun, filterDelete := filter.AddConfig(b.opt.setDryRun(ctx)) ctxRun := b.opt.setDryRun(ctx)
for _, file := range files.ToList() {
if err := filterDelete.AddFile(file); err != nil {
return err
}
}
objChan := make(fs.ObjectsChan, transfers) objChan := make(fs.ObjectsChan, transfers)
errChan := make(chan error, 1) errChan := make(chan error, 1)
@@ -61,36 +53,6 @@ func (b *bisyncRun) fastDelete(ctx context.Context, f fs.Fs, files bilib.Names,
return err return err
} }
// operation should be "make" or "remove"
func (b *bisyncRun) syncEmptyDirs(ctx context.Context, dst fs.Fs, candidates bilib.Names, dirsList *fileList, operation string) {
if b.opt.CreateEmptySrcDirs && (!b.opt.Resync || operation == "make") {
candidatesList := candidates.ToList()
if operation == "remove" {
// reverse the sort order to ensure we remove subdirs before parent dirs
sort.Sort(sort.Reverse(sort.StringSlice(candidatesList)))
}
for _, s := range candidatesList {
var direrr error
if dirsList.has(s) { //make sure it's a dir, not a file
if operation == "remove" {
//note: we need to use Rmdirs instead of Rmdir because directories will fail to delete if they have other empty dirs inside of them.
direrr = operations.Rmdirs(ctx, dst, s, false)
} else if operation == "make" {
direrr = operations.Mkdir(ctx, dst, s)
} else {
direrr = fmt.Errorf("invalid operation. Expected 'make' or 'remove', received '%q'", operation)
}
if direrr != nil {
fs.Debugf(nil, "Error syncing directory: %v", direrr)
}
}
}
}
}
func (b *bisyncRun) saveQueue(files bilib.Names, jobName string) error { func (b *bisyncRun) saveQueue(files bilib.Names, jobName string) error {
if !b.opt.SaveQueues { if !b.opt.SaveQueues {
return nil return nil
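
One detail worth noting in the removed syncEmptyDirs: empty directories can only be removed child-first, which the code gets by reverse-sorting the candidate paths before calling Rmdirs. A tiny sketch of just that ordering step (the helper name is made up):

package main

import (
    "fmt"
    "sort"
)

// removalOrder reverse-sorts directory paths so that subdirectories come
// before their parents, mirroring the ordering trick in the removed code.
func removalOrder(dirs []string) []string {
    out := append([]string(nil), dirs...)
    sort.Sort(sort.Reverse(sort.StringSlice(out)))
    return out
}

func main() {
    fmt.Println(removalOrder([]string{"a", "a/b", "a/b/c"}))
    // prints [a/b/c a/b a]: children first, parents last
}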


@@ -26,7 +26,6 @@ func rcBisync(ctx context.Context, in rc.Params) (out rc.Params, err error) {
if dryRun, err := in.GetBool("dryRun"); err == nil { if dryRun, err := in.GetBool("dryRun"); err == nil {
ci.DryRun = dryRun ci.DryRun = dryRun
opt.DryRun = dryRun
} else if rc.NotErrParamNotFound(err) { } else if rc.NotErrParamNotFound(err) {
return nil, err return nil, err
} }
@@ -49,21 +48,12 @@ func rcBisync(ctx context.Context, in rc.Params) (out rc.Params, err error) {
if opt.Force, err = in.GetBool("force"); rc.NotErrParamNotFound(err) { if opt.Force, err = in.GetBool("force"); rc.NotErrParamNotFound(err) {
return return
} }
if opt.CreateEmptySrcDirs, err = in.GetBool("createEmptySrcDirs"); rc.NotErrParamNotFound(err) {
return
}
if opt.RemoveEmptyDirs, err = in.GetBool("removeEmptyDirs"); rc.NotErrParamNotFound(err) { if opt.RemoveEmptyDirs, err = in.GetBool("removeEmptyDirs"); rc.NotErrParamNotFound(err) {
return return
} }
if opt.NoCleanup, err = in.GetBool("noCleanup"); rc.NotErrParamNotFound(err) { if opt.NoCleanup, err = in.GetBool("noCleanup"); rc.NotErrParamNotFound(err) {
return return
} }
if opt.IgnoreListingChecksum, err = in.GetBool("ignoreListingChecksum"); rc.NotErrParamNotFound(err) {
return
}
if opt.Resilient, err = in.GetBool("resilient"); rc.NotErrParamNotFound(err) {
return
}
if opt.CheckFilename, err = in.GetString("checkFilename"); rc.NotErrParamNotFound(err) { if opt.CheckFilename, err = in.GetString("checkFilename"); rc.NotErrParamNotFound(err) {
return return
@@ -79,9 +69,6 @@ func rcBisync(ctx context.Context, in rc.Params) (out rc.Params, err error) {
if rc.NotErrParamNotFound(err) { if rc.NotErrParamNotFound(err) {
return nil, err return nil, err
} }
if checkSync == "" {
checkSync = "true"
}
if err := opt.CheckSync.Set(checkSync); err != nil { if err := opt.CheckSync.Set(checkSync); err != nil {
return nil, err return nil, err
} }
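
rcBisync above pulls its options out of rc.Params, and one of the removed hunks defaults an empty checkSync to "true". A rough sketch of that parameter handling with a plain map; the parameter names follow the rc help text earlier in this diff, the values are made up, and the actual rc plumbing is omitted:

package main

import "fmt"

func main() {
    // Parameter names as listed in the rc help text; values are illustrative only.
    params := map[string]any{
        "dryRun":          true,
        "maxDelete":       50,
        "checkFilename":   "RCLONE_TEST",
        "checkSync":       "", // deliberately unset
        "removeEmptyDirs": false,
    }
    // The removed rc.go lines treat an empty checkSync as "true";
    // this reproduces that defaulting step.
    if s, _ := params["checkSync"].(string); s == "" {
        params["checkSync"] = "true"
    }
    fmt.Println(params["checkSync"]) // true
}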


@@ -4,7 +4,7 @@
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file10.txt" - 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file10.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file11.txt" - 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file11.txt"
- 13 md5:fb3ecfb2800400fb01b0bfd39903e9fb - 2001-01-02T00:00:00.000000000+0000 "file2.txt" - 13 md5:fb3ecfb2800400fb01b0bfd39903e9fb - 2001-01-02T00:00:00.000000000+0000 "file2.txt"
- 39 md5:0860a03592626642f8fd6c8bfb447d2a - 2001-03-04T00:00:00.000000000+0000 "file5.txt..path1" - 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-03-04T00:00:00.000000000+0000 "file5.txt..path1"
- 39 md5:979a803b15d27df0c31ad7d29006d10b - 2001-01-02T00:00:00.000000000+0000 "file5.txt..path2" - 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file5.txt..path2"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file6.txt" - 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file6.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file7.txt" - 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file7.txt"


@@ -4,5 +4,5 @@
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file11.txt" - 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file11.txt"
- 13 md5:fb3ecfb2800400fb01b0bfd39903e9fb - 2001-01-02T00:00:00.000000000+0000 "file2.txt" - 13 md5:fb3ecfb2800400fb01b0bfd39903e9fb - 2001-01-02T00:00:00.000000000+0000 "file2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file3.txt" - 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 39 md5:0860a03592626642f8fd6c8bfb447d2a - 2001-03-04T00:00:00.000000000+0000 "file5.txt" - 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-03-04T00:00:00.000000000+0000 "file5.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file7.txt" - 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file7.txt"


@@ -4,7 +4,7 @@
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file10.txt" - 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file10.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file11.txt" - 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file11.txt"
- 13 md5:fb3ecfb2800400fb01b0bfd39903e9fb - 2001-01-02T00:00:00.000000000+0000 "file2.txt" - 13 md5:fb3ecfb2800400fb01b0bfd39903e9fb - 2001-01-02T00:00:00.000000000+0000 "file2.txt"
- 39 md5:0860a03592626642f8fd6c8bfb447d2a - 2001-03-04T00:00:00.000000000+0000 "file5.txt..path1" - 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-03-04T00:00:00.000000000+0000 "file5.txt..path1"
- 39 md5:979a803b15d27df0c31ad7d29006d10b - 2001-01-02T00:00:00.000000000+0000 "file5.txt..path2" - 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file5.txt..path2"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file6.txt" - 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file6.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file7.txt" - 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file7.txt"


@@ -4,5 +4,5 @@
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file10.txt" - 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file10.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file2.txt" - 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file4.txt" - 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 39 md5:979a803b15d27df0c31ad7d29006d10b - 2001-01-02T00:00:00.000000000+0000 "file5.txt" - 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file5.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file6.txt" - 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file6.txt"


@@ -68,11 +68,6 @@ INFO : - Path2 File was deleted - file7.txt
INFO : - Path2 File was deleted - file8.txt INFO : - Path2 File was deleted - file8.txt
INFO : Path2: 7 changes: 1 new, 3 newer, 0 older, 3 deleted INFO : Path2: 7 changes: 1 new, 3 newer, 0 older, 3 deleted
INFO : Applying changes INFO : Applying changes
INFO : Checking potential conflicts...
ERROR : file5.txt: md5 differ
NOTICE: Local file system at {path2}: 1 differences found
NOTICE: Local file system at {path2}: 1 errors while checking
INFO : Finished checking the potential conflicts. 1 differences found
INFO : - Path1 Queue copy to Path2 - {path2/}file11.txt INFO : - Path1 Queue copy to Path2 - {path2/}file11.txt
INFO : - Path1 Queue copy to Path2 - {path2/}file2.txt INFO : - Path1 Queue copy to Path2 - {path2/}file2.txt
INFO : - Path2 Queue delete - {path2/}file4.txt INFO : - Path2 Queue delete - {path2/}file4.txt


@@ -1 +1 @@
This file is newer and not equal to 5R This file is newer


@@ -1 +1 @@
This file is newer and not equal to 5L This file is newer


@@ -39,12 +39,10 @@ Bisync error: bisync aborted
(10) : move-listings path2-missing (10) : move-listings path2-missing
(11) : test 3. put the remote subdir .chk_file back, run resync. (11) : test 3. put the remote subdir .chk_file back, run resync.
(12) : copy-file {path1/}subdir/.chk_file {path2/}subdir/ (12) : copy-file {path1/}subdir/.chk_file {path2/}
(13) : bisync check-access resync check-filename=.chk_file (13) : bisync check-access resync check-filename=.chk_file
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}" INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying unique Path2 files to Path1 INFO : Copying unique Path2 files to Path1
INFO : Checking access health
INFO : Found 2 matching ".chk_file" files on both paths
INFO : Resynching Path1 to Path2 INFO : Resynching Path1 to Path2
INFO : Resync updating listings INFO : Resync updating listings
INFO : Bisync successful INFO : Bisync successful


@@ -20,7 +20,7 @@ bisync check-access check-filename=.chk_file
move-listings path2-missing move-listings path2-missing
test 3. put the remote subdir .chk_file back, run resync. test 3. put the remote subdir .chk_file back, run resync.
copy-file {path1/}subdir/.chk_file {path2/}subdir/ copy-file {path1/}subdir/.chk_file {path2/}
bisync check-access resync check-filename=.chk_file bisync check-access resync check-filename=.chk_file
test 4. run sync with check-access. should pass. test 4. run sync with check-access. should pass.


@@ -1,7 +0,0 @@
# bisync listing v1 from test
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.txt"


@@ -1,7 +0,0 @@
# bisync listing v1 from test
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.txt"

View File

@@ -1,7 +0,0 @@
# bisync listing v1 from test
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.txt"

View File

@@ -1,7 +0,0 @@
# bisync listing v1 from test
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.txt"


@@ -1,142 +0,0 @@
(01) : test createemptysrcdirs
(02) : test initial bisync
(03) : touch-glob 2001-01-02 {datadir/} placeholder.txt
(04) : copy-as {datadir/}placeholder.txt {path1/} file1.txt
(05) : copy-as {datadir/}placeholder.txt {path1/} file1.copy1.txt
(06) : copy-as {datadir/}placeholder.txt {path1/} file1.copy2.txt
(07) : copy-as {datadir/}placeholder.txt {path1/} file1.copy3.txt
(08) : copy-as {datadir/}placeholder.txt {path1/} file1.copy4.txt
(09) : copy-as {datadir/}placeholder.txt {path1/} file1.copy5.txt
(10) : bisync resync
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying unique Path2 files to Path1
INFO : Resynching Path1 to Path2
INFO : Resync updating listings
INFO : Bisync successful
(11) : test 1. Create an empty dir on Path1 by creating subdir/placeholder.txt and then deleting the placeholder
(12) : copy-as {datadir/}placeholder.txt {path1/} subdir/placeholder.txt
(13) : touch-glob 2001-01-02 {path1/} subdir
(14) : delete-file {path1/}subdir/placeholder.txt
(15) : test 2. Run bisync without --create-empty-src-dirs
(16) : bisync
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Path1 checking for diffs
INFO : Path2 checking for diffs
INFO : No changes found
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
(17) : test 3. Confirm the subdir exists only on Path1 and not Path2
(18) : list-dirs {path1/}
subdir/
(19) : list-dirs {path2/}
(20) : test 4.Run bisync WITH --create-empty-src-dirs
(21) : bisync create-empty-src-dirs
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Path1 checking for diffs
INFO : - Path1 File is new - subdir
INFO : Path1: 1 changes: 1 new, 0 newer, 0 older, 0 deleted
INFO : Path2 checking for diffs
INFO : Applying changes
INFO : - Path1 Queue copy to Path2 - {path2/}subdir
INFO : - Path1 Do queued copies to - Path2
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
(22) : test 5. Confirm the subdir exists on both paths
(23) : list-dirs {path1/}
subdir/
(24) : list-dirs {path2/}
subdir/
(25) : test 6. Delete the empty dir on Path1 using purge-children (and also add files so the path isn't empty)
(26) : purge-children {path1/}
(27) : copy-as {datadir/}placeholder.txt {path1/} file1.txt
(28) : copy-as {datadir/}placeholder.txt {path1/} file1.copy1.txt
(29) : copy-as {datadir/}placeholder.txt {path1/} file1.copy2.txt
(30) : copy-as {datadir/}placeholder.txt {path1/} file1.copy3.txt
(31) : copy-as {datadir/}placeholder.txt {path1/} file1.copy4.txt
(32) : copy-as {datadir/}placeholder.txt {path1/} file1.copy5.txt
(33) : test 7. Run bisync without --create-empty-src-dirs
(34) : bisync
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Path1 checking for diffs
INFO : - Path1 File was deleted - RCLONE_TEST
INFO : - Path1 File was deleted - subdir
INFO : Path1: 2 changes: 0 new, 0 newer, 0 older, 2 deleted
INFO : Path2 checking for diffs
INFO : - Path2 File was deleted - subdir
INFO : Path2: 1 changes: 0 new, 0 newer, 0 older, 1 deleted
INFO : Applying changes
INFO : - Path2 Queue delete - {path2/}RCLONE_TEST
INFO : - Do queued deletes on - Path2
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
(35) : test 8. Confirm the subdir exists only on Path2 and not Path1
(36) : list-dirs {path1/}
(37) : list-dirs {path2/}
subdir/
(38) : test 9. Reset, do the delete again, and run bisync WITH --create-empty-src-dirs
(39) : bisync resync create-empty-src-dirs
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying unique Path2 files to Path1
INFO : - Path2 Resync will copy to Path1 - subdir
INFO : - Path2 Resync is doing queued copies to - Path1
INFO : Resynching Path1 to Path2
INFO : Resynching Path2 to Path1 (for empty dirs)
INFO : Resync updating listings
INFO : Bisync successful
(40) : list-dirs {path1/}
subdir/
(41) : list-dirs {path2/}
subdir/
(42) : purge-children {path1/}
(43) : copy-as {datadir/}placeholder.txt {path1/} file1.txt
(44) : copy-as {datadir/}placeholder.txt {path1/} file1.copy1.txt
(45) : copy-as {datadir/}placeholder.txt {path1/} file1.copy2.txt
(46) : copy-as {datadir/}placeholder.txt {path1/} file1.copy3.txt
(47) : copy-as {datadir/}placeholder.txt {path1/} file1.copy4.txt
(48) : copy-as {datadir/}placeholder.txt {path1/} file1.copy5.txt
(49) : list-dirs {path1/}
(50) : list-dirs {path2/}
subdir/
(51) : bisync create-empty-src-dirs
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Path1 checking for diffs
INFO : - Path1 File was deleted - subdir
INFO : Path1: 1 changes: 0 new, 0 newer, 0 older, 1 deleted
INFO : Path2 checking for diffs
INFO : Applying changes
INFO : - Path2 Queue delete - {path2/}subdir
INFO : - Do queued deletes on - Path2
INFO : subdir: Removing directory
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
(52) : test 10. Confirm the subdir has been removed on both paths
(53) : list-dirs {path1/}
(54) : list-dirs {path2/}
(55) : test 11. bisync again (because if we leave subdir in listings, test will fail due to mismatched modtime)
(56) : bisync create-empty-src-dirs
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Path1 checking for diffs
INFO : Path2 checking for diffs
INFO : No changes found
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful


@@ -1 +0,0 @@
This file is used for testing the health of rclone accesses to the local/remote file system. Do not delete.
