Mirror of https://github.com/rclone/rclone.git (synced 2025-12-06 00:03:32 +00:00)

Compare commits: fix-linux-... → v1.64.0 (201 commits)
Commits (SHA1):

77f7bb08af, a5a61f4874, e8879f3e77, 2a6675cffd, fa4d171f62, d4d530bd8e,
f4b011e4e4, d80890bf32, 39392d70dd, 643386f026, ed755bf04f, 071c3f28e5,
7453b7d5f3, c9350149d8, 08789a5815, 4037af9c1a, 628ff8e524, 578c75cb1e,
63ab250817, 39f910a65d, 0fb36562dd, 8c25a15a40, f5ee16e201, 2bcbed30bd,
5026a9171d, b750c50bfd, 535acd0483, db37b3ef9e, 257607ab3d, 3ea1c5c4d2,
bd23ea028e, c58d4fe939, ddc7059a73, 2677c43f26, 48ab67f090, 089df7d977,
4fbe0652c9, 47665dad07, ad724463a5, 6afd7088d3, b33140ddeb, b1c0ae5e7d,
40bcc7a90b, be17f1523a, bb58040d9c, 2db0e23584, a7337b0a95, 7821cb884d,
85c29e3629, b7ec75aab6, 38309f2df2, 7487d34c33, e45cb4fc75, 21008b4cd5,
cffe85e6c5, d12a92eac9, 11eeaaf792, a603efeaf4, 0bd0a992a4, 82c8d78a44,
a83fec756b, e953598987, feaa20d885, 967fc6d7f4, b95bda1e92, 9c14562850,
f992742404, f2467d07aa, d69cdb79f7, df5d92d709, 1b5b36523b, 2f424ceecf,
bc986b44b2, f4b1a51af6, 25703ad20e, ab803d1278, 0427177857, 3dfcfc2caa,
d4cff1ae19, f5753369e4, 4c76fac594, 0d0bcdac31, f3bd02f0ef, e6fde67491,
b4e3332e02, 0dea83a4aa, e8f3f98aa0, d61328e459, 9844704567, 94a320f23c,
7fc573db27, af95616122, 72f9f1e9c0, 91b8152321, 552b6c47ff, 01a155fb00,
50d0597d56, 123a030441, 28ceb323ee, c624dd5c3a, a56c11753a, 4341d472aa,
b6e7148daf, 45458f2cdb, de147b6e54, 11de137660, 156c372cd7, c979cde002,
03aab1a123, dc803b572c, 4d19042a61, 923989d1d7, cf65e36cf3, cf5457c2cd,
ea4aa696a5, 34195fd3e8, 40b8167ab4, e365f237f5, 7d449572bd, 181fecaec3,
7701d1d33d, 6dd736fbdc, f36ca0cd25, 9b3b1c7067, 0dd0d6a13e, e5bde42303,
f01a50eb47, 5ca61ab705, 4ac4ce6afd, 40a874a0d8, f4dd86238d, 4f1eafb044,
20c9e0cab6, 9c09cf9cf6, 3a3af00180, 281e0c2d62, 25b81b8789, 90fdd97a7b,
3c58e0efe0, db744f64f6, 480220a84a, d0362171cf, 45887d11f6, c4bad5c1bc,
40de89df73, 27f5297e8d, de185de215, d362db2e08, db2a49e384, d63fcc6e44,
4444037f5c, a555513c26, 039c260216, 4577c08e05, c9ed691919, 9f96c0d4ea,
91d095f468, bff702a6f1, a1d6bbd31f, fb6a9dfbf3, 3f3c5f3ff4, 89196cb353,
9284506b86, 88c72d1f4d, 5e3bf50b2e, 982f76b4df, 347812d1d3, f4449440f8,
e66675d346, 45228e2f18, b866850fdd, 5b63b9534f, 10449c86a4, 26a9a9fed2,
602e42d334, 4c5a21703e, f2ee949eff, 3ad255172c, 29b1751d0e, 363da9aa82,
6c8148ef39, 3ed4a2e963, aaadb48d48, 52e25c43b9, 9a66563fc6, 6ca670d66a,
809653055d, 61325ce507, c3989d1906, a79887171c, f29e284c90, 9a66086fa0,
1845c261c6, 70cbcef624, 9169b2b5ab, 0957c8fb74, bb0cd76a5f, 08240c8cf5,
014acc902d, 33fec9c835, 3a5ffc7839
.github/workflows/build.yml (vendored, 22 changes)
@@ -32,7 +32,7 @@ jobs:
         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '1.21.0-rc.3'
+            go: '1.21'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -43,14 +43,14 @@ jobs:

           - job_name: linux_386
             os: ubuntu-latest
-            go: '1.21.0-rc.3'
+            go: '1.21'
             goarch: 386
             gotags: cmount
             quicktest: true

           - job_name: mac_amd64
             os: macos-11
-            go: '1.21.0-rc.3'
+            go: '1.21'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -59,14 +59,14 @@ jobs:

           - job_name: mac_arm64
             os: macos-11
-            go: '1.21.0-rc.3'
+            go: '1.21'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true

           - job_name: windows
             os: windows-latest
-            go: '1.21.0-rc.3'
+            go: '1.21'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -76,7 +76,7 @@ jobs:

           - job_name: other_os
             os: ubuntu-latest
-            go: '1.21.0-rc.3'
+            go: '1.21'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true
@@ -99,7 +99,7 @@ jobs:

     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0

@@ -232,7 +232,7 @@ jobs:

     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Code quality test
         uses: golangci/golangci-lint-action@v3
@@ -244,7 +244,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v4
         with:
-          go-version: '1.21.0-rc.3'
+          go-version: '1.21'
           check-latest: true

       - name: Install govulncheck
@@ -261,7 +261,7 @@ jobs:

     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0

@@ -269,7 +269,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v4
         with:
-          go-version: '1.21.0-rc.3'
+          go-version: '1.21'

       - name: Go module cache
         uses: actions/cache@v3

@@ -11,7 +11,7 @@ jobs:
     name: Build image job
     steps:
       - name: Checkout master
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
       - name: Login to Docker Hub

@@ -11,7 +11,7 @@ jobs:
     name: Build image job
     steps:
       - name: Checkout master
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
       - name: Get actual patch version
@@ -40,7 +40,7 @@ jobs:
     name: Build docker plugin job
     steps:
       - name: Checkout master
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
       - name: Build and publish docker plugin
.github/workflows/winget.yml (vendored, 2 changes)
@@ -5,7 +5,7 @@ on:

 jobs:
   publish:
-    runs-on: windows-latest # Action can only run on Windows
+    runs-on: ubuntu-latest
     steps:
       - uses: vedantmgoyal2009/winget-releaser@v2
         with:
.gitignore (vendored, 2 changes)
@@ -8,10 +8,10 @@ rclone.iml
 .idea
 .history
 *.test
 *.log
 *.iml
 fuzz-build.zip
 *.orig
 *.rej
 Thumbs.db
 __pycache__
 .DS_Store
.golangci.yml

@@ -33,23 +33,67 @@ issues:
       - staticcheck
       text: 'SA1019: "github.com/rclone/rclone/cmd/serve/httplib" is deprecated'

   # don't disable the revive messages about comments on exported functions
   include:
     - EXC0012
     - EXC0013
     - EXC0014
     - EXC0015

 run:
   # timeout for analysis, e.g. 30s, 5m, default is 1m
   timeout: 10m

 linters-settings:
   revive:
+    # setting rules seems to disable all the rules, so re-enable them here
     rules:
-      - name: unreachable-code
-        disabled: true
-      - name: unused-parameter
-        disabled: true
+      - name: blank-imports
+        disabled: false
+      - name: context-as-argument
+        disabled: false
+      - name: context-keys-type
+        disabled: false
+      - name: dot-imports
+        disabled: false
+      - name: empty-block
+        disabled: true
+      - name: error-naming
+        disabled: false
+      - name: error-return
+        disabled: false
+      - name: error-strings
+        disabled: false
+      - name: errorf
+        disabled: false
+      - name: exported
+        disabled: false
+      - name: increment-decrement
+        disabled: true
+      - name: indent-error-flow
+        disabled: false
+      - name: package-comments
+        disabled: false
+      - name: range
+        disabled: false
+      - name: receiver-naming
+        disabled: false
+      - name: redefines-builtin-id
+        disabled: true
+      - name: superfluous-else
+        disabled: true
+      - name: time-naming
+        disabled: false
+      - name: unexported-return
+        disabled: false
+      - name: unreachable-code
+        disabled: true
+      - name: unused-parameter
+        disabled: true
+      - name: var-declaration
+        disabled: false
+      - name: var-naming
+        disabled: false
   stylecheck:
     # Only enable the checks performed by the staticcheck stand-alone tool,
     # as documented here: https://staticcheck.io/docs/configuration/options/#checks
CONTRIBUTING.md

@@ -419,7 +419,7 @@ remote or an fs.

 Research

-* Look at the interfaces defined in `fs/fs.go`
+* Look at the interfaces defined in `fs/types.go`
 * Study one or more of the existing remotes

 Getting going
@@ -428,14 +428,19 @@ Getting going

 * box is a good one to start from if you have a directory-based remote
 * b2 is a good one to start from if you have a bucket-based remote
 * Add your remote to the imports in `backend/all/all.go`
-* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
+* HTTP based remotes are easiest to maintain if they use rclone's [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) module, but if there is a really good go SDK then use that instead.
 * Try to implement as many optional methods as possible as it makes the remote more usable.
-* Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
+* Use [lib/encoder](https://pkg.go.dev/github.com/rclone/rclone/lib/encoder) to make sure we can encode any path name and `rclone info` to help determine the encodings needed
   * `rclone purge -v TestRemote:rclone-info`
   * `rclone test info --all --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
   * `go run cmd/test/info/internal/build_csv/main.go -o remote.csv remote.json`
   * open `remote.csv` in a spreadsheet and examine

+Important:
+
+* Please use [lib/rest](https://pkg.go.dev/github.com/rclone/rclone/lib/rest) if you are implementing a REST like backend and parsing XML/JSON in the backend. It makes maintenance much easier.
+* If your backend is HTTP based then please use rclone's Client or Transport from [fs/fshttp](https://pkg.go.dev/github.com/rclone/rclone/fs/fshttp) - this adds features like `--dump bodies`, `--tpslimit`, `--user-agent` without you having to code anything!
+
 Unit tests

 * Create a config entry called `TestRemote` for the unit tests to use
@@ -469,7 +474,7 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as

 * `README.md` - main GitHub page
 * `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
   * make sure this has the `autogenerated options` comments in (see your reference backend docs)
-  * update them with `make backenddocs` - revert any changes in other backends
+  * update them in your backend with `bin/make_backend_docs.py remote`
 * `docs/content/overview.md` - overview docs
 * `docs/content/docs.md` - list of remotes in config section
 * `docs/content/_index.md` - front page of rclone.org
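To ground the lib/rest and fs/fshttp advice in the hunk above, here is a minimal sketch of the client setup a new HTTP based backend might use. The endpoint, response struct and helper names are invented for illustration; fshttp.NewClient, rest.NewClient, rest.Opts and CallJSON are the real entry points those links document.

package mybackend

import (
	"context"

	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/lib/rest"
)

// listResponse is a made-up JSON shape for the example endpoint.
type listResponse struct {
	Files []string `json:"files"`
}

// newSrv builds a rest.Client on top of rclone's HTTP client, so the
// backend gets --dump bodies, --tpslimit, --user-agent etc. for free.
func newSrv(ctx context.Context) *rest.Client {
	return rest.NewClient(fshttp.NewClient(ctx)).SetRoot("https://api.example.com") // hypothetical endpoint
}

// listFiles shows a typical JSON call made through lib/rest.
func listFiles(ctx context.Context, srv *rest.Client) (*listResponse, error) {
	opts := rest.Opts{
		Method: "GET",
		Path:   "/files",
	}
	var result listResponse
	_, err := srv.CallJSON(ctx, &opts, nil, &result)
	if err != nil {
		return nil, err
	}
	return &result, nil
}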
MAINTAINERS.md

@@ -18,6 +18,9 @@ Current active maintainers of rclone are:

 | Caleb Case | @calebcase | storj backend |
 | wiserain | @wiserain | pikpak backend |
 | albertony | @albertony | |
+| Chun-Hung Tseng | @henrybear327 | Proton Drive Backend |
+| Hideo Aoyama | @boukendesho | snap packaging |
+| nielash | @nielash | bisync |

 **This is a work in progress Draft**
MANUAL.html (generated, 29365 changes): file diff suppressed because it is too large.
MANUAL.txt (generated, 30877 changes): file diff suppressed because it is too large.
README.md

@@ -73,8 +73,10 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and

 * PikPak [:page_facing_up:](https://rclone.org/pikpak/)
 * premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
 * put.io [:page_facing_up:](https://rclone.org/putio/)
+* Proton Drive [:page_facing_up:](https://rclone.org/protondrive/)
 * QingStor [:page_facing_up:](https://rclone.org/qingstor/)
 * Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
+* Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
 * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
 * RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
 * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
backend/all/all.go

@@ -38,8 +38,10 @@ import (
 	_ "github.com/rclone/rclone/backend/pcloud"
 	_ "github.com/rclone/rclone/backend/pikpak"
 	_ "github.com/rclone/rclone/backend/premiumizeme"
+	_ "github.com/rclone/rclone/backend/protondrive"
 	_ "github.com/rclone/rclone/backend/putio"
 	_ "github.com/rclone/rclone/backend/qingstor"
+	_ "github.com/rclone/rclone/backend/quatrix"
 	_ "github.com/rclone/rclone/backend/s3"
 	_ "github.com/rclone/rclone/backend/seafile"
 	_ "github.com/rclone/rclone/backend/sftp"
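The blank imports above work because each backend package registers itself in an init function, so importing it purely for side effects is enough to make it selectable in the config. A minimal sketch of that registration follows, with a hypothetical backend name; fs.Register and fs.RegInfo are the real rclone APIs.

package mybackend

import (
	"context"
	"errors"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

// init runs when the package is imported (even with a blank import, as in
// backend/all/all.go above) and makes the backend visible to rclone.
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "mybackend", // hypothetical name for illustration
		Description: "Example backend registration",
		NewFs:       NewFs,
	})
}

// NewFs would construct the backend from its config; stubbed out here.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	return nil, errors.New("not implemented")
}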
backend/azureblob/azureblob.go

@@ -5,7 +5,6 @@
 package azureblob

 import (
-	"bytes"
 	"context"
 	"crypto/md5"
 	"encoding/base64"
@@ -18,6 +17,7 @@ import (
 	"net/url"
 	"os"
 	"path"
+	"sort"
 	"strconv"
 	"strings"
 	"sync"
@@ -33,7 +33,6 @@ import (
 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
 	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/chunksize"
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/configmap"
@@ -46,10 +45,8 @@ import (
 	"github.com/rclone/rclone/lib/bucket"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/env"
+	"github.com/rclone/rclone/lib/multipart"
 	"github.com/rclone/rclone/lib/pacer"
-	"github.com/rclone/rclone/lib/pool"
-	"github.com/rclone/rclone/lib/readers"
-	"golang.org/x/sync/errgroup"
 )

 const (
@@ -70,8 +67,6 @@ const (
 	emulatorAccount      = "devstoreaccount1"
 	emulatorAccountKey   = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
 	emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1"
-	memoryPoolFlushTime  = fs.Duration(time.Minute) // flush the cached buffers after this long
-	memoryPoolUseMmap    = false
 )

 var (
@@ -337,17 +332,16 @@ to start uploading.`,
 		Advanced: true,
 	}, {
 		Name:     "memory_pool_flush_time",
-		Default:  memoryPoolFlushTime,
+		Default:  fs.Duration(time.Minute),
 		Advanced: true,
-		Help: `How often internal memory buffer pools will be flushed.
-
-Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
-This option controls how often unused buffers will be removed from the pool.`,
+		Hide:     fs.OptionHideBoth,
+		Help:     `How often internal memory buffer pools will be flushed. (no longer used)`,
 	}, {
 		Name:     "memory_pool_use_mmap",
-		Default:  memoryPoolUseMmap,
+		Default:  false,
 		Advanced: true,
-		Help: `Whether to use mmap buffers in internal memory pool.`,
+		Hide:     fs.OptionHideBoth,
+		Help:     `Whether to use mmap buffers in internal memory pool. (no longer used)`,
 	}, {
 		Name: config.ConfigEncoding,
 		Help: config.ConfigEncodingHelp,
@@ -432,8 +426,6 @@ type Options struct {
 	ArchiveTierDelete    bool                 `config:"archive_tier_delete"`
 	UseEmulator          bool                 `config:"use_emulator"`
 	DisableCheckSum      bool                 `config:"disable_checksum"`
-	MemoryPoolFlushTime  fs.Duration          `config:"memory_pool_flush_time"`
-	MemoryPoolUseMmap    bool                 `config:"memory_pool_use_mmap"`
 	Enc                  encoder.MultiEncoder `config:"encoding"`
 	PublicAccess         string               `config:"public_access"`
 	DirectoryMarkers     bool                 `config:"directory_markers"`
@@ -457,8 +449,6 @@ type Fs struct {
 	cache        *bucket.Cache              // cache for container creation status
 	pacer        *fs.Pacer                  // To pace and retry the API calls
 	uploadToken  *pacer.TokenDispenser      // control concurrency
-	pool         *pool.Pool                 // memory pool
-	poolSize     int64                      // size of pages in memory pool
 	publicAccess container.PublicAccessType // Container Public Access Level
 }

@@ -671,13 +661,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		uploadToken: pacer.NewTokenDispenser(ci.Transfers),
 		cache:       bucket.NewCache(),
 		cntSVCcache: make(map[string]*container.Client, 1),
-		pool: pool.New(
-			time.Duration(opt.MemoryPoolFlushTime),
-			int(opt.ChunkSize),
-			ci.Transfers,
-			opt.MemoryPoolUseMmap,
-		),
-		poolSize: int64(opt.ChunkSize),
 	}
 	f.publicAccess = container.PublicAccessType(opt.PublicAccess)
 	f.setRoot(root)
@@ -1503,7 +1486,7 @@ func (f *Fs) deleteContainer(ctx context.Context, containerName string) error {
 func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	container, directory := f.split(dir)
 	// Remove directory marker file
-	if f.opt.DirectoryMarkers && container != "" && dir != "" {
+	if f.opt.DirectoryMarkers && container != "" && directory != "" {
 		o := &Object{
 			fs:     f,
 			remote: dir + "/",
@@ -1537,7 +1520,10 @@ func (f *Fs) Hashes() hash.Set {
 // Purge deletes all the files and directories including the old versions.
 func (f *Fs) Purge(ctx context.Context, dir string) error {
 	container, directory := f.split(dir)
-	if container == "" || directory != "" {
+	if container == "" {
 		return errors.New("can't purge from root")
 	}
+	if directory != "" {
+		// Delegate to caller if not root of a container
+		return fs.ErrorCantPurge
+	}
@@ -1594,19 +1580,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	return f.NewObject(ctx, remote)
 }

-func (f *Fs) getMemoryPool(size int64) *pool.Pool {
-	if size == int64(f.opt.ChunkSize) {
-		return f.pool
-	}
-
-	return pool.New(
-		time.Duration(f.opt.MemoryPoolFlushTime),
-		int(size),
-		f.ci.Transfers,
-		f.opt.MemoryPoolUseMmap,
-	)
-}
-
 // ------------------------------------------------------------

 // Fs returns the parent Fs
@@ -1982,8 +1955,8 @@ func (rs *readSeekCloser) Close() error {
 	return nil
 }

-// increment the slice passed in as LSB binary
-func increment(xs []byte) {
+// increment the array as LSB binary
+func increment(xs *[8]byte) {
 	for i, digit := range xs {
 		newDigit := digit + 1
 		xs[i] = newDigit
@@ -1994,22 +1967,43 @@
 	}
 }

-var warnStreamUpload sync.Once
+// record chunk number and id for Close
+type azBlock struct {
+	chunkNumber int
+	id          string
+}

-// uploadMultipart uploads a file using multipart upload
-//
-// Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
-func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, blb *blockblob.Client, httpHeaders *blob.HTTPHeaders) (err error) {
-	// Calculate correct partSize
-	partSize := o.fs.opt.ChunkSize
-	totalParts := -1
-
-	// make concurrency machinery
-	concurrency := o.fs.opt.UploadConcurrency
-	if concurrency < 1 {
-		concurrency = 1
-	}
-	tokens := pacer.NewTokenDispenser(concurrency)
+// Implements the fs.ChunkWriter interface
+type azChunkWriter struct {
+	chunkSize     int64
+	size          int64
+	f             *Fs
+	ui            uploadInfo
+	blocksMu      sync.Mutex // protects the below
+	blocks        []azBlock  // list of blocks for finalize
+	binaryBlockID [8]byte    // block counter as LSB first 8 bytes
+	o             *Object
+}
+
+// OpenChunkWriter returns the chunk size and a ChunkWriter
+//
+// Pass in the remote and the src object
+// You can also use options to hint at the desired chunk size
+func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
+	// Temporary Object under construction
+	o := &Object{
+		fs:     f,
+		remote: remote,
+	}
+	ui, err := o.prepareUpload(ctx, src, options)
+	if err != nil {
+		return info, nil, fmt.Errorf("failed to prepare upload: %w", err)
+	}
+
+	// Calculate correct partSize
+	partSize := f.opt.ChunkSize
+	totalParts := -1
+	size := src.Size()

 	// Note that the max size of file is 4.75 TB (100 MB X 50,000
 	// blocks) and this is bigger than the max uncommitted block
@@ -2023,13 +2017,13 @@
 	// 195GB which seems like a not too unreasonable limit.
 	if size == -1 {
 		warnStreamUpload.Do(func() {
-			fs.Logf(o, "Streaming uploads using chunk size %v will have maximum file size of %v",
-				o.fs.opt.ChunkSize, partSize*fs.SizeSuffix(blockblob.MaxBlocks))
+			fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
+				f.opt.ChunkSize, partSize*fs.SizeSuffix(blockblob.MaxBlocks))
 		})
 	} else {
-		partSize = chunksize.Calculator(o, size, blockblob.MaxBlocks, o.fs.opt.ChunkSize)
+		partSize = chunksize.Calculator(remote, size, blockblob.MaxBlocks, f.opt.ChunkSize)
 		if partSize > fs.SizeSuffix(blockblob.MaxStageBlockBytes) {
-			return fmt.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), fs.SizeSuffix(blockblob.MaxBlocks), fs.SizeSuffix(blockblob.MaxStageBlockBytes))
+			return info, nil, fmt.Errorf("can't upload as it is too big %v - takes more than %d chunks of %v", fs.SizeSuffix(size), fs.SizeSuffix(blockblob.MaxBlocks), fs.SizeSuffix(blockblob.MaxStageBlockBytes))
 		}
 		totalParts = int(fs.SizeSuffix(size) / partSize)
 		if fs.SizeSuffix(size)%partSize != 0 {
@@ -2039,173 +2033,262 @@
 	fs.Debugf(o, "Multipart upload session started for %d parts of size %v", totalParts, partSize)

-	// unwrap the accounting from the input, we use wrap to put it
-	// back on after the buffering
-	in, wrap := accounting.UnWrap(in)
-
-	// FIXME it would be nice to delete uncommitted blocks
-	// See: https://github.com/rclone/rclone/issues/5583
-	//
-	// However there doesn't seem to be an easy way of doing this other than
-	// by deleting the target.
-	//
-	// This means that a failed upload deletes the target which isn't ideal.
-	//
-	// Uploading a zero length blob and deleting it will remove the
-	// uncommitted blocks I think.
-	//
-	// Could check to see if a file exists already and if it
-	// doesn't then create a 0 length file and delete it to flush
-	// the uncommitted blocks.
-	//
-	// This is what azcopy does
-	// https://github.com/MicrosoftDocs/azure-docs/issues/36347#issuecomment-541457962
-	// defer atexit.OnError(&err, func() {
-	// 	fs.Debugf(o, "Cancelling multipart upload")
-	//	// Code goes here!
-	// })()
-
-	// Upload the chunks
-	var (
-		g, gCtx       = errgroup.WithContext(ctx)
-		remaining     = fs.SizeSuffix(size)                 // remaining size in file for logging only, -1 if size < 0
-		position      = fs.SizeSuffix(0)                    // position in file
-		memPool       = o.fs.getMemoryPool(int64(partSize)) // pool to get memory from
-		finished      = false                               // set when we have read EOF
-		blocks        []string                              // list of blocks for finalize
-		binaryBlockID = make([]byte, 8)                     // block counter as LSB first 8 bytes
-	)
-	for part := 0; !finished; part++ {
-		// Get a block of memory from the pool and a token which limits concurrency
-		tokens.Get()
-		buf := memPool.Get()
-
-		free := func() {
-			memPool.Put(buf) // return the buf
-			tokens.Put()     // return the token
-		}
-
-		// Fail fast, in case an errgroup managed function returns an error
-		// gCtx is cancelled. There is no point in uploading all the other parts.
-		if gCtx.Err() != nil {
-			free()
-			break
-		}
-
-		// Read the chunk
-		n, err := readers.ReadFill(in, buf) // this can never return 0, nil
-		if err == io.EOF {
-			if n == 0 { // end if no data
-				free()
-				break
-			}
-			finished = true
-		} else if err != nil {
-			free()
-			return fmt.Errorf("multipart upload failed to read source: %w", err)
-		}
-		buf = buf[:n]
-
-		// increment the blockID and save the blocks for finalize
-		increment(binaryBlockID)
-		blockID := base64.StdEncoding.EncodeToString(binaryBlockID)
-		blocks = append(blocks, blockID)
-
-		// Transfer the chunk
-		fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %d", part+1, totalParts, position, fs.SizeSuffix(size), len(buf))
-		g.Go(func() (err error) {
-			defer free()
-
-			// Upload the block, with MD5 for check
-			md5sum := md5.Sum(buf)
-			transactionalMD5 := md5sum[:]
-			err = o.fs.pacer.Call(func() (bool, error) {
-				bufferReader := bytes.NewReader(buf)
-				wrappedReader := wrap(bufferReader)
-				rs := readSeekCloser{wrappedReader, bufferReader}
-				options := blockblob.StageBlockOptions{
-					// Specify the transactional md5 for the body, to be validated by the service.
-					TransactionalValidation: blob.TransferValidationTypeMD5(transactionalMD5),
-				}
-				_, err = blb.StageBlock(ctx, blockID, &rs, &options)
-				return o.fs.shouldRetry(ctx, err)
-			})
-			if err != nil {
-				return fmt.Errorf("multipart upload failed to upload part: %w", err)
-			}
-			return nil
-		})
-
-		// ready for next block
-		if size >= 0 {
-			remaining -= partSize
-		}
-		position += partSize
-	}
-	err = g.Wait()
-	if err != nil {
-		return err
-	}
-
-	options := blockblob.CommitBlockListOptions{
-		Metadata:    o.getMetadata(),
-		Tier:        parseTier(o.fs.opt.AccessTier),
-		HTTPHeaders: httpHeaders,
-	}
-
-	// Finalise the upload session
-	err = o.fs.pacer.Call(func() (bool, error) {
-		_, err := blb.CommitBlockList(ctx, blocks, &options)
-		return o.fs.shouldRetry(ctx, err)
-	})
-	if err != nil {
-		return fmt.Errorf("multipart upload failed to finalize: %w", err)
-	}
-	return nil
-}
+	chunkWriter := &azChunkWriter{
+		chunkSize: int64(partSize),
+		size:      size,
+		f:         f,
+		ui:        ui,
+		o:         o,
+	}
+	info = fs.ChunkWriterInfo{
+		ChunkSize:   int64(partSize),
+		Concurrency: o.fs.opt.UploadConcurrency,
+		//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
+	}
+	fs.Debugf(o, "open chunk writer: started multipart upload")
+	return info, chunkWriter, nil
+}
+
+// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
+func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (int64, error) {
+	if chunkNumber < 0 {
+		err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber)
+		return -1, err
+	}
+
+	// Upload the block, with MD5 for check
+	m := md5.New()
+	currentChunkSize, err := io.Copy(m, reader)
+	if err != nil {
+		return -1, err
+	}
+	// If no data read, don't write the chunk
+	if currentChunkSize == 0 {
+		return 0, nil
+	}
+	md5sum := m.Sum(nil)
+	transactionalMD5 := md5sum[:]
+
+	// increment the blockID and save the blocks for finalize
+	increment(&w.binaryBlockID)
+	blockID := base64.StdEncoding.EncodeToString(w.binaryBlockID[:])
+
+	// Save the blockID for the commit
+	w.blocksMu.Lock()
+	w.blocks = append(w.blocks, azBlock{
+		chunkNumber: chunkNumber,
+		id:          blockID,
+	})
+	w.blocksMu.Unlock()
+
+	err = w.f.pacer.Call(func() (bool, error) {
+		// rewind the reader on retry and after reading md5
+		_, err = reader.Seek(0, io.SeekStart)
+		if err != nil {
+			return false, err
+		}
+		options := blockblob.StageBlockOptions{
+			// Specify the transactional md5 for the body, to be validated by the service.
+			TransactionalValidation: blob.TransferValidationTypeMD5(transactionalMD5),
+		}
+		_, err = w.ui.blb.StageBlock(ctx, blockID, &readSeekCloser{Reader: reader, Seeker: reader}, &options)
+		if err != nil {
+			if chunkNumber <= 8 {
+				return w.f.shouldRetry(ctx, err)
+			}
+			// retry all chunks once have done the first few
+			return true, err
+		}
+		return false, nil
+	})
+	if err != nil {
+		return -1, fmt.Errorf("failed to upload chunk %d with %v bytes: %w", chunkNumber+1, currentChunkSize, err)
+	}
+
+	fs.Debugf(w.o, "multipart upload wrote chunk %d with %v bytes", chunkNumber+1, currentChunkSize)
+	return currentChunkSize, err
+}
+
+// Abort the multpart upload.
+//
+// FIXME it would be nice to delete uncommitted blocks.
+//
+// See: https://github.com/rclone/rclone/issues/5583
+//
+// However there doesn't seem to be an easy way of doing this other than
+// by deleting the target.
+//
+// This means that a failed upload deletes the target which isn't ideal.
+//
+// Uploading a zero length blob and deleting it will remove the
+// uncommitted blocks I think.
+//
+// Could check to see if a file exists already and if it doesn't then
+// create a 0 length file and delete it to flush the uncommitted
+// blocks.
+//
+// This is what azcopy does
+// https://github.com/MicrosoftDocs/azure-docs/issues/36347#issuecomment-541457962
+func (w *azChunkWriter) Abort(ctx context.Context) error {
+	fs.Debugf(w.o, "multipart upload aborted (did nothing - see issue #5583)")
+	return nil
+}
+
+// Close and finalise the multipart upload
+func (w *azChunkWriter) Close(ctx context.Context) (err error) {
+	// sort the completed parts by part number
+	sort.Slice(w.blocks, func(i, j int) bool {
+		return w.blocks[i].chunkNumber < w.blocks[j].chunkNumber
+	})
+
+	// Create a list of block IDs
+	blockIDs := make([]string, len(w.blocks))
+	for i := range w.blocks {
+		blockIDs[i] = w.blocks[i].id
+	}
+
+	options := blockblob.CommitBlockListOptions{
+		Metadata:    w.o.getMetadata(),
+		Tier:        parseTier(w.f.opt.AccessTier),
+		HTTPHeaders: &w.ui.httpHeaders,
+	}
+
+	// Finalise the upload session
+	err = w.f.pacer.Call(func() (bool, error) {
+		_, err := w.ui.blb.CommitBlockList(ctx, blockIDs, &options)
+		return w.f.shouldRetry(ctx, err)
+	})
+	if err != nil {
+		return fmt.Errorf("failed to complete multipart upload: %w", err)
+	}
+	fs.Debugf(w.o, "multipart upload finished")
+	return err
+}
+
+var warnStreamUpload sync.Once
+
+// uploadMultipart uploads a file using multipart upload
+//
+// Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
+func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (ui uploadInfo, err error) {
+	chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
+		Open:        o.fs,
+		OpenOptions: options,
+	})
+	if err != nil {
+		return ui, err
+	}
+	return chunkWriter.(*azChunkWriter).ui, nil
+}

 // uploadSinglepart uploads a short blob using a single part upload
-func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, blb *blockblob.Client, httpHeaders *blob.HTTPHeaders) (err error) {
+func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, ui uploadInfo) (err error) {
+	chunkSize := int64(o.fs.opt.ChunkSize)
 	// fs.Debugf(o, "Single part upload starting of object %d bytes", size)
-	if size > o.fs.poolSize || size < 0 {
-		return fmt.Errorf("internal error: single part upload size too big %d > %d", size, o.fs.opt.ChunkSize)
+	if size > chunkSize || size < 0 {
+		return fmt.Errorf("internal error: single part upload size too big %d > %d", size, chunkSize)
 	}

-	buf := o.fs.pool.Get()
-	defer o.fs.pool.Put(buf)
+	rw := multipart.NewRW()
+	defer fs.CheckClose(rw, &err)

-	n, err := readers.ReadFill(in, buf)
-	if err == nil {
-		// Check to see whether in is exactly len(buf) or bigger
-		var buf2 = []byte{0}
-		n2, err2 := readers.ReadFill(in, buf2)
-		if n2 != 0 || err2 != io.EOF {
-			return fmt.Errorf("single part upload read failed: object longer than expected (expecting %d but got > %d)", size, len(buf))
-		}
-	}
+	n, err := io.CopyN(rw, in, size+1)
 	if err != nil && err != io.EOF {
 		return fmt.Errorf("single part upload read failed: %w", err)
 	}
-	if int64(n) != size {
+	if n != size {
 		return fmt.Errorf("single part upload: expecting to read %d bytes but read %d", size, n)
 	}

-	b := bytes.NewReader(buf[:n])
-	rs := &readSeekCloser{Reader: b, Seeker: b}
+	rs := &readSeekCloser{Reader: rw, Seeker: rw}

 	options := blockblob.UploadOptions{
 		Metadata:    o.getMetadata(),
 		Tier:        parseTier(o.fs.opt.AccessTier),
-		HTTPHeaders: httpHeaders,
+		HTTPHeaders: &ui.httpHeaders,
 	}

-	// Don't retry, return a retry error instead
-	return o.fs.pacer.CallNoRetry(func() (bool, error) {
-		_, err = blb.Upload(ctx, rs, &options)
+	return o.fs.pacer.Call(func() (bool, error) {
+		// rewind the reader on retry
+		_, err = rs.Seek(0, io.SeekStart)
+		if err != nil {
+			return false, err
+		}
+		_, err = ui.blb.Upload(ctx, rs, &options)
 		return o.fs.shouldRetry(ctx, err)
 	})
 }
+
+// Info needed for an upload
+type uploadInfo struct {
+	blb         *blockblob.Client
+	httpHeaders blob.HTTPHeaders
+	isDirMarker bool
+}
+
+// Prepare the object for upload
+func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {
+	container, containerPath := o.split()
+	if container == "" || containerPath == "" {
+		return ui, fmt.Errorf("can't upload to root - need a container")
+	}
+	// Create parent dir/bucket if not saving directory marker
+	_, ui.isDirMarker = o.meta[dirMetaKey]
+	if !ui.isDirMarker {
+		err = o.fs.mkdirParent(ctx, o.remote)
+		if err != nil {
+			return ui, err
+		}
+	}
+
+	// Update Mod time
+	o.updateMetadataWithModTime(src.ModTime(ctx))
+	if err != nil {
+		return ui, err
+	}
+
+	// Create the HTTP headers for the upload
+	ui.httpHeaders = blob.HTTPHeaders{
+		BlobContentType: pString(fs.MimeType(ctx, src)),
+	}
+
+	// Compute the Content-MD5 of the file. As we stream all uploads it
+	// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
+	if !o.fs.opt.DisableCheckSum {
+		if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
+			sourceMD5bytes, err := hex.DecodeString(sourceMD5)
+			if err == nil {
+				ui.httpHeaders.BlobContentMD5 = sourceMD5bytes
+			} else {
+				fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
+			}
+		}
+	}
+
+	// Apply upload options (also allows one to overwrite content-type)
+	for _, option := range options {
+		key, value := option.Header()
+		lowerKey := strings.ToLower(key)
+		switch lowerKey {
+		case "":
+			// ignore
+		case "cache-control":
+			ui.httpHeaders.BlobCacheControl = pString(value)
+		case "content-disposition":
+			ui.httpHeaders.BlobContentDisposition = pString(value)
+		case "content-encoding":
+			ui.httpHeaders.BlobContentEncoding = pString(value)
+		case "content-language":
+			ui.httpHeaders.BlobContentLanguage = pString(value)
+		case "content-type":
+			ui.httpHeaders.BlobContentType = pString(value)
+		}
+	}
+
+	ui.blb = o.fs.getBlockBlobSVC(container, containerPath)
+	return ui, nil
+}

 // Update the object with the contents of the io.Reader, modTime and size
 //
 // The new object may have been created if an error is returned
@@ -2221,80 +2304,26 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			return errCantUpdateArchiveTierBlobs
 		}
 	}
-	container, containerPath := o.split()
-	if container == "" || containerPath == "" {
-		return fmt.Errorf("can't upload to root - need a container")
-	}
-	// Create parent dir/bucket if not saving directory marker
-	_, isDirMarker := o.meta[dirMetaKey]
-	if !isDirMarker {
-		err = o.fs.mkdirParent(ctx, o.remote)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Update Mod time
-	fs.Debugf(nil, "o.meta = %+v", o.meta)
-	o.updateMetadataWithModTime(src.ModTime(ctx))
-	if err != nil {
-		return err
-	}
-
-	// Create the HTTP headers for the upload
-	httpHeaders := blob.HTTPHeaders{
-		BlobContentType: pString(fs.MimeType(ctx, src)),
-	}
-
-	// Compute the Content-MD5 of the file. As we stream all uploads it
-	// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
-	if !o.fs.opt.DisableCheckSum {
-		if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
-			sourceMD5bytes, err := hex.DecodeString(sourceMD5)
-			if err == nil {
-				httpHeaders.BlobContentMD5 = sourceMD5bytes
-			} else {
-				fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err)
-			}
-		}
-	}
-
-	// Apply upload options (also allows one to overwrite content-type)
-	for _, option := range options {
-		key, value := option.Header()
-		lowerKey := strings.ToLower(key)
-		switch lowerKey {
-		case "":
-			// ignore
-		case "cache-control":
-			httpHeaders.BlobCacheControl = pString(value)
-		case "content-disposition":
-			httpHeaders.BlobContentDisposition = pString(value)
-		case "content-encoding":
-			httpHeaders.BlobContentEncoding = pString(value)
-		case "content-language":
-			httpHeaders.BlobContentLanguage = pString(value)
-		case "content-type":
-			httpHeaders.BlobContentType = pString(value)
-		}
-	}
-
-	blb := o.fs.getBlockBlobSVC(container, containerPath)
 	size := src.Size()
-	multipartUpload := size < 0 || size > o.fs.poolSize
+	multipartUpload := size < 0 || size > int64(o.fs.opt.ChunkSize)
+	var ui uploadInfo

+	fs.Debugf(nil, "o.meta = %+v", o.meta)
 	if multipartUpload {
-		err = o.uploadMultipart(ctx, in, size, blb, &httpHeaders)
+		ui, err = o.uploadMultipart(ctx, in, src, options...)
 	} else {
-		err = o.uploadSinglepart(ctx, in, size, blb, &httpHeaders)
+		ui, err = o.prepareUpload(ctx, src, options)
+		if err != nil {
+			return fmt.Errorf("failed to prepare upload: %w", err)
+		}
+		err = o.uploadSinglepart(ctx, in, size, ui)
 	}
 	if err != nil {
 		return err
 	}

 	// Refresh metadata on object
-	if !isDirMarker {
+	if !ui.isDirMarker {
 		o.clearMetaData()
 		err = o.readMetaData(ctx)
 		if err != nil {
@@ -2383,13 +2412,14 @@ func parseTier(tier string) *blob.AccessTier {

 // Check the interfaces are satisfied
 var (
-	_ fs.Fs          = &Fs{}
-	_ fs.Copier      = &Fs{}
-	_ fs.PutStreamer = &Fs{}
-	_ fs.Purger      = &Fs{}
-	_ fs.ListRer     = &Fs{}
-	_ fs.Object      = &Object{}
-	_ fs.MimeTyper   = &Object{}
-	_ fs.GetTierer   = &Object{}
-	_ fs.SetTierer   = &Object{}
+	_ fs.Fs              = &Fs{}
+	_ fs.Copier          = &Fs{}
+	_ fs.PutStreamer     = &Fs{}
+	_ fs.Purger          = &Fs{}
+	_ fs.ListRer         = &Fs{}
+	_ fs.OpenChunkWriter = &Fs{}
+	_ fs.Object          = &Object{}
+	_ fs.MimeTyper       = &Object{}
+	_ fs.GetTierer       = &Object{}
+	_ fs.SetTierer       = &Object{}
 )

backend/azureblob/azureblob_internal_test.go

@@ -20,17 +20,18 @@ func (f *Fs) InternalTest(t *testing.T) {

 func TestIncrement(t *testing.T) {
 	for _, test := range []struct {
-		in   []byte
-		want []byte
+		in   [8]byte
+		want [8]byte
 	}{
-		{[]byte{0, 0, 0, 0}, []byte{1, 0, 0, 0}},
-		{[]byte{0xFE, 0, 0, 0}, []byte{0xFF, 0, 0, 0}},
-		{[]byte{0xFF, 0, 0, 0}, []byte{0, 1, 0, 0}},
-		{[]byte{0, 1, 0, 0}, []byte{1, 1, 0, 0}},
-		{[]byte{0xFF, 0xFF, 0xFF, 0xFE}, []byte{0, 0, 0, 0xFF}},
-		{[]byte{0xFF, 0xFF, 0xFF, 0xFF}, []byte{0, 0, 0, 0}},
+		{[8]byte{0, 0, 0, 0}, [8]byte{1, 0, 0, 0}},
+		{[8]byte{0xFE, 0, 0, 0}, [8]byte{0xFF, 0, 0, 0}},
+		{[8]byte{0xFF, 0, 0, 0}, [8]byte{0, 1, 0, 0}},
+		{[8]byte{0, 1, 0, 0}, [8]byte{1, 1, 0, 0}},
+		{[8]byte{0xFF, 0xFF, 0xFF, 0xFE}, [8]byte{0, 0, 0, 0xFF}},
+		{[8]byte{0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 1}},
+		{[8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 0, 0, 0}},
 	} {
-		increment(test.in)
+		increment(&test.in)
 		assert.Equal(t, test.want, test.in)
 	}
 }

backend/azureblob/azureblob_test.go

@@ -31,9 +31,9 @@ func TestIntegration2(t *testing.T) {
 	if *fstest.RemoteName != "" {
 		t.Skip("Skipping as -remote set")
 	}
-	name := "TestAzureBlob:"
+	name := "TestAzureBlob"
 	fstests.Run(t, &fstests.Opt{
-		RemoteName: name,
+		RemoteName: name + ":",
 		NilObject:  (*Object)(nil),
 		TiersToTest: []string{"Hot", "Cool"},
 		ChunkedUpload: fstests.ChunkedUploadConfig{
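The azureblob changes above and the b2 changes below both plug into rclone's generic chunked upload machinery through the same small contract. As a reading aid, here is that contract transcribed from the method signatures visible in the diffs; the real definitions live in rclone's fs package, so treat this copy as a sketch.

package fssketch

import (
	"context"
	"io"
)

// ChunkWriterInfo mirrors the fields the backends fill in from OpenChunkWriter.
type ChunkWriterInfo struct {
	ChunkSize         int64 // preferred chunk size for this upload
	Concurrency       int   // how many chunks may be in flight at once
	LeavePartsOnError bool  // keep uncommitted parts when the upload fails
}

// ChunkWriter is what OpenChunkWriter returns. The generic uploader in
// lib/multipart calls WriteChunk concurrently (hence azChunkWriter guarding
// its block list with blocksMu), then Close on success or Abort on failure.
type ChunkWriter interface {
	// WriteChunk uploads chunk chunkNumber; the reader is seekable so a
	// chunk can be rewound and re-sent on retry.
	WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error)
	// Close commits the written chunks into the final object.
	Close(ctx context.Context) error
	// Abort gives up on the upload, discarding uploaded chunks if possible.
	Abort(ctx context.Context) error
}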
backend/b2/b2.go (148 changes)
@@ -32,6 +32,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/rclone/rclone/lib/bucket"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/multipart"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/pool"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
@@ -57,9 +58,7 @@ const (
|
||||
minChunkSize = 5 * fs.Mebi
|
||||
defaultChunkSize = 96 * fs.Mebi
|
||||
defaultUploadCutoff = 200 * fs.Mebi
|
||||
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
|
||||
memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
|
||||
memoryPoolUseMmap = false
|
||||
largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
|
||||
)
|
||||
|
||||
// Globals
|
||||
@@ -149,6 +148,18 @@ might a maximum of "--transfers" chunks in progress at once.
|
||||
5,000,000 Bytes is the minimum size.`,
|
||||
Default: defaultChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_concurrency",
|
||||
Help: `Concurrency for multipart uploads.
|
||||
|
||||
This is the number of chunks of the same file that are uploaded
|
||||
concurrently.
|
||||
|
||||
Note that chunks are stored in memory and there may be up to
|
||||
"--transfers" * "--b2-upload-concurrency" chunks stored at once
|
||||
in memory.`,
|
||||
Default: 16,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "disable_checksum",
|
||||
Help: `Disable checksums for large (> upload cutoff) files.
|
||||
@@ -188,16 +199,16 @@ The minimum value is 1 second. The maximum value is one week.`,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "memory_pool_flush_time",
|
||||
Default: memoryPoolFlushTime,
|
||||
Default: fs.Duration(time.Minute),
|
||||
Advanced: true,
|
||||
Help: `How often internal memory buffer pools will be flushed.
|
||||
Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations.
|
||||
This option controls how often unused buffers will be removed from the pool.`,
|
||||
Hide: fs.OptionHideBoth,
|
||||
Help: `How often internal memory buffer pools will be flushed. (no longer used)`,
|
||||
}, {
|
||||
Name: "memory_pool_use_mmap",
|
||||
Default: memoryPoolUseMmap,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
Help: `Whether to use mmap buffers in internal memory pool.`,
|
||||
Hide: fs.OptionHideBoth,
|
||||
Help: `Whether to use mmap buffers in internal memory pool. (no longer used)`,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@@ -224,11 +235,10 @@ type Options struct {
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
DisableCheckSum bool `config:"disable_checksum"`
|
||||
DownloadURL string `config:"download_url"`
|
||||
DownloadAuthorizationDuration fs.Duration `config:"download_auth_duration"`
|
||||
MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
|
||||
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
}
|
||||
|
||||
@@ -253,7 +263,6 @@ type Fs struct {
|
||||
authMu sync.Mutex // lock for authorizing the account
|
||||
pacer *fs.Pacer // To pace and retry the API calls
|
||||
uploadToken *pacer.TokenDispenser // control concurrency
|
||||
pool *pool.Pool // memory pool
|
||||
}
|
||||
|
||||
// Object describes a b2 object
|
||||
@@ -458,12 +467,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
uploads: make(map[string][]*api.GetUploadURLResponse),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
uploadToken: pacer.NewTokenDispenser(ci.Transfers),
|
||||
pool: pool.New(
|
||||
time.Duration(opt.MemoryPoolFlushTime),
|
||||
int(opt.ChunkSize),
|
||||
ci.Transfers,
|
||||
opt.MemoryPoolUseMmap,
|
||||
),
|
||||
}
|
||||
f.setRoot(root)
|
||||
f.features = (&fs.Features{
|
||||
@@ -597,23 +600,24 @@ func (f *Fs) clearUploadURL(bucketID string) {
|
||||
f.uploadMu.Unlock()
|
||||
}
|
||||
|
||||
// getBuf gets a buffer of f.opt.ChunkSize and an upload token
|
||||
// getRW gets a RW buffer and an upload token
|
||||
//
|
||||
// If noBuf is set then it just gets an upload token
|
||||
func (f *Fs) getBuf(noBuf bool) (buf []byte) {
|
||||
func (f *Fs) getRW(noBuf bool) (rw *pool.RW) {
|
||||
f.uploadToken.Get()
|
||||
if !noBuf {
|
||||
buf = f.pool.Get()
|
||||
rw = multipart.NewRW()
|
||||
}
|
||||
return buf
|
||||
return rw
|
||||
}
|
||||
|
||||
// putBuf returns a buffer to the memory pool and an upload token
|
||||
// putRW returns a RW buffer to the memory pool and returns an upload
|
||||
// token
|
||||
//
|
||||
// If noBuf is set then it just returns the upload token
|
||||
func (f *Fs) putBuf(buf []byte, noBuf bool) {
|
||||
if !noBuf {
|
||||
f.pool.Put(buf)
|
||||
// If buf is nil then it just returns the upload token
|
||||
func (f *Fs) putRW(rw *pool.RW) {
|
||||
if rw != nil {
|
||||
_ = rw.Close()
|
||||
}
|
||||
f.uploadToken.Put()
|
||||
}
|
||||
@@ -1293,7 +1297,7 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return up.Upload(ctx)
|
||||
return up.Copy(ctx)
|
||||
}
|
||||
|
||||
dstBucket, dstPath := dstObj.split()
|
||||
@@ -1422,7 +1426,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
absPath := "/" + bucketPath
|
||||
absPath := "/" + urlEncode(bucketPath)
|
||||
link = RootURL + "/file/" + urlEncode(bucket) + absPath
|
||||
bucketType, err := f.getbucketType(ctx, bucket)
|
||||
if err != nil {
|
||||
@@ -1861,11 +1865,11 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if size == -1 {
|
||||
if size < 0 {
|
||||
// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
|
||||
buf := o.fs.getBuf(false)
|
||||
rw := o.fs.getRW(false)
|
||||
|
||||
n, err := io.ReadFull(in, buf)
|
||||
n, err := io.CopyN(rw, in, int64(o.fs.opt.ChunkSize))
|
||||
if err == nil {
|
||||
bufReader := bufio.NewReader(in)
|
||||
in = bufReader
|
||||
@@ -1876,26 +1880,26 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
fs.Debugf(o, "File is big enough for chunked streaming")
|
||||
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
|
||||
if err != nil {
|
||||
o.fs.putBuf(buf, false)
|
||||
o.fs.putRW(rw)
|
||||
return err
|
||||
}
|
||||
// NB Stream returns the buffer and token
|
||||
return up.Stream(ctx, buf)
|
||||
} else if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
return up.Stream(ctx, rw)
|
||||
} else if err == io.EOF {
|
||||
fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
|
||||
defer o.fs.putBuf(buf, false)
|
||||
size = int64(n)
|
||||
in = bytes.NewReader(buf[:n])
|
||||
defer o.fs.putRW(rw)
|
||||
size = n
|
||||
in = rw
|
||||
} else {
|
||||
o.fs.putBuf(buf, false)
|
||||
o.fs.putRW(rw)
|
||||
return err
|
||||
}
|
||||
} else if size > int64(o.fs.opt.UploadCutoff) {
|
||||
up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return up.Upload(ctx)
|
||||
_, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
|
||||
Open: o.fs,
|
||||
OpenOptions: options,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
modTime := src.ModTime(ctx)
|
||||
@@ -2003,6 +2007,41 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return o.decodeMetaDataFileInfo(&response)
|
||||
}
|
||||
|
||||
// OpenChunkWriter returns the chunk size and a ChunkWriter
|
||||
//
|
||||
// Pass in the remote and the src object
|
||||
// You can also use options to hint at the desired chunk size
|
||||
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
|
||||
// FIXME what if file is smaller than 1 chunk?
|
||||
if f.opt.Versions {
|
||||
return info, nil, errNotWithVersions
|
||||
}
|
||||
if f.opt.VersionAt.IsSet() {
|
||||
return info, nil, errNotWithVersionAt
|
||||
}
|
||||
//size := src.Size()
|
||||
|
||||
// Temporary Object under construction
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: src.Remote(),
|
||||
}
|
||||
|
||||
bucket, _ := o.split()
|
||||
err = f.makeBucket(ctx, bucket)
|
||||
if err != nil {
|
||||
return info, nil, err
|
||||
}
|
||||
|
||||
info = fs.ChunkWriterInfo{
|
||||
ChunkSize: int64(f.opt.ChunkSize),
|
||||
Concurrency: o.fs.opt.UploadConcurrency,
|
||||
//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
|
||||
}
|
||||
up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil)
|
||||
return info, up, err
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
bucket, bucketPath := o.split()
|
||||
@@ -2030,14 +2069,15 @@ func (o *Object) ID() string {
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.Purger = &Fs{}
|
||||
_ fs.Copier = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.CleanUpper = &Fs{}
|
||||
_ fs.ListRer = &Fs{}
|
||||
_ fs.PublicLinker = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.IDer = &Object{}
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.Purger = &Fs{}
|
||||
_ fs.Copier = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.CleanUpper = &Fs{}
|
||||
_ fs.ListRer = &Fs{}
|
||||
_ fs.PublicLinker = &Fs{}
|
||||
_ fs.OpenChunkWriter = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.IDer = &Object{}
|
||||
)
|
||||
|
||||
@@ -5,7 +5,6 @@
|
||||
package b2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
@@ -14,7 +13,6 @@ import (
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/backend/b2/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
@@ -80,7 +78,8 @@ type largeUpload struct {
|
||||
wrap accounting.WrapFn // account parts being transferred
|
||||
id string // ID of the file being uploaded
|
||||
size int64 // total size
|
||||
parts int64 // calculated number of parts, if known
|
||||
parts int // calculated number of parts, if known
|
||||
sha1smu sync.Mutex // mutex to protect sha1s
|
||||
sha1s []string // slice of SHA1s for each part
|
||||
uploadMu sync.Mutex // lock for upload variable
|
||||
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
|
||||
@@ -93,18 +92,16 @@ type largeUpload struct {
// If newInfo is set then metadata from that will be used instead of reading it from src
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo, defaultChunkSize fs.SizeSuffix, doCopy bool, newInfo *api.File) (up *largeUpload, err error) {
	size := src.Size()
-	parts := int64(0)
-	sha1SliceSize := int64(maxParts)
+	parts := 0
	chunkSize := defaultChunkSize
	if size == -1 {
		fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
	} else {
		chunkSize = chunksize.Calculator(o, size, maxParts, defaultChunkSize)
-		parts = size / int64(chunkSize)
+		parts = int(size / int64(chunkSize))
		if size%int64(chunkSize) != 0 {
			parts++
		}
-		sha1SliceSize = parts
	}

	opts := rest.Opts{
@@ -152,7 +149,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
		id:        response.ID,
		size:      size,
		parts:     parts,
-		sha1s:     make([]string, sha1SliceSize),
+		sha1s:     make([]string, 0, 16),
		chunkSize: int64(chunkSize),
	}
	// unwrap the accounting from the input, we use wrap to put it
@@ -203,10 +200,39 @@ func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
	up.uploadMu.Unlock()
}

-// Transfer a chunk
-func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
-	err := up.f.pacer.Call(func() (bool, error) {
-		fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
+// Add an sha1 to the being built up sha1s
+func (up *largeUpload) addSha1(chunkNumber int, sha1 string) {
+	up.sha1smu.Lock()
+	defer up.sha1smu.Unlock()
+	if len(up.sha1s) < chunkNumber+1 {
+		up.sha1s = append(up.sha1s, make([]string, chunkNumber+1-len(up.sha1s))...)
+	}
+	up.sha1s[chunkNumber] = sha1
+}
+
+// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
+func (up *largeUpload) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (size int64, err error) {
+	// Only account after the checksum reads have been done
+	if do, ok := reader.(pool.DelayAccountinger); ok {
+		// To figure out this number, do a transfer and if the accounted size is 0 or a
+		// multiple of what it should be, increase or decrease this number.
+		do.DelayAccounting(1)
+	}
+
+	err = up.f.pacer.Call(func() (bool, error) {
+		// Discover the size by seeking to the end
+		size, err = reader.Seek(0, io.SeekEnd)
+		if err != nil {
+			return false, err
+		}
+
+		// rewind the reader on retry and after reading size
+		_, err = reader.Seek(0, io.SeekStart)
+		if err != nil {
+			return false, err
+		}
+
+		fs.Debugf(up.o, "Sending chunk %d length %d", chunkNumber, size)
+
		// Get upload URL
		upload, err := up.getUploadURL(ctx)
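The addSha1 helper above lets chunks finish out of order: under the mutex it pads the slice with empty strings up to the index being written, then stores the digest there. The same pattern in isolation:

// setAt grows s on demand so index i is always addressable,
// allowing writers to arrive in any order.
func setAt(s []string, i int, v string) []string {
	if len(s) < i+1 {
		s = append(s, make([]string, i+1-len(s))...)
	}
	s[i] = v
	return s
}
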
@@ -214,8 +240,8 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
			return false, err
		}

-		in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
-		size := int64(len(body)) + int64(in.AdditionalLength())
+		in := newHashAppendingReader(reader, sha1.New())
+		sizeWithHash := size + int64(in.AdditionalLength())

		// Authorization
		//
@@ -245,10 +271,10 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
			Body: up.wrap(in),
			ExtraHeaders: map[string]string{
				"Authorization":    upload.AuthorizationToken,
-				"X-Bz-Part-Number": fmt.Sprintf("%d", part),
+				"X-Bz-Part-Number": fmt.Sprintf("%d", chunkNumber+1),
				sha1Header:         "hex_digits_at_end",
			},
-			ContentLength: &size,
+			ContentLength: &sizeWithHash,
		}

		var response api.UploadPartResponse
@@ -256,7 +282,7 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
		resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
		retry, err := up.f.shouldRetry(ctx, resp, err)
		if err != nil {
-			fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
+			fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", chunkNumber, retry, err, err)
		}
		// On retryable error clear PartUploadURL
		if retry {
@@ -264,30 +290,30 @@ func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byt
			upload = nil
		}
		up.returnUploadURL(upload)
-		up.sha1s[part-1] = in.HexSum()
+		up.addSha1(chunkNumber, in.HexSum())
		return retry, err
	})
	if err != nil {
-		fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
+		fs.Debugf(up.o, "Error sending chunk %d: %v", chunkNumber, err)
	} else {
-		fs.Debugf(up.o, "Done sending chunk %d", part)
+		fs.Debugf(up.o, "Done sending chunk %d", chunkNumber)
	}
-	return err
+	return size, err
}

// Copy a chunk
-func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64) error {
+func (up *largeUpload) copyChunk(ctx context.Context, part int, partSize int64) error {
	err := up.f.pacer.Call(func() (bool, error) {
		fs.Debugf(up.o, "Copying chunk %d length %d", part, partSize)
		opts := rest.Opts{
			Method: "POST",
			Path:   "/b2_copy_part",
		}
-		offset := (part - 1) * up.chunkSize // where we are in the source file
+		offset := int64(part) * up.chunkSize // where we are in the source file
		var request = api.CopyPartRequest{
			SourceID:    up.src.id,
			LargeFileID: up.id,
-			PartNumber:  part,
+			PartNumber:  int64(part + 1),
			Range:       fmt.Sprintf("bytes=%d-%d", offset, offset+partSize-1),
		}
		var response api.UploadPartResponse
@@ -296,7 +322,7 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64
		if err != nil {
			fs.Debugf(up.o, "Error copying chunk %d (retry=%v): %v: %#v", part, retry, err, err)
		}
-		up.sha1s[part-1] = response.SHA1
+		up.addSha1(part, response.SHA1)
		return retry, err
	})
	if err != nil {
@@ -307,8 +333,8 @@ func (up *largeUpload) copyChunk(ctx context.Context, part int64, partSize int64
	return err
}

-// finish closes off the large upload
-func (up *largeUpload) finish(ctx context.Context) error {
+// Close closes off the large upload
+func (up *largeUpload) Close(ctx context.Context) error {
	fs.Debugf(up.o, "Finishing large file %s with %d parts", up.what, up.parts)
	opts := rest.Opts{
		Method: "POST",
@@ -329,8 +355,8 @@ func (up *largeUpload) finish(ctx context.Context) error {
	return up.o.decodeMetaDataFileInfo(&response)
}

-// cancel aborts the large upload
-func (up *largeUpload) cancel(ctx context.Context) error {
+// Abort aborts the large upload
+func (up *largeUpload) Abort(ctx context.Context) error {
	fs.Debugf(up.o, "Cancelling large file %s", up.what)
	opts := rest.Opts{
		Method: "POST",
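The "hex_digits_at_end" header sent above relies on B2 accepting the SHA1 as a hex trailer after the payload, which is why ContentLength becomes the payload size plus in.AdditionalLength(). A minimal sketch of such a reader, assuming only the standard io, hash, strings and encoding/hex packages (rclone's newHashAppendingReader is its own implementation):

type hashAppender struct {
	body io.Reader // payload, tee'd into the hash as it is read
	sum  io.Reader // nil until the payload is exhausted, then the hex digest
	h    hash.Hash
}

func newHashAppender(r io.Reader, h hash.Hash) *hashAppender {
	return &hashAppender{body: io.TeeReader(r, h), h: h}
}

func (a *hashAppender) Read(p []byte) (int, error) {
	if a.sum == nil {
		n, err := a.body.Read(p)
		if err == io.EOF {
			// payload done: switch to streaming the hex digest
			a.sum = strings.NewReader(hex.EncodeToString(a.h.Sum(nil)))
			err = nil
		}
		return n, err
	}
	return a.sum.Read(p)
}
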
@@ -355,157 +381,98 @@ func (up *largeUpload) cancel(ctx context.Context) er
// reaches EOF.
//
-// Note that initialUploadBlock must be returned to f.putBuf()
-func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
-	defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
+func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock *pool.RW) (err error) {
+	defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
	fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
	var (
		g, gCtx      = errgroup.WithContext(ctx)
		hasMoreParts = true
	)
-	up.size = int64(len(initialUploadBlock))
-	g.Go(func() error {
-		for part := int64(1); hasMoreParts; part++ {
-			// Get a block of memory from the pool and token which limits concurrency.
-			var buf []byte
-			if part == 1 {
-				buf = initialUploadBlock
-			} else {
-				buf = up.f.getBuf(false)
-			}
-
-			// Fail fast, in case an errgroup managed function returns an error
-			// gCtx is cancelled. There is no point in uploading all the other parts.
-			if gCtx.Err() != nil {
-				up.f.putBuf(buf, false)
-				return nil
-			}
-
-			// Read the chunk
-			var n int
-			if part == 1 {
-				n = len(buf)
-			} else {
-				n, err = io.ReadFull(up.in, buf)
-				if err == io.ErrUnexpectedEOF {
-					fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
-					buf = buf[:n]
-					hasMoreParts = false
-				} else if err == io.EOF {
-					fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
-					up.f.putBuf(buf, false)
-					return nil
-				} else if err != nil {
-					// other kinds of errors indicate failure
-					up.f.putBuf(buf, false)
-					return err
-				}
-			}
-
-			// Keep stats up to date
-			up.parts = part
-			up.size += int64(n)
-			if part > maxParts {
-				up.f.putBuf(buf, false)
-				return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
-			}
-
-			part := part // for the closure
-			g.Go(func() (err error) {
-				defer up.f.putBuf(buf, false)
-				return up.transferChunk(gCtx, part, buf)
-			})
-		}
-		return nil
-	})
+	up.size = initialUploadBlock.Size()
+	for part := 0; hasMoreParts; part++ {
+		// Get a block of memory from the pool and token which limits concurrency.
+		var rw *pool.RW
+		if part == 1 {
+			rw = initialUploadBlock
+		} else {
+			rw = up.f.getRW(false)
+		}
+
+		// Fail fast, in case an errgroup managed function returns an error
+		// gCtx is cancelled. There is no point in uploading all the other parts.
+		if gCtx.Err() != nil {
+			up.f.putRW(rw)
+			break
+		}
+
+		// Read the chunk
+		var n int64
+		if part == 1 {
+			n = rw.Size()
+		} else {
+			n, err = io.CopyN(rw, up.in, up.chunkSize)
+			if err == io.EOF {
+				fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
+				hasMoreParts = false
+			} else if err != nil {
+				// other kinds of errors indicate failure
+				up.f.putRW(rw)
+				return err
+			}
+		}
+
+		// Keep stats up to date
+		up.parts = part
+		up.size += n
+		if part > maxParts {
+			up.f.putRW(rw)
+			return fmt.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
+		}
+
+		part := part // for the closure
+		g.Go(func() (err error) {
+			defer up.f.putRW(rw)
+			_, err = up.WriteChunk(gCtx, part, rw)
+			return err
+		})
+	}
	err = g.Wait()
	if err != nil {
		return err
	}
-	up.sha1s = up.sha1s[:up.parts]
-	return up.finish(ctx)
+	return up.Close(ctx)
}

-// Upload uploads the chunks from the input
-func (up *largeUpload) Upload(ctx context.Context) (err error) {
-	defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
+// Copy the chunks from the source to the destination
+func (up *largeUpload) Copy(ctx context.Context) (err error) {
+	defer atexit.OnError(&err, func() { _ = up.Abort(ctx) })()
	fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
	var (
-		g, gCtx    = errgroup.WithContext(ctx)
-		remaining  = up.size
-		uploadPool *pool.Pool
-		ci         = fs.GetConfig(ctx)
+		g, gCtx   = errgroup.WithContext(ctx)
+		remaining = up.size
	)
-	// If using large chunk size then make a temporary pool
-	if up.chunkSize <= int64(up.f.opt.ChunkSize) {
-		uploadPool = up.f.pool
-	} else {
-		uploadPool = pool.New(
-			time.Duration(up.f.opt.MemoryPoolFlushTime),
-			int(up.chunkSize),
-			ci.Transfers,
-			up.f.opt.MemoryPoolUseMmap,
-		)
-		defer uploadPool.Flush()
-	}
-	// Get an upload token and a buffer
-	getBuf := func() (buf []byte) {
-		up.f.getBuf(true)
-		if !up.doCopy {
-			buf = uploadPool.Get()
-		}
-		return buf
-	}
-	// Put an upload token and a buffer
-	putBuf := func(buf []byte) {
-		if !up.doCopy {
-			uploadPool.Put(buf)
-		}
-		up.f.putBuf(nil, true)
-	}
-	g.Go(func() error {
-		for part := int64(1); part <= up.parts; part++ {
-			// Get a block of memory from the pool and token which limits concurrency.
-			buf := getBuf()
-
-			// Fail fast, in case an errgroup managed function returns an error
-			// gCtx is cancelled. There is no point in uploading all the other parts.
-			if gCtx.Err() != nil {
-				putBuf(buf)
-				return nil
-			}
-
-			reqSize := remaining
-			if reqSize >= up.chunkSize {
-				reqSize = up.chunkSize
-			}
-
-			if !up.doCopy {
-				// Read the chunk
-				buf = buf[:reqSize]
-				_, err = io.ReadFull(up.in, buf)
-				if err != nil {
-					putBuf(buf)
-					return err
-				}
-			}
-
-			part := part // for the closure
-			g.Go(func() (err error) {
-				defer putBuf(buf)
-				if !up.doCopy {
-					err = up.transferChunk(gCtx, part, buf)
-				} else {
-					err = up.copyChunk(gCtx, part, reqSize)
-				}
-				return err
-			})
-			remaining -= reqSize
-		}
-		return nil
-	})
+	g.SetLimit(up.f.opt.UploadConcurrency)
+	for part := 0; part <= up.parts; part++ {
+		// Fail fast, in case an errgroup managed function returns an error
+		// gCtx is cancelled. There is no point in copying all the other parts.
+		if gCtx.Err() != nil {
+			break
+		}
+
+		reqSize := remaining
+		if reqSize >= up.chunkSize {
+			reqSize = up.chunkSize
+		}
+
+		part := part // for the closure
+		g.Go(func() (err error) {
+			return up.copyChunk(gCtx, part, reqSize)
+		})
+		remaining -= reqSize
+	}
	err = g.Wait()
	if err != nil {
		return err
	}
-	return up.finish(ctx)
+	return up.Close(ctx)
}

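The rewritten Copy drops the hand-rolled token and buffer-pool plumbing in favour of errgroup's built-in limiter: g.SetLimit caps how many g.Go callbacks run at once, so the loop body only queues work. Reduced to a standalone shape (copyOnePart is a hypothetical worker):

g, gCtx := errgroup.WithContext(ctx)
g.SetLimit(4) // at most 4 parts in flight at a time
for part := 0; part < parts; part++ {
	part := part // capture for the closure (needed before Go 1.22)
	g.Go(func() error {
		return copyOnePart(gCtx, part)
	})
}
err := g.Wait() // first error wins; gCtx is cancelled on failure
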
@@ -52,7 +52,7 @@ func (e *Error) Error() string {
		out += ": " + e.Message
	}
	if e.ContextInfo != nil {
-		out += fmt.Sprintf(" (%+v)", e.ContextInfo)
+		out += fmt.Sprintf(" (%s)", string(e.ContextInfo))
	}
	return out
}
@@ -63,7 +63,7 @@ var _ error = (*Error)(nil)

// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status,shared_link,owned_by"

-// Types of things in Item
+// Types of things in Item/ItemMini
const (
	ItemTypeFolder = "folder"
	ItemTypeFile   = "file"
@@ -72,20 +72,31 @@ const (
	ItemStatusDeleted = "deleted"
)

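The %+v to %s change above matters because ContextInfo holds raw JSON; the string() conversion implies a []byte-backed type such as json.RawMessage (an assumption, the field declaration is not part of this diff). Printing a []byte with %v renders decimal byte values, while string() yields the JSON text:

var raw json.RawMessage = []byte(`{"conflicts":{"id":"42"}}`)
fmt.Sprintf("(%+v)", raw)        // ([123 34 99 111 ...]), unreadable byte values
fmt.Sprintf("(%s)", string(raw)) // ({"conflicts":{"id":"42"}})
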
+// ItemMini is a subset of the elements in a full Item returned by some API calls
+type ItemMini struct {
+	Type       string `json:"type"`
+	ID         string `json:"id"`
+	SequenceID int64  `json:"sequence_id,string"`
+	Etag       string `json:"etag"`
+	SHA1       string `json:"sha1"`
+	Name       string `json:"name"`
+}
+
// Item describes a folder or a file as returned by Get Folder Items and others
type Item struct {
-	Type              string  `json:"type"`
-	ID                string  `json:"id"`
-	SequenceID        string  `json:"sequence_id"`
-	Etag              string  `json:"etag"`
-	SHA1              string  `json:"sha1"`
-	Name              string  `json:"name"`
-	Size              float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
-	CreatedAt         Time    `json:"created_at"`
-	ModifiedAt        Time    `json:"modified_at"`
-	ContentCreatedAt  Time    `json:"content_created_at"`
-	ContentModifiedAt Time    `json:"content_modified_at"`
-	ItemStatus        string  `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
+	Type              string   `json:"type"`
+	ID                string   `json:"id"`
+	SequenceID        int64    `json:"sequence_id,string"`
+	Etag              string   `json:"etag"`
+	SHA1              string   `json:"sha1"`
+	Name              string   `json:"name"`
+	Size              float64  `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
+	CreatedAt         Time     `json:"created_at"`
+	ModifiedAt        Time     `json:"modified_at"`
+	ContentCreatedAt  Time     `json:"content_created_at"`
+	ContentModifiedAt Time     `json:"content_modified_at"`
+	ItemStatus        string   `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
+	Parent            ItemMini `json:"parent"`
	SharedLink        struct {
		URL    string `json:"url,omitempty"`
		Access string `json:"access,omitempty"`
@@ -281,3 +292,30 @@ type User struct {
	Address   string `json:"address"`
	AvatarURL string `json:"avatar_url"`
}
+
+// FileTreeChangeEventTypes are the events that can require cache invalidation
+var FileTreeChangeEventTypes = map[string]struct{}{
+	"ITEM_COPY":                 {},
+	"ITEM_CREATE":               {},
+	"ITEM_MAKE_CURRENT_VERSION": {},
+	"ITEM_MODIFY":               {},
+	"ITEM_MOVE":                 {},
+	"ITEM_RENAME":               {},
+	"ITEM_TRASH":                {},
+	"ITEM_UNDELETE_VIA_TRASH":   {},
+	"ITEM_UPLOAD":               {},
+}
+
+// Event is an array element in the response returned from /events
+type Event struct {
+	EventType string `json:"event_type"`
+	EventID   string `json:"event_id"`
+	Source    Item   `json:"source"`
+}
+
+// Events is returned from /events
+type Events struct {
+	ChunkSize          int64   `json:"chunk_size"`
+	Entries            []Event `json:"entries"`
+	NextStreamPosition int64   `json:"next_stream_position"`
+}

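Note the SequenceID fields above move from string to int64 via a `,string` tag: encoding/json decodes the quoted numbers Box sends directly into an integer, which is what lets the event handling below compare SequenceIDs numerically. For example:

type itemMini struct {
	SequenceID int64 `json:"sequence_id,string"`
}

var it itemMini
_ = json.Unmarshal([]byte(`{"sequence_id":"7"}`), &it)
fmt.Println(it.SequenceID > 3) // true, an ordinary integer comparison
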
@@ -149,6 +149,23 @@ func init() {
			Default:  "",
			Help:     "Only show items owned by the login (email address) passed in.",
			Advanced: true,
+		}, {
+			Name:    "impersonate",
+			Default: "",
+			Help: `Impersonate this user ID when using a service account.
+
+Setting this flag allows rclone, when using a JWT service account, to
+act on behalf of another user by setting the as-user header.
+
+The user ID is the Box identifier for a user. User IDs can be found for
+any user via the GET /users endpoint, which is only available to
+admins, or by calling the GET /users/me endpoint with an authenticated
+user session.
+
+See: https://developer.box.com/guides/authentication/jwt/as-user/
+`,
+			Advanced:  true,
+			Sensitive: true,
		}, {
			Name: config.ConfigEncoding,
			Help: config.ConfigEncodingHelp,
@@ -262,19 +279,29 @@ type Options struct {
	AccessToken string               `config:"access_token"`
	ListChunk   int                  `config:"list_chunk"`
	OwnedBy     string               `config:"owned_by"`
+	Impersonate string               `config:"impersonate"`
}

+// ItemMeta defines metadata we cache for each Item ID
+type ItemMeta struct {
+	SequenceID int64  // the most recent event processed for this item
+	ParentID   string // ID of the parent directory of this item
+	Name       string // leaf name of this item
+}
+
// Fs represents a remote box
type Fs struct {
-	name         string                // name of this remote
-	root         string                // the path we are working on
-	opt          Options               // parsed options
-	features     *fs.Features          // optional features
-	srv          *rest.Client          // the connection to the server
-	dirCache     *dircache.DirCache    // Map of directory path to directory id
-	pacer        *fs.Pacer             // pacer for API calls
-	tokenRenewer *oauthutil.Renew      // renew the token on expiry
-	uploadToken  *pacer.TokenDispenser // control concurrency
+	name            string                // name of this remote
+	root            string                // the path we are working on
+	opt             Options               // parsed options
+	features        *fs.Features          // optional features
+	srv             *rest.Client          // the connection to the server
+	dirCache        *dircache.DirCache    // Map of directory path to directory id
+	pacer           *fs.Pacer             // pacer for API calls
+	tokenRenewer    *oauthutil.Renew      // renew the token on expiry
+	uploadToken     *pacer.TokenDispenser // control concurrency
+	itemMetaCacheMu *sync.Mutex           // protects itemMetaCache
+	itemMetaCache   map[string]ItemMeta   // map of Item ID to selected metadata
}

// Object describes a box object
@@ -422,12 +449,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

	ci := fs.GetConfig(ctx)
	f := &Fs{
-		name:        name,
-		root:        root,
-		opt:         *opt,
-		srv:         rest.NewClient(client).SetRoot(rootURL),
-		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
-		uploadToken: pacer.NewTokenDispenser(ci.Transfers),
+		name:            name,
+		root:            root,
+		opt:             *opt,
+		srv:             rest.NewClient(client).SetRoot(rootURL),
+		pacer:           fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		uploadToken:     pacer.NewTokenDispenser(ci.Transfers),
+		itemMetaCacheMu: new(sync.Mutex),
+		itemMetaCache:   make(map[string]ItemMeta),
	}
	f.features = (&fs.Features{
		CaseInsensitive: true,
@@ -440,6 +469,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
	}

+	// If using impersonate set an as-user header
+	if f.opt.Impersonate != "" {
+		f.srv.SetHeader("as-user", f.opt.Impersonate)
+	}
+
	jsonFile, ok := m.Get("box_config_file")
	boxSubType, boxSubTypeOk := m.Get("box_sub_type")

@@ -682,6 +716,17 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
		}
		entries = append(entries, o)
	}
+
+	// Cache some metadata for this Item to help us process events later
+	// on. In particular, the box event API does not provide the old path
+	// of the Item when it is renamed/deleted/moved/etc.
+	f.itemMetaCacheMu.Lock()
+	cachedItemMeta, found := f.itemMetaCache[info.ID]
+	if !found || cachedItemMeta.SequenceID < info.SequenceID {
+		f.itemMetaCache[info.ID] = ItemMeta{SequenceID: info.SequenceID, ParentID: directoryID, Name: info.Name}
+	}
+	f.itemMetaCacheMu.Unlock()
+
	return false
})
if err != nil {
@@ -1121,7 +1166,7 @@ func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) (err error) {
	var (
-		deleteErrors       = int64(0)
+		deleteErrors       atomic.Uint64
		concurrencyControl = make(chan struct{}, fs.GetConfig(ctx).Checkers)
		wg                 sync.WaitGroup
	)
@@ -1137,7 +1182,7 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
				err := f.deletePermanently(ctx, item.Type, item.ID)
				if err != nil {
					fs.Errorf(f, "failed to delete trash item %q (%q): %v", item.Name, item.ID, err)
-					atomic.AddInt64(&deleteErrors, 1)
+					deleteErrors.Add(1)
				}
			}()
		} else {
@@ -1146,12 +1191,250 @@ func (f *Fs) CleanUp(ctx context.Context) (err error) {
		return false
	})
	wg.Wait()
-	if deleteErrors != 0 {
-		return fmt.Errorf("failed to delete %d trash items", deleteErrors)
+	if deleteErrors.Load() != 0 {
+		return fmt.Errorf("failed to delete %d trash items", deleteErrors.Load())
	}
	return err
}

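CleanUp swaps a bare int64 plus atomic.AddInt64 for Go 1.19's atomic.Uint64 wrapper. The wrapper makes atomicity part of the type: the old code happily compiled a plain read like `deleteErrors != 0`, whereas the wrapper only exposes Load:

var n atomic.Uint64 // zero value is ready to use
n.Add(1)
v := n.Load() // the only way to read; no accidental plain reads

// pre-1.19 equivalent, where a bare `n != 0` read would compile but race:
//   var n uint64
//   atomic.AddUint64(&n, 1)
//   v := atomic.LoadUint64(&n)
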
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
	go func() {
		// get the `stream_position` early so all changes from now on get processed
		streamPosition, err := f.changeNotifyStreamPosition(ctx)
		if err != nil {
			fs.Infof(f, "Failed to get StreamPosition: %s", err)
		}

		var ticker *time.Ticker
		var tickerC <-chan time.Time
		for {
			select {
			case pollInterval, ok := <-pollIntervalChan:
				if !ok {
					if ticker != nil {
						ticker.Stop()
					}
					return
				}
				if ticker != nil {
					ticker.Stop()
					ticker, tickerC = nil, nil
				}
				if pollInterval != 0 {
					ticker = time.NewTicker(pollInterval)
					tickerC = ticker.C
				}
			case <-tickerC:
				if streamPosition == "" {
					streamPosition, err = f.changeNotifyStreamPosition(ctx)
					if err != nil {
						fs.Infof(f, "Failed to get StreamPosition: %s", err)
						continue
					}
				}
				streamPosition, err = f.changeNotifyRunner(ctx, notifyFunc, streamPosition)
				if err != nil {
					fs.Infof(f, "Change notify listener failure: %s", err)
				}
			}
		}
	}()
}

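The select loop above leans on a Go idiom: receiving from a nil channel blocks forever, so clearing tickerC disables the polling case until a non-zero interval arrives. The skeleton, with intervalCh and poll as stand-ins:

var ticker *time.Ticker
var tickerC <-chan time.Time // nil: the <-tickerC case never fires
for {
	select {
	case d, ok := <-intervalCh:
		if ticker != nil {
			ticker.Stop()
			ticker, tickerC = nil, nil
		}
		if !ok {
			return // channel closed: stop notifying
		}
		if d != 0 {
			ticker = time.NewTicker(d)
			tickerC = ticker.C
		}
	case <-tickerC:
		poll()
	}
}
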
func (f *Fs) changeNotifyStreamPosition(ctx context.Context) (streamPosition string, err error) {
	opts := rest.Opts{
		Method:     "GET",
		Path:       "/events",
		Parameters: fieldsValue(),
	}
	opts.Parameters.Set("stream_position", "now")
	opts.Parameters.Set("stream_type", "changes")

	var result api.Events
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return "", err
	}

	return strconv.FormatInt(result.NextStreamPosition, 10), nil
}

// Attempts to construct the full path for an object, given the ID of its
// parent directory and the name of the object.
//
// Can return "" if the parentID is not currently in the directory cache.
func (f *Fs) getFullPath(parentID string, childName string) (fullPath string) {
	fullPath = ""
	name := f.opt.Enc.ToStandardName(childName)
	if parentID != "" {
		if parentDir, ok := f.dirCache.GetInv(parentID); ok {
			if len(parentDir) > 0 {
				fullPath = parentDir + "/" + name
			} else {
				fullPath = name
			}
		}
	} else {
		// No parent, this object is at the root
		fullPath = name
	}
	return fullPath
}

func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), streamPosition string) (nextStreamPosition string, err error) {
	nextStreamPosition = streamPosition

	// box can send duplicate Event IDs; filter any in a single notify run
	processedEventIDs := make(map[string]bool)

	for {
		limit := f.opt.ListChunk

		// box only allows a max of 500 events
		if limit > 500 {
			limit = 500
		}

		opts := rest.Opts{
			Method:     "GET",
			Path:       "/events",
			Parameters: fieldsValue(),
		}
		opts.Parameters.Set("stream_position", nextStreamPosition)
		opts.Parameters.Set("stream_type", "changes")
		opts.Parameters.Set("limit", strconv.Itoa(limit))

		var result api.Events
		var resp *http.Response
		fs.Debugf(f, "Checking for changes on remote (next_stream_position: %q)", nextStreamPosition)
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
			return shouldRetry(ctx, resp, err)
		})
		if err != nil {
			return "", err
		}

		if result.ChunkSize != int64(len(result.Entries)) {
			return "", fmt.Errorf("invalid response to event request, chunk_size (%v) not equal to number of entries (%v)", result.ChunkSize, len(result.Entries))
		}

		nextStreamPosition = strconv.FormatInt(result.NextStreamPosition, 10)
		if result.ChunkSize == 0 {
			return nextStreamPosition, nil
		}

		type pathToClear struct {
			path      string
			entryType fs.EntryType
		}
		var pathsToClear []pathToClear
		newEventIDs := 0
		for _, entry := range result.Entries {
			if entry.EventID == "" || processedEventIDs[entry.EventID] { // missing Event ID, or already saw this one
				continue
			}
			processedEventIDs[entry.EventID] = true
			newEventIDs++

			if entry.Source.ID == "" { // missing File or Folder ID
				continue
			}
			if entry.Source.Type != api.ItemTypeFile && entry.Source.Type != api.ItemTypeFolder { // event is not for a file or folder
				continue
			}

			// Only interested in event types that result in a file tree change
			if _, found := api.FileTreeChangeEventTypes[entry.EventType]; !found {
				continue
			}

			f.itemMetaCacheMu.Lock()
			itemMeta, cachedItemMetaFound := f.itemMetaCache[entry.Source.ID]
			if cachedItemMetaFound {
				if itemMeta.SequenceID >= entry.Source.SequenceID {
					// Item in the cache has the same or newer SequenceID than
					// this event. Ignore this event, it must be old.
					f.itemMetaCacheMu.Unlock()
					continue
				}

				// This event is newer. Delete its entry from the cache,
				// we'll notify about its change below, then it's up to a
				// future list operation to repopulate the cache.
				delete(f.itemMetaCache, entry.Source.ID)
			}
			f.itemMetaCacheMu.Unlock()

			entryType := fs.EntryDirectory
			if entry.Source.Type == api.ItemTypeFile {
				entryType = fs.EntryObject
			}

			// The box event only includes the new path for the object (e.g.
			// the path after the object was moved). If there was an old path
			// saved in our cache, it must be cleared.
			if cachedItemMetaFound {
				path := f.getFullPath(itemMeta.ParentID, itemMeta.Name)
				if path != "" {
					pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
				}

				// If this is a directory, also delete it from the dir cache.
				// This will effectively invalidate the item metadata cache
				// entries for all descendents of this directory, since we
				// will no longer be able to construct a full path for them.
				// This is exactly what we want, since we don't want to notify
				// on the paths of these descendents if one of their ancestors
				// has been renamed/deleted.
				if entry.Source.Type == api.ItemTypeFolder {
					f.dirCache.FlushDir(path)
				}
			}

			// If the item is "active", then it is not trashed or deleted, so
			// it potentially has a valid parent.
			//
			// Construct the new path of the object, based on the Parent ID
			// and its name. If we get an empty result, it means we don't
			// currently know about this object so notification is unnecessary.
			if entry.Source.ItemStatus == api.ItemStatusActive {
				path := f.getFullPath(entry.Source.Parent.ID, entry.Source.Name)
				if path != "" {
					pathsToClear = append(pathsToClear, pathToClear{path: path, entryType: entryType})
				}
			}
		}

		// box can sometimes repeatedly return the same Event IDs within a
		// short period of time. If it stops giving us new ones, treat it
		// the same as if it returned us none at all.
		if newEventIDs == 0 {
			return nextStreamPosition, nil
		}

		notifiedPaths := make(map[string]bool)
		for _, p := range pathsToClear {
			if _, ok := notifiedPaths[p.path]; ok {
				continue
			}
			notifiedPaths[p.path] = true
			notifyFunc(p.path, p.entryType)
		}
		fs.Debugf(f, "Received %v events, resulting in %v paths and %v notifications", len(result.Entries), len(pathsToClear), len(notifiedPaths))
	}
}

// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {

backend/cache/cache_test.go (vendored)
@@ -18,7 +18,7 @@ func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestCache:",
		NilObject:  (*cache.Object)(nil),
-		UnimplementableFsMethods:     []string{"PublicLink", "OpenWriterAt"},
+		UnimplementableFsMethods:     []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter"},
		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
		SkipInvalidUTF8:              true, // invalid UTF-8 confuses the cache
	})

@@ -40,6 +40,7 @@ func TestIntegration(t *testing.T) {
		UnimplementableFsMethods: []string{
			"PublicLink",
			"OpenWriterAt",
+			"OpenChunkWriter",
			"MergeDirs",
			"DirCacheFlush",
			"UserInfo",

@@ -914,7 +914,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
	return do(ctx, uRemote, expire, unlink)
}

-// Put in to the remote path with the modTime given of the given size
+// PutUnchecked in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return

@@ -11,7 +11,7 @@ import (
)

var (
-	unimplementableFsMethods     = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect"}
+	unimplementableFsMethods     = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "OpenChunkWriter"}
	unimplementableObjectMethods = []string{}
)

@@ -257,6 +257,16 @@ func isMetadataFile(filename string) bool {
	return strings.HasSuffix(filename, metaFileExt)
}

+// Checks whether a file is a metadata file and returns the original
+// file name and a flag indicating whether it was a metadata file or
+// not.
+func unwrapMetadataFile(filename string) (string, bool) {
+	if !isMetadataFile(filename) {
+		return "", false
+	}
+	return filename[:len(filename)-len(metaFileExt)], true
+}
+
// makeDataName generates the file name for a data file with specified compression mode
func makeDataName(remote string, size int64, mode int) (newRemote string) {
	if mode != Uncompressed {
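Assuming metaFileExt is ".meta" (the constant's value is not shown in this diff, so that suffix is illustrative), the new helper behaves like:

unwrapMetadataFile("photo.jpg.meta") // -> "photo.jpg", true
unwrapMetadataFile("photo.jpg")      // -> "", false
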
@@ -979,7 +989,8 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
	wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
		fs.Logf(f, "path %q entryType %d", path, entryType)
		var (
-			wrappedPath string
+			wrappedPath    string
+			isMetadataFile bool
		)
		switch entryType {
		case fs.EntryDirectory:
@@ -987,7 +998,10 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
		case fs.EntryObject:
			// Note: All we really need to do to monitor the object is to check whether the metadata changed,
			// as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same.
-			wrappedPath = makeMetadataName(path)
+			wrappedPath, isMetadataFile = unwrapMetadataFile(path)
+			if !isMetadataFile {
+				return
+			}
		default:
			fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType)
			return

@@ -14,23 +14,26 @@ import (
	"github.com/rclone/rclone/fstest/fstests"
)

+var defaultOpt = fstests.Opt{
+	RemoteName: "TestCompress:",
+	NilObject:  (*Object)(nil),
+	UnimplementableFsMethods: []string{
+		"OpenWriterAt",
+		"OpenChunkWriter",
+		"MergeDirs",
+		"DirCacheFlush",
+		"PutUnchecked",
+		"PutStream",
+		"UserInfo",
+		"Disconnect",
+	},
+	TiersToTest:                  []string{"STANDARD", "STANDARD_IA"},
+	UnimplementableObjectMethods: []string{},
+}
+
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
-	opt := fstests.Opt{
-		RemoteName: *fstest.RemoteName,
-		NilObject:  (*Object)(nil),
-		UnimplementableFsMethods: []string{
-			"OpenWriterAt",
-			"MergeDirs",
-			"DirCacheFlush",
-			"PutUnchecked",
-			"PutStream",
-			"UserInfo",
-			"Disconnect",
-		},
-		TiersToTest:                  []string{"STANDARD", "STANDARD_IA"},
-		UnimplementableObjectMethods: []string{}}
-	fstests.Run(t, &opt)
+	fstests.Run(t, &defaultOpt)
}

// TestRemoteGzip tests GZIP compression
@@ -40,27 +43,13 @@ func TestRemoteGzip(t *testing.T) {
	}
	tempdir := filepath.Join(os.TempDir(), "rclone-compress-test-gzip")
	name := "TestCompressGzip"
-	fstests.Run(t, &fstests.Opt{
-		RemoteName: name + ":",
-		NilObject:  (*Object)(nil),
-		UnimplementableFsMethods: []string{
-			"OpenWriterAt",
-			"MergeDirs",
-			"DirCacheFlush",
-			"PutUnchecked",
-			"PutStream",
-			"UserInfo",
-			"Disconnect",
-		},
-		UnimplementableObjectMethods: []string{
-			"GetTier",
-			"SetTier",
-		},
-		ExtraConfig: []fstests.ExtraConfigItem{
-			{Name: name, Key: "type", Value: "compress"},
-			{Name: name, Key: "remote", Value: tempdir},
-			{Name: name, Key: "compression_mode", Value: "gzip"},
-		},
-		QuickTestOK: true,
-	})
+	opt := defaultOpt
+	opt.RemoteName = name + ":"
+	opt.ExtraConfig = []fstests.ExtraConfigItem{
+		{Name: name, Key: "type", Value: "compress"},
+		{Name: name, Key: "remote", Value: tempdir},
+		{Name: name, Key: "compression_mode", Value: "gzip"},
+	}
+	opt.QuickTestOK = true
+	fstests.Run(t, &opt)
}

@@ -24,7 +24,7 @@ func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: *fstest.RemoteName,
		NilObject:  (*crypt.Object)(nil),
-		UnimplementableFsMethods:     []string{"OpenWriterAt"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
		UnimplementableObjectMethods: []string{"MimeType"},
	})
}
@@ -45,7 +45,7 @@ func TestStandardBase32(t *testing.T) {
			{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
			{Name: name, Key: "filename_encryption", Value: "standard"},
		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
		UnimplementableObjectMethods: []string{"MimeType"},
		QuickTestOK:                  true,
	})
@@ -67,7 +67,7 @@ func TestStandardBase64(t *testing.T) {
			{Name: name, Key: "filename_encryption", Value: "standard"},
			{Name: name, Key: "filename_encoding", Value: "base64"},
		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
		UnimplementableObjectMethods: []string{"MimeType"},
		QuickTestOK:                  true,
	})
@@ -89,7 +89,7 @@ func TestStandardBase32768(t *testing.T) {
			{Name: name, Key: "filename_encryption", Value: "standard"},
			{Name: name, Key: "filename_encoding", Value: "base32768"},
		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
		UnimplementableObjectMethods: []string{"MimeType"},
		QuickTestOK:                  true,
	})
@@ -111,7 +111,7 @@ func TestOff(t *testing.T) {
			{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
			{Name: name, Key: "filename_encryption", Value: "off"},
		},
-		UnimplementableFsMethods:     []string{"OpenWriterAt"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
		UnimplementableObjectMethods: []string{"MimeType"},
		QuickTestOK:                  true,
	})
@@ -137,7 +137,7 @@ func TestObfuscate(t *testing.T) {
			{Name: name, Key: "filename_encryption", Value: "obfuscate"},
		},
		SkipBadWindowsCharacters:     true,
-		UnimplementableFsMethods:     []string{"OpenWriterAt"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
		UnimplementableObjectMethods: []string{"MimeType"},
		QuickTestOK:                  true,
	})
@@ -164,7 +164,7 @@ func TestNoDataObfuscate(t *testing.T) {
			{Name: name, Key: "no_data_encryption", Value: "true"},
		},
		SkipBadWindowsCharacters:     true,
-		UnimplementableFsMethods:     []string{"OpenWriterAt"},
+		UnimplementableFsMethods:     []string{"OpenWriterAt", "OpenChunkWriter"},
		UnimplementableObjectMethods: []string{"MimeType"},
		QuickTestOK:                  true,
	})

@@ -594,10 +594,32 @@ This resource key requirement only applies to a subset of old files.

Note also that opening the folder once in the web interface (with the
user you've authenticated rclone with) seems to be enough so that the
-resource key is no needed.
+resource key is not needed.
`,
			Advanced:  true,
			Sensitive: true,
		}, {
+			Name: "fast_list_bug_fix",
+			Help: `Work around a bug in Google Drive listing.
+
+Normally rclone will work around a bug in Google Drive when using
+--fast-list (ListR) where the search "(A in parents) or (B in
+parents)" returns nothing sometimes. See #3114, #4289 and
+https://issuetracker.google.com/issues/149522397
+
+Rclone detects this by finding no items in more than one directory
+when listing and retries them as lists of individual directories.
+
+This means that if you have a lot of empty directories rclone will end
+up listing them all individually and this can take many more API
+calls.
+
+This flag allows the work-around to be disabled. This is **not**
+recommended in normal use - only if you have a particular case you are
+having trouble with like many empty directories.
+`,
+			Advanced: true,
+			Default:  true,
		}, {
			Name: config.ConfigEncoding,
			Help: config.ConfigEncodingHelp,
@@ -672,6 +694,7 @@ type Options struct {
	SkipShortcuts         bool                 `config:"skip_shortcuts"`
	SkipDanglingShortcuts bool                 `config:"skip_dangling_shortcuts"`
	ResourceKey           string               `config:"resource_key"`
+	FastListBugFix        bool                 `config:"fast_list_bug_fix"`
	Enc                   encoder.MultiEncoder `config:"encoding"`
	EnvAuth               bool                 `config:"env_auth"`
}
@@ -1891,7 +1914,7 @@ func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listRE
	// drive where (A in parents) or (B in parents) returns nothing
	// sometimes. See #3114, #4289 and
	// https://issuetracker.google.com/issues/149522397
-	if len(dirs) > 1 && !foundItems {
+	if f.opt.FastListBugFix && len(dirs) > 1 && !foundItems {
		if atomic.SwapInt32(&f.grouping, 1) != 1 {
			fs.Debugf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs))
		}

@@ -28,14 +28,14 @@ var retryErrorCodes = []int{
	509, // Bandwidth Limit Exceeded
}

-var errorRegex = regexp.MustCompile(`#\d{1,3}`)
+var errorRegex = regexp.MustCompile(`#(\d{1,3})`)

func parseFichierError(err error) int {
	matches := errorRegex.FindStringSubmatch(err.Error())
	if len(matches) == 0 {
		return 0
	}
-	code, err := strconv.Atoi(matches[0])
+	code, err := strconv.Atoi(matches[1])
	if err != nil {
		fs.Debugf(nil, "failed parsing fichier error: %v", err)
		return 0
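The two changes above work as a pair: FindStringSubmatch returns the whole match at index 0 and capture groups from index 1 onwards, so with the old pattern matches[0] was "#374" and strconv.Atoi choked on the leading '#', silently returning code 0. With the capture group:

re := regexp.MustCompile(`#(\d{1,3})`)
m := re.FindStringSubmatch("some 1Fichier error #374") // illustrative input
// m[0] == "#374"  whole match; Atoi would return an error
// m[1] == "374"   capture group; parses cleanly
code, _ := strconv.Atoi(m[1]) // code == 374
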
@@ -408,6 +408,32 @@ func (f *Fs) moveFile(ctx context.Context, url string, folderID int, rename stri
	return response, nil
}

+func (f *Fs) moveDir(ctx context.Context, folderID int, newLeaf string, destinationFolderID int) (response *MoveDirResponse, err error) {
+	request := &MoveDirRequest{
+		FolderID:            folderID,
+		DestinationFolderID: destinationFolderID,
+		Rename:              newLeaf,
+		// DestinationUser: destinationUser,
+	}
+
+	opts := rest.Opts{
+		Method: "POST",
+		Path:   "/folder/mv.cgi",
+	}
+
+	response = &MoveDirResponse{}
+	err = f.pacer.Call(func() (bool, error) {
+		resp, err := f.rest.CallJSON(ctx, &opts, request, response)
+		return shouldRetry(ctx, resp, err)
+	})
+
+	if err != nil {
+		return nil, fmt.Errorf("couldn't move dir: %w", err)
+	}
+
+	return response, nil
+}
+
func (f *Fs) copyFile(ctx context.Context, url string, folderID int, rename string) (response *CopyFileResponse, err error) {
	request := &CopyFileRequest{
		URLs: []string{url},

@@ -488,6 +488,51 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
	return dstObj, nil
}

+// DirMove moves src, srcRemote to this remote at dstRemote
+// using server-side move operations.
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantDirMove.
+//
+// If destination exists then return fs.ErrorDirExists.
+//
+// This is complicated by the fact that we can't use moveDir to move
+// to a different directory AND rename at the same time as it can
+// overwrite files in the source directory.
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
+	srcFs, ok := src.(*Fs)
+	if !ok {
+		fs.Debugf(srcFs, "Can't move directory - not same remote type")
+		return fs.ErrorCantDirMove
+	}
+
+	srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
+	if err != nil {
+		return err
+	}
+	srcIDnumeric, err := strconv.Atoi(srcID)
+	if err != nil {
+		return err
+	}
+	dstDirectoryIDnumeric, err := strconv.Atoi(dstDirectoryID)
+	if err != nil {
+		return err
+	}
+
+	var resp *MoveDirResponse
+	resp, err = f.moveDir(ctx, srcIDnumeric, dstLeaf, dstDirectoryIDnumeric)
+	if err != nil {
+		return fmt.Errorf("couldn't rename leaf: %w", err)
+	}
+	if resp.Status != "OK" {
+		return fmt.Errorf("couldn't rename leaf: %s", resp.Message)
+	}
+
+	srcFs.dirCache.FlushDir(srcRemote)
+	return nil
+}
+
// Copy src to this remote using server side move operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
@@ -561,6 +606,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
var (
	_ fs.Fs             = (*Fs)(nil)
	_ fs.Mover          = (*Fs)(nil)
+	_ fs.DirMover       = (*Fs)(nil)
	_ fs.Copier         = (*Fs)(nil)
	_ fs.PublicLinker   = (*Fs)(nil)
	_ fs.PutUncheckeder = (*Fs)(nil)

@@ -70,6 +70,22 @@ type MoveFileResponse struct {
	URLs   []string `json:"urls"`
}

+// MoveDirRequest is the request structure of the corresponding request
+type MoveDirRequest struct {
+	FolderID            int    `json:"folder_id"`
+	DestinationFolderID int    `json:"destination_folder_id,omitempty"`
+	DestinationUser     string `json:"destination_user"`
+	Rename              string `json:"rename,omitempty"`
+}
+
+// MoveDirResponse is the response structure of the corresponding request
+type MoveDirResponse struct {
+	Status  string `json:"status"`
+	Message string `json:"message"`
+	OldName string `json:"old_name"`
+	NewName string `json:"new_name"`
+}
+
// CopyFileRequest is the request structure of the corresponding request
type CopyFileRequest struct {
	URLs []string `json:"urls"`

@@ -158,9 +158,9 @@ type Fs struct {
	tokenMu     sync.Mutex // hold when reading the token
	token       string     // current access token
	tokenExpiry time.Time  // time the current token expires
-	tokenExpired    int32         // read and written with atomic
-	canCopyWithName bool          // set if detected that can use fi_name in copy
-	precision       time.Duration // precision reported
+	tokenExpired    atomic.Int32
+	canCopyWithName bool          // set if detected that can use fi_name in copy
+	precision       time.Duration // precision reported
}

// Object describes a filefabric object
@@ -243,7 +243,7 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error, st
	err = status // return the error from the RPC
	code := status.GetCode()
	if code == "login_token_expired" {
-		atomic.AddInt32(&f.tokenExpired, 1)
+		f.tokenExpired.Add(1)
	} else {
		for _, retryCode := range retryStatusCodes {
			if code == retryCode.code {
@@ -323,12 +323,12 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
	var refreshed = false
	defer func() {
		if refreshed {
-			atomic.StoreInt32(&f.tokenExpired, 0)
+			f.tokenExpired.Store(0)
		}
		f.tokenMu.Unlock()
	}()

-	expired := atomic.LoadInt32(&f.tokenExpired) != 0
+	expired := f.tokenExpired.Load() != 0
	if expired {
		fs.Debugf(f, "Token invalid - refreshing")
	}

@@ -15,7 +15,7 @@ import (
	"sync"
	"time"

-	"github.com/rclone/ftp"
+	"github.com/jlaffaye/ftp"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/config"
@@ -28,6 +28,7 @@ import (
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/env"
	"github.com/rclone/rclone/lib/pacer"
+	"github.com/rclone/rclone/lib/proxy"
	"github.com/rclone/rclone/lib/readers"
)

@@ -174,6 +175,18 @@ Enabled by default. Use 0 to disable.`,

If this is set and no password is supplied then rclone will ask for a password
`,
			Advanced: true,
		}, {
+			Name:    "socks_proxy",
+			Default: "",
+			Help: `Socks 5 proxy host.
+
+Supports the format user:pass@host:port, user@host:port, host:port.
+
+Example:
+
+    myUser:myPass@localhost:9005
+`,
+			Advanced: true,
		}, {
			Name: config.ConfigEncoding,
			Help: config.ConfigEncodingHelp,
@@ -218,6 +231,7 @@ type Options struct {
	ShutTimeout fs.Duration          `config:"shut_timeout"`
	AskPassword bool                 `config:"ask_password"`
	Enc         encoder.MultiEncoder `config:"encoding"`
+	SocksProxy  string               `config:"socks_proxy"`
}

// Fs represents a remote FTP server
@@ -235,7 +249,6 @@ type Fs struct {
	pool     []*ftp.ServerConn
	drain    *time.Timer // used to drain the pool when we stop using the connections
	tokens   *pacer.TokenDispenser
-	tlsConf  *tls.Config
	pacer    *fs.Pacer // pacer for FTP connections
	fGetTime bool      // true if the ftp library accepts GetTime
	fSetTime bool      // true if the ftp library accepts SetTime
@@ -348,10 +361,36 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
	return fserrors.ShouldRetry(err), err
}

+// Get a TLS config with a unique session cache.
+//
+// We can't share session caches between connections.
+//
+// See: https://github.com/rclone/rclone/issues/7234
+func (f *Fs) tlsConfig() *tls.Config {
+	var tlsConfig *tls.Config
+	if f.opt.TLS || f.opt.ExplicitTLS {
+		tlsConfig = &tls.Config{
+			ServerName:         f.opt.Host,
+			InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
+		}
+		if f.opt.TLSCacheSize > 0 {
+			tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(f.opt.TLSCacheSize)
+		}
+		if f.opt.DisableTLS13 {
+			tlsConfig.MaxVersion = tls.VersionTLS12
+		}
+	}
+	return tlsConfig
+}
+
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
	fs.Debugf(f, "Connecting to FTP server")

+	// tls.Config for this connection only. Will be used for data
+	// and control connections.
+	tlsConfig := f.tlsConfig()
+
	// Make ftp library dial with fshttp dialer optionally using TLS
	initialConnection := true
	dial := func(network, address string) (conn net.Conn, err error) {
@@ -359,12 +398,17 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
		defer func() {
			fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
		}()
-		conn, err = fshttp.NewDialer(ctx).Dial(network, address)
+		baseDialer := fshttp.NewDialer(ctx)
+		if f.opt.SocksProxy != "" {
+			conn, err = proxy.SOCKS5Dial(network, address, f.opt.SocksProxy, baseDialer)
+		} else {
+			conn, err = baseDialer.Dial(network, address)
+		}
		if err != nil {
			return nil, err
		}
		// Connect using cleartext only for non TLS
-		if f.tlsConf == nil {
+		if tlsConfig == nil {
			return conn, nil
		}
		// Initial connection only needs to be cleartext for explicit TLS
@@ -373,7 +417,7 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
			return conn, nil
		}
		// Upgrade connection to TLS
-		tlsConn := tls.Client(conn, f.tlsConf)
+		tlsConn := tls.Client(conn, tlsConfig)
		// Do the initial handshake - tls.Client doesn't do it for us
		// If we do this then connections to proftpd/pureftpd lock up
		// See: https://github.com/rclone/rclone/issues/6426
@@ -395,9 +439,9 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
	if f.opt.TLS {
		// Our dialer takes care of TLS but ftp library also needs tlsConf
		// as a trigger for sending PSBZ and PROT options to server.
-		ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
+		ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
	} else if f.opt.ExplicitTLS {
-		ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
+		ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(tlsConfig))
	}
	if f.opt.DisableEPSV {
		ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
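SOCKS5Dial above (from rclone's lib/proxy, not to be confused with golang.org/x/net/proxy) takes the documented user:pass@host:port spec plus the base dialer. For comparison, the plain x/net version of the same handshake looks roughly like this sketch:

auth := &proxy.Auth{User: "myUser", Password: "myPass"}
dialer, err := proxy.SOCKS5("tcp", "localhost:9005", auth, proxy.Direct)
if err != nil {
	return err
}
conn, err := dialer.Dial("tcp", "ftp.example.com:21") // hypothetical target
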
@@ -552,19 +596,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
	if opt.TLS && opt.ExplicitTLS {
		return nil, errors.New("implicit TLS and explicit TLS are mutually incompatible, please revise your config")
	}
-	var tlsConfig *tls.Config
-	if opt.TLS || opt.ExplicitTLS {
-		tlsConfig = &tls.Config{
-			ServerName:         opt.Host,
-			InsecureSkipVerify: opt.SkipVerifyTLSCert,
-		}
-		if opt.TLSCacheSize > 0 {
-			tlsConfig.ClientSessionCache = tls.NewLRUClientSessionCache(opt.TLSCacheSize)
-		}
-		if opt.DisableTLS13 {
-			tlsConfig.MaxVersion = tls.VersionTLS12
-		}
-	}
	u := protocol + path.Join(dialAddr+"/", root)
	ci := fs.GetConfig(ctx)
	f := &Fs{
@@ -577,7 +608,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs
		pass:     pass,
		dialAddr: dialAddr,
		tokens:   pacer.NewTokenDispenser(opt.Concurrency),
-		tlsConf:  tlsConfig,
		pacer:    fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}
	f.features = (&fs.Features{

@@ -23,6 +23,7 @@ func TestIntegration(t *testing.T) {
	NilObject: (*hasher.Object)(nil),
	UnimplementableFsMethods: []string{
		"OpenWriterAt",
+		"OpenChunkWriter",
	},
	UnimplementableObjectMethods: []string{},
}

@@ -21,6 +21,7 @@ import (
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/lib/pacer"
)

// Fs represents a HDFS server
@@ -31,8 +32,15 @@ type Fs struct {
	opt    Options        // options for this backend
	ci     *fs.ConfigInfo // global config
	client *hdfs.Client
+	pacer  *fs.Pacer      // pacer for API calls
}

+const (
+	minSleep      = 20 * time.Millisecond
+	maxSleep      = 10 * time.Second
+	decayConstant = 2 // bigger for slower decay, exponential
+)
+
// copy-paste from https://github.com/colinmarc/hdfs/blob/master/cmd/hdfs/kerberos.go
func getKerberosClient() (*krb.Client, error) {
	configPath := os.Getenv("KRB5_CONFIG")
@@ -114,6 +122,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
		opt:    *opt,
		ci:     fs.GetConfig(ctx),
		client: client,
+		pacer:  fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}

	f.features = (&fs.Features{

@@ -5,10 +5,12 @@ package hdfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/colinmarc/hdfs/v2"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
@@ -106,7 +108,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
|
||||
// Update object
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
realpath := o.fs.realpath(src.Remote())
|
||||
realpath := o.fs.realpath(o.remote)
|
||||
dirname := path.Dir(realpath)
|
||||
fs.Debugf(o.fs, "update [%s]", realpath)
|
||||
|
||||
@@ -141,7 +143,23 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
return err
|
||||
}
|
||||
|
||||
err = out.Close()
|
||||
// If the datanodes have acknowledged all writes but the namenode
|
||||
// has not yet been notified, FileWriter.Close can return ErrReplicating
|
||||
// (wrapped in an os.PathError). This indicates that all data
|
||||
// has been written, but the lease is still open for the file.
|
||||
//
|
||||
// It is safe in this case to either ignore the error (and let
|
||||
// the lease expire on its own) or to call Close multiple
|
||||
// times until it completes without an error. The Java client,
|
||||
// for context, always chooses to retry, with exponential
|
||||
// backoff.
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
err := out.Close()
|
||||
if err == nil {
|
||||
return false, nil
|
||||
}
|
||||
return errors.Is(err, hdfs.ErrReplicating), err
|
||||
})
|
||||
if err != nil {
|
||||
cleanup()
|
||||
return err
|
||||
|
||||
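A minimal sketch of the same retry-on-ErrReplicating pattern without rclone's pacer, assuming github.com/colinmarc/hdfs/v2; errors.Is sees through the os.PathError wrapper:

package hdfsutil

import (
	"errors"
	"time"

	"github.com/colinmarc/hdfs/v2"
)

// closeWithRetry retries FileWriter.Close with exponential backoff while
// the namenode reports that replication is still in progress.
func closeWithRetry(out *hdfs.FileWriter) error {
	delay := 100 * time.Millisecond
	for i := 0; i < 10; i++ {
		err := out.Close()
		if err == nil || !errors.Is(err, hdfs.ErrReplicating) {
			return err
		}
		time.Sleep(delay)
		delay *= 2 // back off like the Java client does
	}
	return out.Close() // last attempt before giving up
}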
@@ -67,9 +67,13 @@ const (
|
||||
legacyEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
|
||||
legacyConfigVersion = 0
|
||||
|
||||
teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
|
||||
teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
|
||||
teliaCloudClientID = "desktop"
|
||||
teliaseCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
|
||||
teliaseCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
|
||||
teliaseCloudClientID = "desktop"
|
||||
|
||||
telianoCloudTokenURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/token"
|
||||
telianoCloudAuthURL = "https://sky-auth.telia.no/auth/realms/get/protocol/openid-connect/auth"
|
||||
telianoCloudClientID = "desktop"
|
||||
|
||||
tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
|
||||
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
|
||||
@@ -138,8 +142,11 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
|
||||
Value: "legacy",
|
||||
Help: "Legacy authentication.\nThis is only required for certain whitelabel versions of Jottacloud and not recommended for normal users.",
|
||||
}, {
|
||||
Value: "telia",
|
||||
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud.",
|
||||
Value: "telia_se",
|
||||
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud (Sweden).",
|
||||
}, {
|
||||
Value: "telia_no",
|
||||
Help: "Telia Sky authentication.\nUse this if you are using Telia Sky (Norway).",
|
||||
}, {
|
||||
Value: "tele2",
|
||||
Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
|
||||
@@ -238,17 +245,32 @@ machines.`)
|
||||
return nil, fmt.Errorf("error while saving token: %w", err)
|
||||
}
|
||||
return fs.ConfigGoto("choose_device")
|
||||
case "telia": // telia cloud config
|
||||
case "telia_se": // telia_se cloud config
|
||||
m.Set("configVersion", fmt.Sprint(configVersion))
|
||||
m.Set(configClientID, teliaCloudClientID)
|
||||
m.Set(configTokenURL, teliaCloudTokenURL)
|
||||
m.Set(configClientID, teliaseCloudClientID)
|
||||
m.Set(configTokenURL, teliaseCloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: teliaCloudAuthURL,
|
||||
TokenURL: teliaCloudTokenURL,
|
||||
AuthURL: teliaseCloudAuthURL,
|
||||
TokenURL: teliaseCloudTokenURL,
|
||||
},
|
||||
ClientID: teliaCloudClientID,
|
||||
ClientID: teliaseCloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
},
|
||||
})
|
||||
case "telia_no": // telia_no cloud config
|
||||
m.Set("configVersion", fmt.Sprint(configVersion))
|
||||
m.Set(configClientID, telianoCloudClientID)
|
||||
m.Set(configTokenURL, telianoCloudTokenURL)
|
||||
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
|
||||
OAuth2Config: &oauth2.Config{
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: telianoCloudAuthURL,
|
||||
TokenURL: telianoCloudTokenURL,
|
||||
},
|
||||
ClientID: telianoCloudClientID,
|
||||
Scopes: []string{"openid", "jotta-default", "offline_access"},
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
},
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
@@ -243,7 +244,7 @@ type Fs struct {
|
||||
precision time.Duration // precision of local filesystem
|
||||
warnedMu sync.Mutex // used for locking access to 'warned'.
|
||||
warned map[string]struct{} // whether we have warned about this string
|
||||
xattrSupported int32 // whether xattrs are supported (atomic access)
|
||||
xattrSupported atomic.Int32 // whether xattrs are supported
|
||||
|
||||
// do os.Lstat or os.Stat
|
||||
lstat func(name string) (os.FileInfo, error)
|
||||
@@ -291,7 +292,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
lstat: os.Lstat,
|
||||
}
|
||||
if xattrSupported {
|
||||
f.xattrSupported = 1
|
||||
f.xattrSupported.Store(1)
|
||||
}
|
||||
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
|
||||
f.features = (&fs.Features{
|
||||
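The change above migrates from the legacy atomic functions on a plain int32 to the typed atomic.Int32 introduced in Go 1.19, which prevents accidental non-atomic access to the field. A small self-contained illustration of the same calls:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var supported atomic.Int32 // zero value is 0, usable without initialisation
	supported.Store(1)         // replaces f.xattrSupported = 1

	// replaces atomic.CompareAndSwapInt32(&f.xattrSupported, 1, 0):
	// only the first caller flips it, so the warning is logged once.
	if supported.CompareAndSwap(1, 0) {
		fmt.Println("xattrs not supported - disabling")
	}
	fmt.Println("supported =", supported.Load()) // replaces atomic.LoadInt32
}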
@@ -641,7 +642,13 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
//
|
||||
// If it isn't empty it will return an error
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
return os.Remove(f.localPath(dir))
|
||||
localPath := f.localPath(dir)
|
||||
if fi, err := os.Stat(localPath); err != nil {
|
||||
return err
|
||||
} else if !fi.IsDir() {
|
||||
return fs.ErrorIsFile
|
||||
}
|
||||
return os.Remove(localPath)
|
||||
}
|
||||
|
||||
// Precision of the file system
|
||||
|
||||
@@ -6,7 +6,6 @@ package local
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
|
||||
"github.com/pkg/xattr"
|
||||
@@ -28,7 +27,7 @@ func (f *Fs) xattrIsNotSupported(err error) bool {
|
||||
// Xattrs not being supported can surface as ENOTSUP, ENOATTR, or EINVAL (on Solaris)
|
||||
if xattrErr.Err == syscall.EINVAL || xattrErr.Err == syscall.ENOTSUP || xattrErr.Err == xattr.ENOATTR {
|
||||
// Show xattrs not supported
|
||||
if atomic.CompareAndSwapInt32(&f.xattrSupported, 1, 0) {
|
||||
if f.xattrSupported.CompareAndSwap(1, 0) {
|
||||
fs.Errorf(f, "xattrs not supported - disabling: %v", err)
|
||||
}
|
||||
return true
|
||||
@@ -41,7 +40,7 @@ func (f *Fs) xattrIsNotSupported(err error) bool {
|
||||
// It doesn't return any attributes owned by this backend in
|
||||
// metadataKeys
|
||||
func (o *Object) getXattr() (metadata fs.Metadata, err error) {
|
||||
if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
|
||||
if !xattrSupported || o.fs.xattrSupported.Load() == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
var list []string
|
||||
@@ -90,7 +89,7 @@ func (o *Object) getXattr() (metadata fs.Metadata, err error) {
|
||||
//
|
||||
// It doesn't set any attributes owned by this backend in metadataKeys
|
||||
func (o *Object) setXattr(metadata fs.Metadata) (err error) {
|
||||
if !xattrSupported || atomic.LoadInt32(&o.fs.xattrSupported) == 0 {
|
||||
if !xattrSupported || o.fs.xattrSupported.Load() == 0 {
|
||||
return nil
|
||||
}
|
||||
for k, value := range metadata {
|
||||
|
||||
@@ -767,6 +767,17 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
if apiError, ok := err.(*Error); ok {
|
||||
// Work around a bug maybe in opendrive or maybe in rclone.
|
||||
//
|
||||
// We should know whether the folder exists or not by the call to
|
||||
// FindDir above so exactly why it is not found here is a mystery.
|
||||
//
|
||||
// This manifests as a failure in fs/sync TestSyncOverlapWithFilter
|
||||
if apiError.Info.Message == "Folder is already deleted" {
|
||||
return fs.DirEntries{}, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("failed to get folder list: %w", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
|
||||
"github.com/oracle/oci-go-sdk/v65/common"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -128,18 +127,3 @@ func useBYOKCopyObject(fs *Fs, request *objectstorage.CopyObjectRequest) {
|
||||
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
|
||||
}
|
||||
}
|
||||
|
||||
func useBYOKUpload(fs *Fs, request *transfer.UploadRequest) {
|
||||
if fs.opt.SSEKMSKeyID != "" {
|
||||
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
|
||||
}
|
||||
if fs.opt.SSECustomerAlgorithm != "" {
|
||||
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
|
||||
}
|
||||
if fs.opt.SSECustomerKey != "" {
|
||||
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
|
||||
}
|
||||
if fs.opt.SSECustomerKeySha256 != "" {
|
||||
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ package oracleobjectstorage
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -196,6 +197,32 @@ func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string
|
||||
// for "dir" and it returns "dirKey"
|
||||
func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory string) (
|
||||
uploads []*objectstorage.MultipartUpload, err error) {
|
||||
return f.listMultipartUploadsObject(ctx, bucketName, directory, false)
|
||||
}
|
||||
|
||||
// findLatestMultipartUpload finds the most recent outstanding multipart upload for (bucket, key)
|
||||
//
|
||||
// Note that rather lazily we treat key as a prefix, so it matches
|
||||
// directories and objects. This could surprise the user if they ask
|
||||
// for "dir" and it returns "dirKey"
|
||||
func (f *Fs) findLatestMultipartUpload(ctx context.Context, bucketName, directory string) (
|
||||
uploads []*objectstorage.MultipartUpload, err error) {
|
||||
pastUploads, err := f.listMultipartUploadsObject(ctx, bucketName, directory, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(pastUploads) > 0 {
|
||||
sort.Slice(pastUploads, func(i, j int) bool {
|
||||
return pastUploads[i].TimeCreated.After(pastUploads[j].TimeCreated.Time)
|
||||
})
|
||||
return pastUploads[:1], nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (f *Fs) listMultipartUploadsObject(ctx context.Context, bucketName, directory string, exact bool) (
|
||||
uploads []*objectstorage.MultipartUpload, err error) {
|
||||
|
||||
uploads = []*objectstorage.MultipartUpload{}
|
||||
req := objectstorage.ListMultipartUploadsRequest{
|
||||
@@ -217,7 +244,13 @@ func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory str
|
||||
if directory != "" && item.Object != nil && !strings.HasPrefix(*item.Object, directory) {
|
||||
continue
|
||||
}
|
||||
uploads = append(uploads, &response.Items[index])
|
||||
if exact {
|
||||
if *item.Object == directory {
|
||||
uploads = append(uploads, &response.Items[index])
|
||||
}
|
||||
} else {
|
||||
uploads = append(uploads, &response.Items[index])
|
||||
}
|
||||
}
|
||||
if response.OpcNextPage == nil {
|
||||
break
|
||||
@@ -226,3 +259,34 @@ func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory str
|
||||
}
|
||||
return uploads, nil
|
||||
}
|
||||
|
||||
func (f *Fs) listMultipartUploadParts(ctx context.Context, bucketName, bucketPath string, uploadID string) (
|
||||
uploadedParts map[int]objectstorage.MultipartUploadPartSummary, err error) {
|
||||
uploadedParts = make(map[int]objectstorage.MultipartUploadPartSummary)
|
||||
req := objectstorage.ListMultipartUploadPartsRequest{
|
||||
NamespaceName: common.String(f.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(bucketPath),
|
||||
UploadId: common.String(uploadID),
|
||||
Limit: common.Int(1000),
|
||||
}
|
||||
|
||||
var response objectstorage.ListMultipartUploadPartsResponse
|
||||
for {
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
response, err = f.srv.ListMultipartUploadParts(ctx, req)
|
||||
return shouldRetry(ctx, response.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
return uploadedParts, err
|
||||
}
|
||||
for _, item := range response.Items {
|
||||
uploadedParts[*item.PartNumber] = item
|
||||
}
|
||||
if response.OpcNextPage == nil {
|
||||
break
|
||||
}
|
||||
req.Page = response.OpcNextPage
|
||||
}
|
||||
return uploadedParts, nil
|
||||
}
|
||||
|
||||
backend/oracleobjectstorage/multipart.go (new file, 441 lines)
@@ -0,0 +1,441 @@
|
||||
//go:build !plan9 && !solaris && !js
|
||||
// +build !plan9,!solaris,!js
|
||||
|
||||
package oracleobjectstorage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/swift/v2"
|
||||
"github.com/rclone/rclone/lib/multipart"
|
||||
"github.com/rclone/rclone/lib/pool"
|
||||
"golang.org/x/net/http/httpguts"
|
||||
|
||||
"github.com/oracle/oci-go-sdk/v65/common"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/chunksize"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
)
|
||||
|
||||
var warnStreamUpload sync.Once
|
||||
|
||||
// Info needed for an upload
|
||||
type uploadInfo struct {
|
||||
req *objectstorage.PutObjectRequest
|
||||
md5sumHex string
|
||||
}
|
||||
|
||||
type objectChunkWriter struct {
|
||||
chunkSize int64
|
||||
size int64
|
||||
f *Fs
|
||||
bucket *string
|
||||
key *string
|
||||
uploadID *string
|
||||
partsToCommit []objectstorage.CommitMultipartUploadPartDetails
|
||||
partsToCommitMu sync.Mutex
|
||||
existingParts map[int]objectstorage.MultipartUploadPartSummary
|
||||
eTag string
|
||||
md5sMu sync.Mutex
|
||||
md5s []byte
|
||||
ui uploadInfo
|
||||
o *Object
|
||||
}
|
||||
|
||||
func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.Reader, options ...fs.OpenOption) error {
|
||||
_, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
|
||||
Open: o.fs,
|
||||
OpenOptions: options,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// OpenChunkWriter returns the chunk size and a ChunkWriter
|
||||
//
|
||||
// Pass in the remote and the src object
|
||||
// You can also use options to hint at the desired chunk size
|
||||
func (f *Fs) OpenChunkWriter(
|
||||
ctx context.Context,
|
||||
remote string,
|
||||
src fs.ObjectInfo,
|
||||
options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
|
||||
// Temporary Object under construction
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
ui, err := o.prepareUpload(ctx, src, options)
|
||||
if err != nil {
|
||||
return info, nil, fmt.Errorf("failed to prepare upload: %w", err)
|
||||
}
|
||||
|
||||
uploadParts := f.opt.MaxUploadParts
|
||||
if uploadParts < 1 {
|
||||
uploadParts = 1
|
||||
} else if uploadParts > maxUploadParts {
|
||||
uploadParts = maxUploadParts
|
||||
}
|
||||
size := src.Size()
|
||||
|
||||
// calculate size of parts
|
||||
chunkSize := f.opt.ChunkSize
|
||||
|
||||
// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
|
||||
// buffers here (default 5 MiB). With a maximum number of parts (10,000) this will be a file of
|
||||
// 48 GiB, which seems like a reasonable limit.
|
||||
if size == -1 {
|
||||
warnStreamUpload.Do(func() {
|
||||
fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
|
||||
f.opt.ChunkSize, fs.SizeSuffix(int64(chunkSize)*int64(uploadParts)))
|
||||
})
|
||||
} else {
|
||||
chunkSize = chunksize.Calculator(src, size, uploadParts, chunkSize)
|
||||
}
|
||||
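The 48 GiB figure in the comment above follows directly from the defaults; a quick arithmetic check, with values taken from the constants in this diff:

package main

import "fmt"

func main() {
	const chunkSize = 5 * 1024 * 1024 // default/minimum chunk size, 5 MiB
	const maxParts = 10000            // OCI multipart part limit
	max := int64(chunkSize) * maxParts
	fmt.Printf("max streamed file size: %d bytes (~%.1f GiB)\n",
		max, float64(max)/(1<<30)) // ~48.8 GiB
}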
|
||||
uploadID, existingParts, err := o.createMultipartUpload(ctx, ui.req)
|
||||
if err != nil {
|
||||
return info, nil, fmt.Errorf("create multipart upload request failed: %w", err)
|
||||
}
|
||||
bucketName, bucketPath := o.split()
|
||||
chunkWriter := &objectChunkWriter{
|
||||
chunkSize: int64(chunkSize),
|
||||
size: size,
|
||||
f: f,
|
||||
bucket: &bucketName,
|
||||
key: &bucketPath,
|
||||
uploadID: &uploadID,
|
||||
existingParts: existingParts,
|
||||
ui: ui,
|
||||
o: o,
|
||||
}
|
||||
info = fs.ChunkWriterInfo{
|
||||
ChunkSize: int64(chunkSize),
|
||||
Concurrency: o.fs.opt.UploadConcurrency,
|
||||
LeavePartsOnError: o.fs.opt.LeavePartsOnError,
|
||||
}
|
||||
fs.Debugf(o, "open chunk writer: started multipart upload: %v", uploadID)
|
||||
return info, chunkWriter, err
|
||||
}
|
||||
|
||||
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
|
||||
func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error) {
|
||||
if chunkNumber < 0 {
|
||||
err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber)
|
||||
return -1, err
|
||||
}
|
||||
// Only account after the checksum reads have been done
|
||||
if do, ok := reader.(pool.DelayAccountinger); ok {
|
||||
// To figure out this number, do a transfer and if the accounted size is 0 or a
|
||||
// multiple of what it should be, increase or decrease this number.
|
||||
do.DelayAccounting(2)
|
||||
}
|
||||
m := md5.New()
|
||||
currentChunkSize, err := io.Copy(m, reader)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
// If no data read, don't write the chunk
|
||||
if currentChunkSize == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
md5sumBinary := m.Sum([]byte{})
|
||||
w.addMd5(&md5sumBinary, int64(chunkNumber))
|
||||
md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])
|
||||
|
||||
// Object storage requires 1 <= PartNumber <= 10000
|
||||
ossPartNumber := chunkNumber + 1
|
||||
if existing, ok := w.existingParts[ossPartNumber]; ok {
|
||||
if md5sum == *existing.Md5 {
|
||||
fs.Debugf(w.o, "matched uploaded part found, part num %d, skipping part, md5=%v", *existing.PartNumber, md5sum)
|
||||
w.addCompletedPart(existing.PartNumber, existing.Etag)
|
||||
return currentChunkSize, nil
|
||||
}
|
||||
}
|
||||
req := objectstorage.UploadPartRequest{
|
||||
NamespaceName: common.String(w.f.opt.Namespace),
|
||||
BucketName: w.bucket,
|
||||
ObjectName: w.key,
|
||||
UploadId: w.uploadID,
|
||||
UploadPartNum: common.Int(ossPartNumber),
|
||||
ContentLength: common.Int64(currentChunkSize),
|
||||
ContentMD5: common.String(md5sum),
|
||||
}
|
||||
w.o.applyPartUploadOptions(w.ui.req, &req)
|
||||
var resp objectstorage.UploadPartResponse
|
||||
err = w.f.pacer.Call(func() (bool, error) {
|
||||
// req.UploadPartBody = io.NopCloser(bytes.NewReader(buf))
|
||||
// rewind the reader on retry and after reading md5
|
||||
_, err = reader.Seek(0, io.SeekStart)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
req.UploadPartBody = io.NopCloser(reader)
|
||||
resp, err = w.f.srv.UploadPart(ctx, req)
|
||||
if err != nil {
|
||||
if ossPartNumber <= 8 {
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
}
|
||||
// retry all chunks once have done the first few
|
||||
return true, err
|
||||
}
|
||||
return false, err
|
||||
})
|
||||
if err != nil {
|
||||
fs.Errorf(w.o, "multipart upload failed to upload part:%d err: %v", ossPartNumber, err)
|
||||
return -1, fmt.Errorf("multipart upload failed to upload part: %w", err)
|
||||
}
|
||||
w.addCompletedPart(&ossPartNumber, resp.ETag)
|
||||
return currentChunkSize, err
|
||||
|
||||
}
|
||||
|
||||
// add a part number and etag to the completed parts
|
||||
func (w *objectChunkWriter) addCompletedPart(partNum *int, eTag *string) {
|
||||
w.partsToCommitMu.Lock()
|
||||
defer w.partsToCommitMu.Unlock()
|
||||
w.partsToCommit = append(w.partsToCommit, objectstorage.CommitMultipartUploadPartDetails{
|
||||
PartNum: partNum,
|
||||
Etag: eTag,
|
||||
})
|
||||
}
|
||||
|
||||
func (w *objectChunkWriter) Close(ctx context.Context) (err error) {
|
||||
req := objectstorage.CommitMultipartUploadRequest{
|
||||
NamespaceName: common.String(w.f.opt.Namespace),
|
||||
BucketName: w.bucket,
|
||||
ObjectName: w.key,
|
||||
UploadId: w.uploadID,
|
||||
}
|
||||
req.PartsToCommit = w.partsToCommit
|
||||
var resp objectstorage.CommitMultipartUploadResponse
|
||||
err = w.f.pacer.Call(func() (bool, error) {
|
||||
resp, err = w.f.srv.CommitMultipartUpload(ctx, req)
|
||||
// if multipart is corrupted, we will abort the uploadId
|
||||
if isMultiPartUploadCorrupted(err) {
|
||||
fs.Debugf(w.o, "multipart uploadId %v is corrupted, aborting...", *w.uploadID)
|
||||
_ = w.Abort(ctx)
|
||||
return false, err
|
||||
}
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.eTag = *resp.ETag
|
||||
hashOfHashes := md5.Sum(w.md5s)
|
||||
wantMultipartMd5 := fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(hashOfHashes[:]), len(w.partsToCommit))
|
||||
gotMultipartMd5 := *resp.OpcMultipartMd5
|
||||
if wantMultipartMd5 != gotMultipartMd5 {
|
||||
fs.Errorf(w.o, "multipart upload corrupted: multipart md5 differ: expecting %s but got %s", wantMultipartMd5, gotMultipartMd5)
|
||||
return fmt.Errorf("multipart upload corrupted: md5 differ: expecting %s but got %s", wantMultipartMd5, gotMultipartMd5)
|
||||
}
|
||||
fs.Debugf(w.o, "multipart upload %v md5 matched: expecting %s and got %s", *w.uploadID, wantMultipartMd5, gotMultipartMd5)
|
||||
return nil
|
||||
}
|
||||
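The integrity check in Close above compares against OCI's composite checksum (OpcMultipartMd5): the md5 of the concatenated per-part md5s, base64-encoded, with the part count appended. A standalone sketch of that computation, assuming the parts are small enough to hold in memory:

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

// compositeMD5 reproduces the opc-multipart-md5 format:
// base64(md5(md5(part1) || md5(part2) || ...)) + "-" + partCount
func compositeMD5(parts [][]byte) string {
	var concat []byte
	for _, p := range parts {
		sum := md5.Sum(p)
		concat = append(concat, sum[:]...)
	}
	all := md5.Sum(concat)
	return fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(all[:]), len(parts))
}

func main() {
	fmt.Println(compositeMD5([][]byte{
		[]byte("first part"),
		[]byte("second part"),
	}))
}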
|
||||
func isMultiPartUploadCorrupted(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
// Check if this is an OCI service error, and if it is a multipart commit error
|
||||
if ociError, ok := err.(common.ServiceError); ok {
|
||||
// An InvalidUploadPart error means a part is corrupt or missing, so the commit cannot succeed
|
||||
if ociError.GetCode() == "InvalidUploadPart" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (w *objectChunkWriter) Abort(ctx context.Context) error {
|
||||
fs.Debugf(w.o, "Cancelling multipart upload")
|
||||
err := w.o.fs.abortMultiPartUpload(
|
||||
ctx,
|
||||
w.bucket,
|
||||
w.key,
|
||||
w.uploadID)
|
||||
if err != nil {
|
||||
fs.Debugf(w.o, "Failed to cancel multipart upload: %v", err)
|
||||
} else {
|
||||
fs.Debugf(w.o, "canceled and aborted multipart upload: %v", *w.uploadID)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// addMd5 adds a binary md5 to the md5 calculated so far
|
||||
func (w *objectChunkWriter) addMd5(md5binary *[]byte, chunkNumber int64) {
|
||||
w.md5sMu.Lock()
|
||||
defer w.md5sMu.Unlock()
|
||||
start := chunkNumber * md5.Size
|
||||
end := start + md5.Size
|
||||
if extend := end - int64(len(w.md5s)); extend > 0 {
|
||||
w.md5s = append(w.md5s, make([]byte, extend)...)
|
||||
}
|
||||
copy(w.md5s[start:end], (*md5binary)[:])
|
||||
}
|
||||
|
||||
func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {
|
||||
bucket, bucketPath := o.split()
|
||||
|
||||
ui.req = &objectstorage.PutObjectRequest{
|
||||
NamespaceName: common.String(o.fs.opt.Namespace),
|
||||
BucketName: common.String(bucket),
|
||||
ObjectName: common.String(bucketPath),
|
||||
}
|
||||
|
||||
// Set the mtime in the metadata
|
||||
modTime := src.ModTime(ctx)
|
||||
// Fetch metadata if --metadata is in use
|
||||
meta, err := fs.GetMetadataOptions(ctx, src, options)
|
||||
if err != nil {
|
||||
return ui, fmt.Errorf("failed to read metadata from source object: %w", err)
|
||||
}
|
||||
ui.req.OpcMeta = make(map[string]string, len(meta)+2)
|
||||
// merge metadata into request and user metadata
|
||||
for k, v := range meta {
|
||||
pv := common.String(v)
|
||||
k = strings.ToLower(k)
|
||||
switch k {
|
||||
case "cache-control":
|
||||
ui.req.CacheControl = pv
|
||||
case "content-disposition":
|
||||
ui.req.ContentDisposition = pv
|
||||
case "content-encoding":
|
||||
ui.req.ContentEncoding = pv
|
||||
case "content-language":
|
||||
ui.req.ContentLanguage = pv
|
||||
case "content-type":
|
||||
ui.req.ContentType = pv
|
||||
case "tier":
|
||||
// ignore
|
||||
case "mtime":
|
||||
// mtime in meta overrides source ModTime
|
||||
metaModTime, err := time.Parse(time.RFC3339Nano, v)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err)
|
||||
} else {
|
||||
modTime = metaModTime
|
||||
}
|
||||
case "btime":
|
||||
// write as metadata since we can't set it
|
||||
ui.req.OpcMeta[k] = v
|
||||
default:
|
||||
ui.req.OpcMeta[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
// Set the mtime in the metadata
|
||||
ui.req.OpcMeta[metaMtime] = swift.TimeToFloatString(modTime)
|
||||
|
||||
// read the md5sum if available
|
||||
// - for non-multipart
|
||||
// - so we can add a ContentMD5
|
||||
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
|
||||
// - for multipart, provided checksums aren't disabled
|
||||
// - so we can add the md5sum in the metadata as metaMD5Hash
|
||||
size := src.Size()
|
||||
isMultipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
|
||||
var md5sumBase64 string
|
||||
if !isMultipart || !o.fs.opt.DisableChecksum {
|
||||
ui.md5sumHex, err = src.Hash(ctx, hash.MD5)
|
||||
if err == nil && matchMd5.MatchString(ui.md5sumHex) {
|
||||
hashBytes, err := hex.DecodeString(ui.md5sumHex)
|
||||
if err == nil {
|
||||
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
|
||||
if isMultipart && !o.fs.opt.DisableChecksum {
|
||||
// Set the md5sum as metadata on the object if
|
||||
// - a multipart upload
|
||||
// - the ETag is not an MD5, e.g. when using SSE/SSE-C
|
||||
// provided checksums aren't disabled
|
||||
ui.req.OpcMeta[metaMD5Hash] = md5sumBase64
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Set the content type if it isn't set already
|
||||
if ui.req.ContentType == nil {
|
||||
ui.req.ContentType = common.String(fs.MimeType(ctx, src))
|
||||
}
|
||||
if size >= 0 {
|
||||
ui.req.ContentLength = common.Int64(size)
|
||||
}
|
||||
if md5sumBase64 != "" {
|
||||
ui.req.ContentMD5 = &md5sumBase64
|
||||
}
|
||||
o.applyPutOptions(ui.req, options...)
|
||||
useBYOKPutObject(o.fs, ui.req)
|
||||
if o.fs.opt.StorageTier != "" {
|
||||
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
|
||||
if !ok {
|
||||
return ui, fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
|
||||
}
|
||||
ui.req.StorageTier = storageTier
|
||||
}
|
||||
// Check metadata keys and values are valid
|
||||
for key, value := range ui.req.OpcMeta {
|
||||
if !httpguts.ValidHeaderFieldName(key) {
|
||||
fs.Errorf(o, "Dropping invalid metadata key %q", key)
|
||||
delete(ui.req.OpcMeta, key)
|
||||
} else if value == "" {
|
||||
fs.Errorf(o, "Dropping nil metadata value for key %q", key)
|
||||
delete(ui.req.OpcMeta, key)
|
||||
} else if !httpguts.ValidHeaderFieldValue(value) {
|
||||
fs.Errorf(o, "Dropping invalid metadata value %q for key %q", value, key)
|
||||
delete(ui.req.OpcMeta, key)
|
||||
}
|
||||
}
|
||||
return ui, nil
|
||||
}
|
||||
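The validation loop at the end of prepareUpload relies on golang.org/x/net/http/httpguts; a short sketch of how those two predicates behave on typical metadata keys (example keys are hypothetical):

package main

import (
	"fmt"

	"golang.org/x/net/http/httpguts"
)

func main() {
	meta := map[string]string{
		"mtime":   "1694000000.000000",
		"bad key": "x", // space is not a valid header field name character
		"empty":   "",  // empty values are dropped too
	}
	for k, v := range meta {
		switch {
		case !httpguts.ValidHeaderFieldName(k):
			fmt.Printf("dropping invalid metadata key %q\n", k)
		case v == "" || !httpguts.ValidHeaderFieldValue(v):
			fmt.Printf("dropping invalid metadata value for key %q\n", k)
		default:
			fmt.Printf("keeping %q=%q\n", k, v)
		}
	}
}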
|
||||
func (o *Object) createMultipartUpload(ctx context.Context, putReq *objectstorage.PutObjectRequest) (
|
||||
uploadID string, existingParts map[int]objectstorage.MultipartUploadPartSummary, err error) {
|
||||
bucketName, bucketPath := o.split()
|
||||
f := o.fs
|
||||
if f.opt.AttemptResumeUpload {
|
||||
fs.Debugf(o, "attempting to resume upload for %v (if any)", o.remote)
|
||||
resumeUploads, err := o.fs.findLatestMultipartUpload(ctx, bucketName, bucketPath)
|
||||
if err == nil && len(resumeUploads) > 0 {
|
||||
uploadID = *resumeUploads[0].UploadId
|
||||
existingParts, err = f.listMultipartUploadParts(ctx, bucketName, bucketPath, uploadID)
|
||||
if err == nil {
|
||||
fs.Debugf(o, "resuming with existing upload id: %v", uploadID)
|
||||
return uploadID, existingParts, err
|
||||
}
|
||||
}
|
||||
}
|
||||
req := objectstorage.CreateMultipartUploadRequest{
|
||||
NamespaceName: common.String(o.fs.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
}
|
||||
req.Object = common.String(bucketPath)
|
||||
if o.fs.opt.StorageTier != "" {
|
||||
storageTier, ok := objectstorage.GetMappingStorageTierEnum(o.fs.opt.StorageTier)
|
||||
if !ok {
|
||||
return "", nil, fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
|
||||
}
|
||||
req.StorageTier = storageTier
|
||||
}
|
||||
o.applyMultipartUploadOptions(putReq, &req)
|
||||
|
||||
var resp objectstorage.CreateMultipartUploadResponse
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.CreateMultipartUpload(ctx, req)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
return "", existingParts, err
|
||||
}
|
||||
existingParts = make(map[int]objectstorage.MultipartUploadPartSummary)
|
||||
uploadID = *resp.UploadId
|
||||
fs.Debugf(o, "created new upload id: %v", uploadID)
|
||||
return uploadID, existingParts, err
|
||||
}
|
||||
@@ -4,12 +4,14 @@
|
||||
package oracleobjectstorage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -18,10 +20,8 @@ import (
|
||||
"github.com/ncw/swift/v2"
|
||||
"github.com/oracle/oci-go-sdk/v65/common"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
)
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -367,9 +367,28 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
|
||||
return resp.HTTPResponse().Body, nil
|
||||
}
|
||||
|
||||
func isZeroLength(streamReader io.Reader) bool {
|
||||
switch v := streamReader.(type) {
|
||||
case *bytes.Buffer:
|
||||
return v.Len() == 0
|
||||
case *bytes.Reader:
|
||||
return v.Len() == 0
|
||||
case *strings.Reader:
|
||||
return v.Len() == 0
|
||||
case *os.File:
|
||||
fi, err := v.Stat()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return fi.Size() == 0
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
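isZeroLength only special-cases readers whose length can be read without consuming them; an arbitrary io.Reader cannot be probed, so the default is false and the caller falls through to a normal upload. A trimmed, runnable illustration of that design choice:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func isEmpty(r io.Reader) bool {
	switch v := r.(type) {
	case *bytes.Buffer:
		return v.Len() == 0
	case *strings.Reader:
		return v.Len() == 0
	default:
		return false // unknown reader: cannot peek without consuming it
	}
}

func main() {
	fmt.Println(isEmpty(bytes.NewBuffer(nil)))      // true
	fmt.Println(isEmpty(strings.NewReader("data"))) // false
}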
|
||||
// Update an object if it has changed
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
bucketName, bucketPath := o.split()
|
||||
bucketName, _ := o.split()
|
||||
err = o.fs.makeBucket(ctx, bucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -377,142 +396,24 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
|
||||
// determine whether to use a single or multipart upload.
|
||||
size := src.Size()
|
||||
multipart := size >= int64(o.fs.opt.UploadCutoff)
|
||||
|
||||
// Set the mtime in the metadata
|
||||
modTime := src.ModTime(ctx)
|
||||
metadata := map[string]string{
|
||||
metaMtime: swift.TimeToFloatString(modTime),
|
||||
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
|
||||
if isZeroLength(in) {
|
||||
multipart = false
|
||||
}
|
||||
|
||||
// read the md5sum if available
|
||||
// - for non-multipart
|
||||
// - so we can add a ContentMD5
|
||||
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
|
||||
// - for multipart provided checksums aren't disabled
|
||||
// - so we can add the md5sum in the metadata as metaMD5Hash
|
||||
var md5sumBase64 string
|
||||
var md5sumHex string
|
||||
if !multipart || !o.fs.opt.DisableChecksum {
|
||||
md5sumHex, err = src.Hash(ctx, hash.MD5)
|
||||
if err == nil && matchMd5.MatchString(md5sumHex) {
|
||||
hashBytes, err := hex.DecodeString(md5sumHex)
|
||||
if err == nil {
|
||||
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
|
||||
if multipart && !o.fs.opt.DisableChecksum {
|
||||
// Set the md5sum as metadata on the object if
|
||||
// - a multipart upload
|
||||
// - the ETag is not an MD5, e.g. when using SSE/SSE-C
|
||||
// provided checksums aren't disabled
|
||||
metadata[metaMD5Hash] = md5sumBase64
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Guess the content type
|
||||
mimeType := fs.MimeType(ctx, src)
|
||||
|
||||
if multipart {
|
||||
chunkSize := int64(o.fs.opt.ChunkSize)
|
||||
uploadRequest := transfer.UploadRequest{
|
||||
NamespaceName: common.String(o.fs.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(bucketPath),
|
||||
ContentType: common.String(mimeType),
|
||||
PartSize: common.Int64(chunkSize),
|
||||
AllowMultipartUploads: common.Bool(true),
|
||||
AllowParrallelUploads: common.Bool(true),
|
||||
ObjectStorageClient: o.fs.srv,
|
||||
EnableMultipartChecksumVerification: common.Bool(!o.fs.opt.DisableChecksum),
|
||||
NumberOfGoroutines: common.Int(o.fs.opt.UploadConcurrency),
|
||||
Metadata: metadataWithOpcPrefix(metadata),
|
||||
}
|
||||
if o.fs.opt.StorageTier != "" {
|
||||
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
|
||||
if !ok {
|
||||
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
|
||||
}
|
||||
uploadRequest.StorageTier = storageTier
|
||||
}
|
||||
o.applyMultiPutOptions(&uploadRequest, options...)
|
||||
useBYOKUpload(o.fs, &uploadRequest)
|
||||
uploadStreamRequest := transfer.UploadStreamRequest{
|
||||
UploadRequest: uploadRequest,
|
||||
StreamReader: in,
|
||||
}
|
||||
uploadMgr := transfer.NewUploadManager()
|
||||
var uploadID = ""
|
||||
|
||||
defer atexit.OnError(&err, func() {
|
||||
if uploadID == "" {
|
||||
return
|
||||
}
|
||||
if o.fs.opt.LeavePartsOnError {
|
||||
return
|
||||
}
|
||||
fs.Debugf(o, "Cancelling multipart upload")
|
||||
errCancel := o.fs.abortMultiPartUpload(
|
||||
context.Background(),
|
||||
bucketName,
|
||||
bucketPath,
|
||||
uploadID)
|
||||
if errCancel != nil {
|
||||
fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
|
||||
}
|
||||
})()
|
||||
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
uploadResponse, err := uploadMgr.UploadStream(ctx, uploadStreamRequest)
|
||||
var httpResponse *http.Response
|
||||
if err == nil {
|
||||
if uploadResponse.Type == transfer.MultipartUpload {
|
||||
if uploadResponse.MultipartUploadResponse != nil {
|
||||
httpResponse = uploadResponse.MultipartUploadResponse.HTTPResponse()
|
||||
}
|
||||
} else {
|
||||
if uploadResponse.SinglepartUploadResponse != nil {
|
||||
httpResponse = uploadResponse.SinglepartUploadResponse.HTTPResponse()
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
uploadID := ""
|
||||
if uploadResponse.MultipartUploadResponse != nil && uploadResponse.MultipartUploadResponse.UploadID != nil {
|
||||
uploadID = *uploadResponse.MultipartUploadResponse.UploadID
|
||||
fs.Debugf(o, "multipart streaming upload failed, aborting uploadID: %v, may retry", uploadID)
|
||||
_ = o.fs.abortMultiPartUpload(ctx, bucketName, bucketPath, uploadID)
|
||||
}
|
||||
}
|
||||
return shouldRetry(ctx, httpResponse, err)
|
||||
})
|
||||
err = o.uploadMultipart(ctx, src, in)
|
||||
if err != nil {
|
||||
fs.Errorf(o, "multipart streaming upload failed %v", err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
req := objectstorage.PutObjectRequest{
|
||||
NamespaceName: common.String(o.fs.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(bucketPath),
|
||||
ContentType: common.String(mimeType),
|
||||
PutObjectBody: io.NopCloser(in),
|
||||
OpcMeta: metadata,
|
||||
ui, err := o.prepareUpload(ctx, src, options)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to prepare upload: %w", err)
|
||||
}
|
||||
if size >= 0 {
|
||||
req.ContentLength = common.Int64(size)
|
||||
}
|
||||
if o.fs.opt.StorageTier != "" {
|
||||
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
|
||||
if !ok {
|
||||
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
|
||||
}
|
||||
req.StorageTier = storageTier
|
||||
}
|
||||
o.applyPutOptions(&req, options...)
|
||||
useBYOKPutObject(o.fs, &req)
|
||||
var resp objectstorage.PutObjectResponse
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.PutObject(ctx, req)
|
||||
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
||||
ui.req.PutObjectBody = io.NopCloser(in)
|
||||
resp, err = o.fs.srv.PutObject(ctx, *ui.req)
|
||||
return shouldRetry(ctx, resp.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -591,28 +492,24 @@ func (o *Object) applyGetObjectOptions(req *objectstorage.GetObjectRequest, opti
|
||||
}
|
||||
}
|
||||
|
||||
func (o *Object) applyMultiPutOptions(req *transfer.UploadRequest, options ...fs.OpenOption) {
|
||||
// Apply upload options
|
||||
for _, option := range options {
|
||||
key, value := option.Header()
|
||||
lowerKey := strings.ToLower(key)
|
||||
switch lowerKey {
|
||||
case "":
|
||||
// ignore
|
||||
case "content-encoding":
|
||||
req.ContentEncoding = common.String(value)
|
||||
case "content-language":
|
||||
req.ContentLanguage = common.String(value)
|
||||
case "content-type":
|
||||
req.ContentType = common.String(value)
|
||||
default:
|
||||
if strings.HasPrefix(lowerKey, ociMetaPrefix) {
|
||||
req.Metadata[lowerKey] = value
|
||||
} else {
|
||||
fs.Errorf(o, "Don't know how to set key %q on upload", key)
|
||||
}
|
||||
}
|
||||
}
|
||||
func (o *Object) applyMultipartUploadOptions(putReq *objectstorage.PutObjectRequest, req *objectstorage.CreateMultipartUploadRequest) {
|
||||
req.ContentType = putReq.ContentType
|
||||
req.ContentLanguage = putReq.ContentLanguage
|
||||
req.ContentEncoding = putReq.ContentEncoding
|
||||
req.ContentDisposition = putReq.ContentDisposition
|
||||
req.CacheControl = putReq.CacheControl
|
||||
req.Metadata = metadataWithOpcPrefix(putReq.OpcMeta)
|
||||
req.OpcSseCustomerAlgorithm = putReq.OpcSseCustomerAlgorithm
|
||||
req.OpcSseCustomerKey = putReq.OpcSseCustomerKey
|
||||
req.OpcSseCustomerKeySha256 = putReq.OpcSseCustomerKeySha256
|
||||
req.OpcSseKmsKeyId = putReq.OpcSseKmsKeyId
|
||||
}
|
||||
|
||||
func (o *Object) applyPartUploadOptions(putReq *objectstorage.PutObjectRequest, req *objectstorage.UploadPartRequest) {
|
||||
req.OpcSseCustomerAlgorithm = putReq.OpcSseCustomerAlgorithm
|
||||
req.OpcSseCustomerKey = putReq.OpcSseCustomerKey
|
||||
req.OpcSseCustomerKeySha256 = putReq.OpcSseCustomerKeySha256
|
||||
req.OpcSseKmsKeyId = putReq.OpcSseKmsKeyId
|
||||
}
|
||||
|
||||
func metadataWithOpcPrefix(src map[string]string) map[string]string {
|
||||
|
||||
@@ -13,9 +13,10 @@ import (
|
||||
|
||||
const (
|
||||
maxSizeForCopy = 4768 * 1024 * 1024
|
||||
minChunkSize = fs.SizeSuffix(1024 * 1024 * 5)
|
||||
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
|
||||
maxUploadParts = 10000
|
||||
defaultUploadConcurrency = 10
|
||||
minChunkSize = fs.SizeSuffix(5 * 1024 * 1024)
|
||||
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
|
||||
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
|
||||
minSleep = 10 * time.Millisecond
|
||||
defaultCopyTimeoutDuration = fs.Duration(time.Minute)
|
||||
@@ -55,12 +56,14 @@ type Options struct {
|
||||
ConfigProfile string `config:"config_profile"`
|
||||
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||
MaxUploadParts int `config:"max_upload_parts"`
|
||||
UploadConcurrency int `config:"upload_concurrency"`
|
||||
DisableChecksum bool `config:"disable_checksum"`
|
||||
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
|
||||
CopyTimeout fs.Duration `config:"copy_timeout"`
|
||||
StorageTier string `config:"storage_tier"`
|
||||
LeavePartsOnError bool `config:"leave_parts_on_error"`
|
||||
AttemptResumeUpload bool `config:"attempt_resume_upload"`
|
||||
NoCheckBucket bool `config:"no_check_bucket"`
|
||||
SSEKMSKeyID string `config:"sse_kms_key_id"`
|
||||
SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
|
||||
@@ -157,9 +160,8 @@ The minimum is 0 and the maximum is 5 GiB.`,
|
||||
Help: `Chunk size to use for uploading.
|
||||
|
||||
When uploading files larger than upload_cutoff or files with unknown
|
||||
size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google
|
||||
photos or google docs) they will be uploaded as multipart uploads
|
||||
using this chunk size.
|
||||
size (e.g. from "rclone rcat" or uploaded with "rclone mount" they will be uploaded
|
||||
as multipart uploads using this chunk size.
|
||||
|
||||
Note that "upload_concurrency" chunks of this size are buffered
|
||||
in memory per transfer.
|
||||
@@ -181,6 +183,20 @@ statistics displayed with "-P" flag.
|
||||
`,
|
||||
Default: minChunkSize,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "max_upload_parts",
|
||||
Help: `Maximum number of parts in a multipart upload.
|
||||
|
||||
This option defines the maximum number of multipart chunks to use
|
||||
when doing a multipart upload.
|
||||
|
||||
OCI has a maximum limit of 10,000 parts per multipart upload.
|
||||
|
||||
Rclone will automatically increase the chunk size when uploading a
|
||||
large file of a known size to stay below this limit on the number of chunks.
|
||||
`,
|
||||
Default: maxUploadParts,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "upload_concurrency",
|
||||
Help: `Concurrency for multipart uploads.
|
||||
@@ -238,12 +254,24 @@ to start uploading.`,
|
||||
encoder.EncodeDot,
|
||||
}, {
|
||||
Name: "leave_parts_on_error",
|
||||
Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.
|
||||
Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.
|
||||
|
||||
It should be set to true for resuming uploads across different sessions.
|
||||
|
||||
WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add
|
||||
additional costs if not cleaned up.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "attempt_resume_upload",
|
||||
Help: `If true attempt to resume previously started multipart upload for the object.
|
||||
This can speed up multipart transfers by resuming uploads from a past session.
|
||||
|
||||
WARNING: If the chunk size in the resumed session differs from that of the past incomplete session, then the resumed multipart upload is
|
||||
aborted and a new multipart upload is started with the new chunk size.
|
||||
|
||||
The flag leave_parts_on_error must be true to resume, and to allow skipping parts that were already uploaded successfully.
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
|
||||
@@ -318,7 +318,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
|
||||
remote := *object.Name
|
||||
remote = f.opt.Enc.ToStandardPath(remote)
|
||||
if !strings.HasPrefix(remote, prefix) {
|
||||
// fs.Debugf(f, "Odd name received %v", object.Name)
|
||||
continue
|
||||
}
|
||||
remote = remote[len(prefix):]
|
||||
@@ -558,15 +557,15 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID string) (err error) {
|
||||
if uploadID == "" {
|
||||
func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID *string) (err error) {
|
||||
if uploadID == nil || *uploadID == "" {
|
||||
return nil
|
||||
}
|
||||
request := objectstorage.AbortMultipartUploadRequest{
|
||||
NamespaceName: common.String(f.opt.Namespace),
|
||||
BucketName: common.String(bucketName),
|
||||
ObjectName: common.String(bucketPath),
|
||||
UploadId: common.String(uploadID),
|
||||
BucketName: bucketName,
|
||||
ObjectName: bucketPath,
|
||||
UploadId: uploadID,
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.AbortMultipartUpload(ctx, request)
|
||||
@@ -589,7 +588,7 @@ func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Durat
|
||||
if operations.SkipDestructive(ctx, what, "remove pending upload") {
|
||||
continue
|
||||
}
|
||||
_ = f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
|
||||
_ = f.abortMultiPartUpload(ctx, upload.Bucket, upload.Object, upload.UploadId)
|
||||
}
|
||||
} else {
|
||||
fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.")
|
||||
@@ -684,12 +683,13 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.Copier = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.ListRer = &Fs{}
|
||||
_ fs.Commander = &Fs{}
|
||||
_ fs.CleanUpper = &Fs{}
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.Copier = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.ListRer = &Fs{}
|
||||
_ fs.Commander = &Fs{}
|
||||
_ fs.CleanUpper = &Fs{}
|
||||
_ fs.OpenChunkWriter = &Fs{}
|
||||
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.MimeTyper = &Object{}
|
||||
|
||||
backend/protondrive/protondrive.go (new file, 1114 lines; diff suppressed because it is too large)
backend/protondrive/protondrive_test.go (new file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
package protondrive_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/protondrive"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestProtonDrive:",
|
||||
NilObject: (*protondrive.Object)(nil),
|
||||
})
|
||||
}
|
||||
backend/quatrix/api/types.go (new file, 182 lines)
@@ -0,0 +1,182 @@
|
||||
// Package api provides types used by the Quatrix API.
|
||||
package api
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// OverwriteOnCopyMode is a conflict resolution mode used during copy. Files with conflicting names will be overwritten
|
||||
const OverwriteOnCopyMode = "overwrite"
|
||||
|
||||
// ProfileInfo holds quota information for a profile
|
||||
type ProfileInfo struct {
|
||||
UserUsed int64 `json:"user_used"`
|
||||
UserLimit int64 `json:"user_limit"`
|
||||
AccUsed int64 `json:"acc_used"`
|
||||
AccLimit int64 `json:"acc_limit"`
|
||||
}
|
||||
|
||||
// IDList is a general object that contains a list of IDs
|
||||
type IDList struct {
|
||||
IDs []string `json:"ids"`
|
||||
}
|
||||
|
||||
// DeleteParams is the request to delete an object
|
||||
type DeleteParams struct {
|
||||
IDs []string `json:"ids"`
|
||||
DeletePermanently bool `json:"delete_permanently"`
|
||||
}
|
||||
|
||||
// FileInfoParams is the request to get object's (file or directory) info
|
||||
type FileInfoParams struct {
|
||||
ParentID string `json:"parent_id,omitempty"`
|
||||
Path string `json:"path"`
|
||||
}
|
||||
|
||||
// FileInfo is the response to get object's (file or directory) info
|
||||
type FileInfo struct {
|
||||
FileID string `json:"file_id"`
|
||||
ParentID string `json:"parent_id"`
|
||||
Src string `json:"src"`
|
||||
Type string `json:"type"`
|
||||
}
|
||||
|
||||
// IsFile returns true if object is a file
|
||||
// false otherwise
|
||||
func (fi *FileInfo) IsFile() bool {
|
||||
if fi == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return fi.Type == "F"
|
||||
}
|
||||
|
||||
// IsDir returns true if object is a directory
|
||||
// false otherwise
|
||||
func (fi *FileInfo) IsDir() bool {
|
||||
if fi == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return fi.Type == "D" || fi.Type == "S" || fi.Type == "T"
|
||||
}
|
||||
|
||||
// CreateDirParams is the request to create a directory
|
||||
type CreateDirParams struct {
|
||||
Target string `json:"target,omitempty"`
|
||||
Name string `json:"name"`
|
||||
Resolve bool `json:"resolve"`
|
||||
}
|
||||
|
||||
// File represents metadata about an object in Quatrix (file or directory)
|
||||
type File struct {
|
||||
ID string `json:"id"`
|
||||
Created JSONTime `json:"created"`
|
||||
Modified JSONTime `json:"modified"`
|
||||
Name string `json:"name"`
|
||||
ParentID string `json:"parent_id"`
|
||||
Size int64 `json:"size"`
|
||||
ModifiedMS JSONTime `json:"modified_ms"`
|
||||
Type string `json:"type"`
|
||||
Operations int `json:"operations"`
|
||||
SubType string `json:"sub_type"`
|
||||
Content []File `json:"content"`
|
||||
}
|
||||
|
||||
// IsFile returns true if object is a file
|
||||
// false otherwise
|
||||
func (f *File) IsFile() bool {
|
||||
if f == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return f.Type == "F"
|
||||
}
|
||||
|
||||
// IsDir returns true if object is a directory
|
||||
// false otherwise
|
||||
func (f *File) IsDir() bool {
|
||||
if f == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return f.Type == "D" || f.Type == "S" || f.Type == "T"
|
||||
}
|
||||
|
||||
// SetMTimeParams is the request to set the modification time for an object
|
||||
type SetMTimeParams struct {
|
||||
ID string `json:"id,omitempty"`
|
||||
MTime JSONTime `json:"mtime"`
|
||||
}
|
||||
|
||||
// JSONTime provides methods to marshal/unmarshal time.Time as Unix time
|
||||
type JSONTime time.Time
|
||||
|
||||
// MarshalJSON returns time representation in Unix time
|
||||
func (u JSONTime) MarshalJSON() ([]byte, error) {
|
||||
return []byte(strconv.FormatFloat(float64(time.Time(u).UTC().UnixNano())/1e9, 'f', 6, 64)), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON sets time from Unix time representation
|
||||
func (u *JSONTime) UnmarshalJSON(data []byte) error {
|
||||
f, err := strconv.ParseFloat(string(data), 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t := JSONTime(time.Unix(0, int64(f*1e9)))
|
||||
*u = t
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// String returns Unix time representation of time as string
|
||||
func (u JSONTime) String() string {
|
||||
return strconv.FormatInt(time.Time(u).UTC().Unix(), 10)
|
||||
}
|
||||
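A round-trip of the JSONTime encoding above, showing the fractional-Unix-seconds wire format; the two methods are copied here so the sketch is self-contained:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"time"
)

type JSONTime time.Time

func (u JSONTime) MarshalJSON() ([]byte, error) {
	return []byte(strconv.FormatFloat(float64(time.Time(u).UTC().UnixNano())/1e9, 'f', 6, 64)), nil
}

func (u *JSONTime) UnmarshalJSON(data []byte) error {
	f, err := strconv.ParseFloat(string(data), 64)
	if err != nil {
		return err
	}
	*u = JSONTime(time.Unix(0, int64(f*1e9)))
	return nil
}

func main() {
	t := JSONTime(time.Unix(1694000000, 500000000))
	b, _ := json.Marshal(t)
	fmt.Println(string(b)) // 1694000000.500000

	var back JSONTime
	_ = json.Unmarshal(b, &back)
	fmt.Println(time.Time(back).UTC().Unix()) // 1694000000
}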
|
||||
// DownloadLinkResponse is the response to download-link request
|
||||
type DownloadLinkResponse struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
|
||||
// UploadLinkParams is the request to get upload-link
|
||||
type UploadLinkParams struct {
|
||||
Name string `json:"name"`
|
||||
ParentID string `json:"parent_id"`
|
||||
Resolve bool `json:"resolve"`
|
||||
}
|
||||
|
||||
// UploadLinkResponse is the response to upload-link request
|
||||
type UploadLinkResponse struct {
|
||||
Name string `json:"name"`
|
||||
FileID string `json:"file_id"`
|
||||
ParentID string `json:"parent_id"`
|
||||
UploadKey string `json:"upload_key"`
|
||||
}
|
||||
|
||||
// UploadFinalizeResponse is the response to finalize file method
|
||||
type UploadFinalizeResponse struct {
|
||||
FileID string `json:"id"`
|
||||
ParentID string `json:"parent_id"`
|
||||
Modified int64 `json:"modified"`
|
||||
FileSize int64 `json:"size"`
|
||||
}
|
||||
|
||||
// FileModifyParams is the request to get modify file link
|
||||
type FileModifyParams struct {
|
||||
ID string `json:"id"`
|
||||
Truncate int64 `json:"truncate"`
|
||||
}
|
||||
|
||||
// FileCopyMoveOneParams is the request to do server-side copy and move
|
||||
// and can be used for a file or directory
|
||||
type FileCopyMoveOneParams struct {
|
||||
ID string `json:"file_id"`
|
||||
Target string `json:"target_id"`
|
||||
Name string `json:"name"`
|
||||
MTime JSONTime `json:"mtime"`
|
||||
Resolve bool `json:"resolve"`
|
||||
ResolveMode string `json:"resolve_mode"`
|
||||
}
|
||||
backend/quatrix/quatrix.go (new file, 1254 lines; diff suppressed because it is too large)
backend/quatrix/quatrix_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
// Test Quatrix filesystem interface
|
||||
package quatrix_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/quatrix"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestQuatrix:",
|
||||
NilObject: (*quatrix.Object)(nil),
|
||||
})
|
||||
}
|
||||
backend/quatrix/upload_memory.go (new file, 108 lines)
@@ -0,0 +1,108 @@
|
||||
package quatrix
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
// UploadMemoryManager dynamically calculates every chunk size for the transfer and increases or decreases it
|
||||
// depending on the upload speed. This reduces overall upload time, because faster transfers
|
||||
// do not have to wait for slower ones to finish uploading.
|
||||
type UploadMemoryManager struct {
|
||||
m sync.Mutex
|
||||
useDynamicSize bool
|
||||
shared int64
|
||||
reserved int64
|
||||
effectiveTime time.Duration
|
||||
fileUsage map[string]int64
|
||||
}
|
||||
|
||||
// NewUploadMemoryManager is a constructor for UploadMemoryManager
|
||||
func NewUploadMemoryManager(ci *fs.ConfigInfo, opt *Options) *UploadMemoryManager {
|
||||
useDynamicSize := true
|
||||
|
||||
sharedMemory := int64(opt.MaximalSummaryChunkSize) - int64(opt.MinimalChunkSize)*int64(ci.Transfers)
|
||||
if sharedMemory <= 0 {
|
||||
sharedMemory = 0
|
||||
useDynamicSize = false
|
||||
}
|
||||
|
||||
return &UploadMemoryManager{
|
||||
useDynamicSize: useDynamicSize,
|
||||
shared: sharedMemory,
|
||||
reserved: int64(opt.MinimalChunkSize),
|
||||
effectiveTime: time.Duration(opt.EffectiveUploadTime),
|
||||
fileUsage: map[string]int64{},
|
||||
}
|
||||
}
|
||||
|
||||
// Consume -- decide amount of memory to consume
|
||||
func (u *UploadMemoryManager) Consume(fileID string, neededMemory int64, speed float64) int64 {
|
||||
if !u.useDynamicSize {
|
||||
if neededMemory < u.reserved {
|
||||
return neededMemory
|
||||
}
|
||||
|
||||
return u.reserved
|
||||
}
|
||||
|
||||
u.m.Lock()
|
||||
defer u.m.Unlock()
|
||||
|
||||
borrowed, found := u.fileUsage[fileID]
|
||||
if found {
|
||||
u.shared += borrowed
|
||||
borrowed = 0
|
||||
}
|
||||
|
||||
defer func() { u.fileUsage[fileID] = borrowed }()
|
||||
|
||||
effectiveChunkSize := int64(speed * u.effectiveTime.Seconds())
|
||||
|
||||
if effectiveChunkSize < u.reserved {
|
||||
effectiveChunkSize = u.reserved
|
||||
}
|
||||
|
||||
if neededMemory < effectiveChunkSize {
|
||||
effectiveChunkSize = neededMemory
|
||||
}
|
||||
|
||||
if effectiveChunkSize <= u.reserved {
|
||||
return effectiveChunkSize
|
||||
}
|
||||
|
||||
toBorrow := effectiveChunkSize - u.reserved
|
||||
|
||||
if toBorrow <= u.shared {
|
||||
u.shared -= toBorrow
|
||||
borrowed = toBorrow
|
||||
|
||||
return effectiveChunkSize
|
||||
}
|
||||
|
||||
borrowed = u.shared
|
||||
u.shared = 0
|
||||
|
||||
return borrowed + u.reserved
|
||||
}
|
||||
|
||||
// Return returns consumed memory for the previous chunk upload to the memory pool
|
||||
func (u *UploadMemoryManager) Return(fileID string) {
|
||||
if !u.useDynamicSize {
|
||||
return
|
||||
}
|
||||
|
||||
u.m.Lock()
|
||||
defer u.m.Unlock()
|
||||
|
||||
borrowed, found := u.fileUsage[fileID]
|
||||
if !found {
|
||||
return
|
||||
}
|
||||
|
||||
u.shared += borrowed
|
||||
|
||||
delete(u.fileUsage, fileID)
|
||||
}
|
||||
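To see how the two methods are meant to pair up, here is a minimal sketch (not from the diff) of a chunked upload loop driving the manager; the uploadChunk callback is a hypothetical stand-in for the real transfer code, and time is assumed imported as above:

// Sketch: chunked upload driven by UploadMemoryManager.
func uploadFile(u *UploadMemoryManager, fileID string, size int64,
	uploadChunk func(n int64) error) error {
	speed := 0.0 // measured bytes/second of the previous chunk; 0 for the first
	for remaining := size; remaining > 0; {
		chunk := u.Consume(fileID, remaining, speed)
		start := time.Now()
		if err := uploadChunk(chunk); err != nil {
			u.Return(fileID) // hand borrowed memory back on failure
			return err
		}
		speed = float64(chunk) / time.Since(start).Seconds()
		remaining -= chunk
	}
	u.Return(fileID) // release this file's borrowed share for other transfers
	return nil
}

Fast transfers report a high speed and so borrow more of the shared pool on the next Consume, while slow ones stay near the reserved minimum.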
backend/s3/s3.go (719 changed lines)
File diff suppressed because it is too large
@@ -17,17 +17,17 @@ func TestShouldAllowShutdownTwice(t *testing.T) {
 }
 
 func TestRenewalInTimeLimit(t *testing.T) {
-	var count int64
+	var count atomic.Int64
 
 	renew := NewRenew(100*time.Millisecond, func() error {
-		atomic.AddInt64(&count, 1)
+		count.Add(1)
 		return nil
 	})
 	time.Sleep(time.Second)
 	renew.Shutdown()
 
 	// there's no guarantee the CI agent can handle a simple goroutine
-	renewCount := atomic.LoadInt64(&count)
+	renewCount := count.Load()
 	t.Logf("renew count = %d", renewCount)
 	assert.Greater(t, renewCount, int64(0))
 	assert.Less(t, renewCount, int64(11))
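Many hunks in this release are the same mechanical migration from the sync/atomic package functions to the atomic types added in Go 1.19. A standalone before/after illustration (not taken from the diff):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	// Old style: a plain int64 plus package-level helpers. Nothing stops
	// accidental non-atomic access such as old++.
	var old int64
	atomic.AddInt64(&old, 1)

	// New style: atomic.Int64 can only be touched atomically, and removes
	// the 64-bit alignment concerns on 32-bit platforms.
	var counter atomic.Int64
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			counter.Add(1)
		}()
	}
	wg.Wait()
	fmt.Println(atomic.LoadInt64(&old), counter.Load()) // 1 4
}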
@@ -27,7 +27,6 @@ import (
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/config/obscure"
-	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/env"
 	"github.com/rclone/rclone/lib/pacer"
@@ -169,7 +168,19 @@ E.g. if shared folders can be found in directories representing volumes:
 
 E.g. if home directory can be found in a shared folder called "home":
 
-    rclone sync /home/local/directory remote:/home/directory --sftp-path-override /volume1/homes/USER/directory`,
+    rclone sync /home/local/directory remote:/home/directory --sftp-path-override /volume1/homes/USER/directory
+
+To specify only the path to the SFTP remote's root, and allow rclone to add any relative subpaths automatically (including unwrapping/decrypting remotes as necessary), add the '@' character to the beginning of the path.
+
+E.g. the first example above could be rewritten as:
+
+    rclone sync /home/local/directory remote:/directory --sftp-path-override @/volume2
+
+Note that when using this method with Synology "home" folders, the full "/homes/USER" path should be specified instead of "/home".
+
+E.g. the second example above should be rewritten as:
+
+    rclone sync /home/local/directory remote:/homes/USER/directory --sftp-path-override @/volume1`,
 	Advanced: true,
 }, {
 	Name: "set_modtime",
@@ -221,7 +232,16 @@ E.g. if home directory can be found in a shared folder called "home":
 	Default: "",
 	Help: `Specifies the path or command to run a sftp server on the remote host.
 
-The subsystem option is ignored when server_command is defined.`,
+The subsystem option is ignored when server_command is defined.
+
+If adding server_command to the configuration file please note that
+it should not be enclosed in quotes, since that will make rclone fail.
+
+A working example is:
+
+    [remote_name]
+    type = sftp
+    server_command = sudo /usr/libexec/openssh/sftp-server`,
 	Advanced: true,
 }, {
 	Name: "use_fstat",
@@ -388,6 +408,47 @@ Example:
     ssh-ed25519 ssh-rsa ssh-dss
 `,
 	Advanced: true,
+}, {
+	Name:    "ssh",
+	Default: fs.SpaceSepList{},
+	Help: `Path and arguments to external ssh binary.
+
+Normally rclone will use its internal ssh library to connect to the
+SFTP server. However it does not implement all possible ssh options so
+it may be desirable to use an external ssh binary.
+
+Rclone ignores all the internal config if you use this option and
+expects you to configure the ssh binary with the user/host/port and
+any other options you need.
+
+**Important** The ssh command must log in without asking for a
+password so needs to be configured with keys or certificates.
+
+Rclone will run the command supplied either with the additional
+arguments "-s sftp" to access the SFTP subsystem or with commands such
+as "md5sum /path/to/file" appended to read checksums.
+
+Any arguments with spaces in should be surrounded by "double quotes".
+
+An example setting might be:
+
+    ssh -o ServerAliveInterval=20 user@example.com
+
+Note that when using an external ssh binary rclone makes a new ssh
+connection for every hash it calculates.
+`,
+}, {
+	Name:    "socks_proxy",
+	Default: "",
+	Help: `Socks 5 proxy host.
+
+Supports the format user:pass@host:port, user@host:port, host:port.
+
+Example:
+
+    myUser:myPass@localhost:9005
+`,
+	Advanced: true,
 }},
 }
 fs.Register(fsi)
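Putting the new option together, a hypothetical remote that delegates the transport to an external OpenSSH binary might be configured like this (remote name and host invented):

    [myserver]
    type = sftp
    ssh = ssh -o ServerAliveInterval=20 user@example.com

Per the NewFs warning added further down, user/host/port should then be left out of the config and carried on the ssh command line instead.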
@@ -427,6 +488,8 @@ type Options struct {
 	KeyExchange       fs.SpaceSepList `config:"key_exchange"`
 	MACs              fs.SpaceSepList `config:"macs"`
 	HostKeyAlgorithms fs.SpaceSepList `config:"host_key_algorithms"`
+	SSH               fs.SpaceSepList `config:"ssh"`
+	SocksProxy        string          `config:"socks_proxy"`
 }
 
 // Fs stores the interface to the remote SFTP files
@@ -449,7 +512,7 @@ type Fs struct {
 	drain     *time.Timer // used to drain the pool when we stop using the connections
 	pacer     *fs.Pacer   // pacer for operations
 	savedpswd string
-	sessions  int32 // count in use sessions
+	sessions  atomic.Int32 // count in use sessions
 }
 
 // Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
@@ -463,41 +526,16 @@ type Object struct {
 	sha1sum *string // Cached SHA1 checksum
 }
 
-// dial starts a client connection to the given SSH server. It is a
-// convenience function that connects to the given network address,
-// initiates the SSH handshake, and then sets up a Client.
-func (f *Fs) dial(ctx context.Context, network, addr string, sshConfig *ssh.ClientConfig) (*ssh.Client, error) {
-	dialer := fshttp.NewDialer(ctx)
-	conn, err := dialer.Dial(network, addr)
-	if err != nil {
-		return nil, err
-	}
-	c, chans, reqs, err := ssh.NewClientConn(conn, addr, sshConfig)
-	if err != nil {
-		return nil, err
-	}
-	fs.Debugf(f, "New connection %s->%s to %q", c.LocalAddr(), c.RemoteAddr(), c.ServerVersion())
-	return ssh.NewClient(c, chans, reqs), nil
-}
-
 // conn encapsulates an ssh client and corresponding sftp client
 type conn struct {
-	sshClient  *ssh.Client
+	sshClient  sshClient
 	sftpClient *sftp.Client
 	err        chan error
 }
 
 // Wait for connection to close
 func (c *conn) wait() {
-	c.err <- c.sshClient.Conn.Wait()
-}
-
-// Send a keepalive over the ssh connection
-func (c *conn) sendKeepAlive() {
-	_, _, err := c.sshClient.SendRequest("keepalive@openssh.com", true, nil)
-	if err != nil {
-		fs.Debugf(nil, "Failed to send keep alive: %v", err)
-	}
+	c.err <- c.sshClient.Wait()
 }
 
 // Send keepalives every interval over the ssh connection until done is closed
@@ -509,7 +547,7 @@ func (c *conn) sendKeepAlives(interval time.Duration) (done chan struct{}) {
 		for {
 			select {
 			case <-t.C:
-				c.sendKeepAlive()
+				c.sshClient.SendKeepAlive()
 			case <-done:
 				return
 			}
@@ -542,17 +580,17 @@ func (c *conn) closed() error {
 //
 // Call removeSession() when done
 func (f *Fs) addSession() {
-	atomic.AddInt32(&f.sessions, 1)
+	f.sessions.Add(1)
 }
 
 // Show the ssh session is no longer in use
 func (f *Fs) removeSession() {
-	atomic.AddInt32(&f.sessions, -1)
+	f.sessions.Add(-1)
 }
 
 // getSessions shows whether there are any sessions in use
 func (f *Fs) getSessions() int32 {
-	return atomic.LoadInt32(&f.sessions)
+	return f.sessions.Load()
 }
 
 // Open a new connection to the SFTP server.
@@ -561,7 +599,11 @@ func (f *Fs) sftpConnection(ctx context.Context) (c *conn, err error) {
 	c = &conn{
 		err: make(chan error, 1),
 	}
-	c.sshClient, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port, f.config)
+	if len(f.opt.SSH) == 0 {
+		c.sshClient, err = f.newSSHClientInternal(ctx, "tcp", f.opt.Host+":"+f.opt.Port, f.config)
+	} else {
+		c.sshClient, err = f.newSSHClientExternal()
+	}
 	if err != nil {
 		return nil, fmt.Errorf("couldn't connect SSH: %w", err)
 	}
@@ -575,7 +617,7 @@ func (f *Fs) sftpConnection(ctx context.Context) (c *conn, err error) {
 }
 
 // Set any environment variables on the ssh.Session
-func (f *Fs) setEnv(s *ssh.Session) error {
+func (f *Fs) setEnv(s sshSession) error {
 	for _, env := range f.opt.SetEnv {
 		equal := strings.IndexRune(env, '=')
 		if equal < 0 {
@@ -592,8 +634,8 @@ func (f *Fs) setEnv(s sshSession) error {
 
 // Creates a new SFTP client on conn, using the specified subsystem
 // or sftp server, and zero or more option functions
-func (f *Fs) newSftpClient(conn *ssh.Client, opts ...sftp.ClientOption) (*sftp.Client, error) {
-	s, err := conn.NewSession()
+func (f *Fs) newSftpClient(client sshClient, opts ...sftp.ClientOption) (*sftp.Client, error) {
+	s, err := client.NewSession()
 	if err != nil {
 		return nil, err
 	}
@@ -666,6 +708,9 @@ func (f *Fs) getSftpConnection(ctx context.Context) (c *conn, err error) {
 // Getwd request
 func (f *Fs) putSftpConnection(pc **conn, err error) {
 	c := *pc
+	if !c.sshClient.CanReuse() {
+		return
+	}
 	*pc = nil
 	if err != nil {
 		// work out if this is an expected error
@@ -744,6 +789,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if err != nil {
 		return nil, err
 	}
+	if len(opt.SSH) != 0 && ((opt.User != currentUser && opt.User != "") || opt.Host != "" || (opt.Port != "22" && opt.Port != "")) {
+		fs.Logf(name, "--sftp-ssh is in use - ignoring user/host/port from config - set in the parameters to --sftp-ssh (remove them from the config to silence this warning)")
+	}
+
 	if opt.User == "" {
 		opt.User = currentUser
 	}
@@ -796,7 +845,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	pubkeyFile := env.ShellExpand(opt.PubKeyFile)
 	//keyPem := env.ShellExpand(opt.KeyPem)
 	// Add ssh agent-auth if no password or file or key PEM specified
-	if (opt.Pass == "" && keyFile == "" && !opt.AskPassword && opt.KeyPem == "") || opt.KeyUseAgent {
+	if (len(opt.SSH) == 0 && opt.Pass == "" && keyFile == "" && !opt.AskPassword && opt.KeyPem == "") || opt.KeyUseAgent {
 		sshAgentClient, _, err := sshagent.New()
 		if err != nil {
 			return nil, fmt.Errorf("couldn't connect to ssh-agent: %w", err)
@@ -1016,8 +1065,8 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
 		fs.Debugf(f, "Failed to get shell session for shell type detection command: %v", err)
 	} else {
 		var stdout, stderr bytes.Buffer
-		session.Stdout = &stdout
-		session.Stderr = &stderr
+		session.SetStdout(&stdout)
+		session.SetStderr(&stderr)
 		shellCmd := "echo ${ShellId}%ComSpec%"
 		fs.Debugf(f, "Running shell type detection remote command: %s", shellCmd)
 		err = session.Run(shellCmd)
@@ -1427,8 +1476,8 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
 	}()
 
 	var stdout, stderr bytes.Buffer
-	session.Stdout = &stdout
-	session.Stderr = &stderr
+	session.SetStdout(&stdout)
+	session.SetStderr(&stderr)
 
 	fs.Debugf(f, "Running remote command: %s", cmd)
 	err = session.Run(cmd)
@@ -1735,6 +1784,9 @@ func (f *Fs) remotePath(remote string) string {
 func (f *Fs) remoteShellPath(remote string) string {
 	if f.opt.PathOverride != "" {
 		shellPath := path.Join(f.opt.PathOverride, remote)
+		if f.opt.PathOverride[0] == '@' {
+			shellPath = path.Join(strings.TrimPrefix(f.opt.PathOverride, "@"), f.absRoot, remote)
+		}
 		fs.Debugf(f, "Shell path redirected to %q with option path_override", shellPath)
 		return shellPath
 	}
@@ -1992,9 +2044,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	if err != nil {
 		return fmt.Errorf("Update: %w", err)
 	}
+	// Hang on to the connection for the whole upload so it doesn't get re-used while we are uploading
 	file, err := c.sftpClient.OpenFile(o.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
-	o.fs.putSftpConnection(&c, err)
 	if err != nil {
+		o.fs.putSftpConnection(&c, err)
 		return fmt.Errorf("Update Create failed: %w", err)
 	}
 	// remove the file if upload failed
@@ -2014,14 +2067,18 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	}
 	_, err = file.ReadFrom(&sizeReader{Reader: in, size: src.Size()})
 	if err != nil {
+		o.fs.putSftpConnection(&c, err)
 		remove()
 		return fmt.Errorf("Update ReadFrom failed: %w", err)
 	}
 	err = file.Close()
 	if err != nil {
+		o.fs.putSftpConnection(&c, err)
 		remove()
 		return fmt.Errorf("Update Close failed: %w", err)
 	}
+	// Release connection only when upload has finished so we don't upload multiple files on the same connection
+	o.fs.putSftpConnection(&c, err)
 
 	// Set the mod time - this stats the object if o.fs.opt.SetModTime == true
 	err = o.SetModTime(ctx, src.ModTime(ctx))
@@ -30,3 +30,13 @@ func TestIntegration2(t *testing.T) {
 		NilObject: (*sftp.Object)(nil),
 	})
 }
+
+func TestIntegration3(t *testing.T) {
+	if *fstest.RemoteName != "" {
+		t.Skip("skipping as -remote is set")
+	}
+	fstests.Run(t, &fstests.Opt{
+		RemoteName: "TestSFTPRcloneSSH:",
+		NilObject:  (*sftp.Object)(nil),
+	})
+}
backend/sftp/ssh.go (new file, 73 lines)
@@ -0,0 +1,73 @@
//go:build !plan9
// +build !plan9

package sftp

import "io"

// Interfaces for ssh client and session implemented in ssh_internal.go and ssh_external.go

// An interface for an ssh client to abstract over internal ssh library and external binary
type sshClient interface {
	// Wait blocks until the connection has shut down, and returns the
	// error causing the shutdown.
	Wait() error

	// SendKeepAlive sends a keepalive message to keep the connection open
	SendKeepAlive()

	// Close the connection
	Close() error

	// NewSession opens a new sshSession for this sshClient. (A
	// session is a remote execution of a program.)
	NewSession() (sshSession, error)

	// CanReuse indicates if this client can be reused
	CanReuse() bool
}

// An interface for an ssh session to abstract over internal ssh library and external binary
type sshSession interface {
	// Setenv sets an environment variable that will be applied to any
	// command executed by Shell or Run.
	Setenv(name, value string) error

	// Start runs cmd on the remote host. Typically, the remote
	// server passes cmd to the shell for interpretation.
	// A Session only accepts one call to Run, Start or Shell.
	Start(cmd string) error

	// StdinPipe returns a pipe that will be connected to the
	// remote command's standard input when the command starts.
	StdinPipe() (io.WriteCloser, error)

	// StdoutPipe returns a pipe that will be connected to the
	// remote command's standard output when the command starts.
	// There is a fixed amount of buffering that is shared between
	// stdout and stderr streams. If the StdoutPipe reader is
	// not serviced fast enough it may eventually cause the
	// remote command to block.
	StdoutPipe() (io.Reader, error)

	// RequestSubsystem requests the association of a subsystem
	// with the session on the remote host. A subsystem is a
	// predefined command that runs in the background when the ssh
	// session is initiated
	RequestSubsystem(subsystem string) error

	// Run runs cmd on the remote host. Typically, the remote
	// server passes cmd to the shell for interpretation.
	// A Session only accepts one call to Run, Start, Shell, Output,
	// or CombinedOutput.
	Run(cmd string) error

	// Close the session
	Close() error

	// Set the stdout
	SetStdout(io.Writer)

	// Set the stderr
	SetStderr(io.Writer)
}
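As an aside (not part of the release), one payoff of this pair of interfaces is testability: a hypothetical no-op double can satisfy sshSession without any network, mirroring the compile-time checks used throughout these files. A minimal sketch:

// Hypothetical no-op double for sshSession, for use inside package sftp tests.
type stubSession struct {
	stdout io.Writer
	stderr io.Writer
}

func (s *stubSession) Setenv(name, value string) error         { return nil }
func (s *stubSession) Start(cmd string) error                  { return nil }
func (s *stubSession) StdinPipe() (io.WriteCloser, error)      { return nil, nil }
func (s *stubSession) StdoutPipe() (io.Reader, error)          { return nil, nil }
func (s *stubSession) RequestSubsystem(subsystem string) error { return nil }
func (s *stubSession) Run(cmd string) error                    { return nil }
func (s *stubSession) Close() error                            { return nil }
func (s *stubSession) SetStdout(wr io.Writer)                  { s.stdout = wr }
func (s *stubSession) SetStderr(wr io.Writer)                  { s.stderr = wr }

// Same compile-time interface check pattern as the real implementations.
var _ sshSession = (*stubSession)(nil)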
backend/sftp/ssh_external.go (new file, 223 lines)
@@ -0,0 +1,223 @@
//go:build !plan9
// +build !plan9

package sftp

import (
	"context"
	"errors"
	"fmt"
	"io"
	"os/exec"
	"strings"

	"github.com/rclone/rclone/fs"
)

// Implement the sshClient interface for external ssh programs
type sshClientExternal struct {
	f       *Fs
	session *sshSessionExternal
}

func (f *Fs) newSSHClientExternal() (sshClient, error) {
	return &sshClientExternal{f: f}, nil
}

// Wait for connection to close
func (s *sshClientExternal) Wait() error {
	if s.session == nil {
		return nil
	}
	return s.session.Wait()
}

// Send a keepalive over the ssh connection
func (s *sshClientExternal) SendKeepAlive() {
	// Up to the user to configure -o ServerAliveInterval=20 on their ssh connections
}

// Close the connection
func (s *sshClientExternal) Close() error {
	if s.session == nil {
		return nil
	}
	return s.session.Close()
}

// NewSession makes a new external SSH connection
func (s *sshClientExternal) NewSession() (sshSession, error) {
	session := s.f.newSSHSessionExternal()
	if s.session == nil {
		fs.Debugf(s.f, "ssh external: creating additional session")
	}
	return session, nil
}

// CanReuse indicates if this client can be reused
func (s *sshClientExternal) CanReuse() bool {
	if s.session == nil {
		return true
	}
	exited := s.session.exited()
	canReuse := !exited && s.session.runningSFTP
	// fs.Debugf(s.f, "ssh external: CanReuse %v, exited=%v runningSFTP=%v", canReuse, exited, s.session.runningSFTP)
	return canReuse
}

// Check interfaces
var _ sshClient = &sshClientExternal{}

// implement the sshSession interface for external ssh binary
type sshSessionExternal struct {
	f           *Fs
	cmd         *exec.Cmd
	cancel      func()
	startCalled bool
	runningSFTP bool
}

func (f *Fs) newSSHSessionExternal() *sshSessionExternal {
	s := &sshSessionExternal{
		f: f,
	}

	// Make a cancellation function for this to call in Close()
	ctx, cancel := context.WithCancel(context.Background())
	s.cancel = cancel

	// Connect to a remote host and request the sftp subsystem via
	// the 'ssh' command. This assumes that passwordless login is
	// correctly configured.
	ssh := append([]string(nil), s.f.opt.SSH...)
	s.cmd = exec.CommandContext(ctx, ssh[0], ssh[1:]...)

	// Allow the command a short time only to shut down
	// FIXME enable when we get rid of go1.19
	// s.cmd.WaitDelay = time.Second

	return s
}

// Setenv sets an environment variable that will be applied to any
// command executed by Shell or Run.
func (s *sshSessionExternal) Setenv(name, value string) error {
	return errors.New("ssh external: can't set environment variables")
}

const requestSubsystem = "***Subsystem***:"

// Start runs cmd on the remote host. Typically, the remote
// server passes cmd to the shell for interpretation.
// A Session only accepts one call to Run, Start or Shell.
func (s *sshSessionExternal) Start(cmd string) error {
	if s.startCalled {
		return errors.New("internal error: ssh external: command already running")
	}
	s.startCalled = true

	// Adjust the args
	if strings.HasPrefix(cmd, requestSubsystem) {
		s.cmd.Args = append(s.cmd.Args, "-s", cmd[len(requestSubsystem):])
		s.runningSFTP = true
	} else {
		s.cmd.Args = append(s.cmd.Args, cmd)
		s.runningSFTP = false
	}

	fs.Debugf(s.f, "ssh external: running: %v", fs.SpaceSepList(s.cmd.Args))

	// start the process
	err := s.cmd.Start()
	if err != nil {
		return fmt.Errorf("ssh external: start process: %w", err)
	}

	return nil
}

// RequestSubsystem requests the association of a subsystem
// with the session on the remote host. A subsystem is a
// predefined command that runs in the background when the ssh
// session is initiated
func (s *sshSessionExternal) RequestSubsystem(subsystem string) error {
	return s.Start(requestSubsystem + subsystem)
}

// StdinPipe returns a pipe that will be connected to the
// remote command's standard input when the command starts.
func (s *sshSessionExternal) StdinPipe() (io.WriteCloser, error) {
	rd, err := s.cmd.StdinPipe()
	if err != nil {
		return nil, fmt.Errorf("ssh external: stdin pipe: %w", err)
	}
	return rd, nil
}

// StdoutPipe returns a pipe that will be connected to the
// remote command's standard output when the command starts.
// There is a fixed amount of buffering that is shared between
// stdout and stderr streams. If the StdoutPipe reader is
// not serviced fast enough it may eventually cause the
// remote command to block.
func (s *sshSessionExternal) StdoutPipe() (io.Reader, error) {
	wr, err := s.cmd.StdoutPipe()
	if err != nil {
		return nil, fmt.Errorf("ssh external: stdout pipe: %w", err)
	}
	return wr, nil
}

// Return whether the command has finished or not
func (s *sshSessionExternal) exited() bool {
	return s.cmd.ProcessState != nil
}

// Wait for the command to exit
func (s *sshSessionExternal) Wait() error {
	if s.exited() {
		return nil
	}
	err := s.cmd.Wait()
	if err == nil {
		fs.Debugf(s.f, "ssh external: command exited OK")
	} else {
		fs.Debugf(s.f, "ssh external: command exited with error: %v", err)
	}
	return err
}

// Run runs cmd on the remote host. Typically, the remote
// server passes cmd to the shell for interpretation.
// A Session only accepts one call to Run, Start, Shell, Output,
// or CombinedOutput.
func (s *sshSessionExternal) Run(cmd string) error {
	err := s.Start(cmd)
	if err != nil {
		return err
	}
	return s.Wait()
}

// Close the external ssh
func (s *sshSessionExternal) Close() error {
	fs.Debugf(s.f, "ssh external: close")
	// Cancel the context which kills the process
	s.cancel()
	// Wait for it to finish
	_ = s.Wait()
	return nil
}

// Set the stdout
func (s *sshSessionExternal) SetStdout(wr io.Writer) {
	s.cmd.Stdout = wr
}

// Set the stderr
func (s *sshSessionExternal) SetStderr(wr io.Writer) {
	s.cmd.Stderr = wr
}

// Check interfaces
var _ sshSession = &sshSessionExternal{}
backend/sftp/ssh_internal.go (new file, 101 lines)
@@ -0,0 +1,101 @@
//go:build !plan9
// +build !plan9

package sftp

import (
	"context"
	"io"
	"net"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/lib/proxy"
	"golang.org/x/crypto/ssh"
)

// Internal ssh connections with "golang.org/x/crypto/ssh"

type sshClientInternal struct {
	srv *ssh.Client
}

// newSSHClientInternal starts a client connection to the given SSH server. It is a
// convenience function that connects to the given network address,
// initiates the SSH handshake, and then sets up a Client.
func (f *Fs) newSSHClientInternal(ctx context.Context, network, addr string, sshConfig *ssh.ClientConfig) (sshClient, error) {
	baseDialer := fshttp.NewDialer(ctx)
	var (
		conn net.Conn
		err  error
	)
	if f.opt.SocksProxy != "" {
		conn, err = proxy.SOCKS5Dial(network, addr, f.opt.SocksProxy, baseDialer)
	} else {
		conn, err = baseDialer.Dial(network, addr)
	}
	if err != nil {
		return nil, err
	}
	c, chans, reqs, err := ssh.NewClientConn(conn, addr, sshConfig)
	if err != nil {
		return nil, err
	}
	fs.Debugf(f, "New connection %s->%s to %q", c.LocalAddr(), c.RemoteAddr(), c.ServerVersion())
	srv := ssh.NewClient(c, chans, reqs)
	return sshClientInternal{srv}, nil
}

// Wait for connection to close
func (s sshClientInternal) Wait() error {
	return s.srv.Conn.Wait()
}

// Send a keepalive over the ssh connection
func (s sshClientInternal) SendKeepAlive() {
	_, _, err := s.srv.SendRequest("keepalive@openssh.com", true, nil)
	if err != nil {
		fs.Debugf(nil, "Failed to send keep alive: %v", err)
	}
}

// Close the connection
func (s sshClientInternal) Close() error {
	return s.srv.Close()
}

// CanReuse indicates if this client can be reused
func (s sshClientInternal) CanReuse() bool {
	return true
}

// Check interfaces
var _ sshClient = sshClientInternal{}

// Thin wrapper for *ssh.Session to implement sshSession interface
type sshSessionInternal struct {
	*ssh.Session
}

// Set the stdout
func (s sshSessionInternal) SetStdout(wr io.Writer) {
	s.Session.Stdout = wr
}

// Set the stderr
func (s sshSessionInternal) SetStderr(wr io.Writer) {
	s.Session.Stderr = wr
}

// NewSession makes an sshSession from an sshClient
func (s sshClientInternal) NewSession() (sshSession, error) {
	session, err := s.srv.NewSession()
	if err != nil {
		return nil, err
	}
	return sshSessionInternal{Session: session}, nil
}

// Check interfaces
var _ sshSession = sshSessionInternal{}
@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 	"net"
-	"sync/atomic"
 	"time"
 
 	smb2 "github.com/hirochachacha/go-smb2"
@@ -89,17 +88,17 @@ func (c *conn) closed() bool {
 //
 // Call removeSession() when done
 func (f *Fs) addSession() {
-	atomic.AddInt32(&f.sessions, 1)
+	f.sessions.Add(1)
 }
 
 // Show the SMB session is no longer in use
 func (f *Fs) removeSession() {
-	atomic.AddInt32(&f.sessions, -1)
+	f.sessions.Add(-1)
 }
 
 // getSessions shows whether there are any sessions in use
 func (f *Fs) getSessions() int32 {
-	return atomic.LoadInt32(&f.sessions)
+	return f.sessions.Load()
 }
 
 // Open a new connection to the SMB server.
@@ -9,6 +9,7 @@ import (
 	"path"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/rclone/rclone/fs"
@@ -140,7 +141,7 @@ type Fs struct {
 	features *fs.Features // optional features
 	pacer    *fs.Pacer    // pacer for operations
 
-	sessions int32
+	sessions atomic.Int32
 	poolMu   sync.Mutex
 	pool     []*conn
 	drain    *time.Timer // used to drain the pool when we stop using the connections
@@ -475,6 +476,45 @@ func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err error) {
 	return usage, nil
 }
 
+// OpenWriterAt opens with a handle for random access writes
+//
+// Pass in the remote desired and the size if known.
+//
+// It truncates any existing object
+func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
+	var err error
+	o := &Object{
+		fs:     f,
+		remote: remote,
+	}
+	share, filename := o.split()
+	if share == "" || filename == "" {
+		return nil, fs.ErrorIsDir
+	}
+
+	err = o.fs.ensureDirectory(ctx, share, filename)
+	if err != nil {
+		return nil, fmt.Errorf("failed to make parent directories: %w", err)
+	}
+
+	filename = o.fs.toSambaPath(filename)
+
+	o.fs.addSession() // Show session in use
+	defer o.fs.removeSession()
+
+	cn, err := o.fs.getConnection(ctx, share)
+	if err != nil {
+		return nil, err
+	}
+
+	fl, err := cn.smbShare.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open: %w", err)
+	}
+
+	return fl, nil
+}
+
 // Shutdown the backend, closing any background tasks and any
 // cached connections.
 func (f *Fs) Shutdown(ctx context.Context) error {
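OpenWriterAt is the hook that rclone's multi-thread copy machinery discovers through the optional-features table. A hedged sketch of a caller, assuming an existing ctx and Fs (the remote path is invented):

// Sketch: random-access writes through the optional OpenWriterAt feature.
if do := f.Features().OpenWriterAt; do != nil {
	w, err := do(ctx, "share/dir/file.bin", 8)
	if err != nil {
		return err
	}
	defer w.Close()
	if _, err := w.WriteAt([]byte("tail"), 4); err != nil { // regions may land in any order
		return err
	}
	if _, err := w.WriteAt([]byte("head"), 0); err != nil {
		return err
	}
}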
@@ -24,6 +24,7 @@ import (
 
 	"storj.io/uplink"
 	"storj.io/uplink/edge"
+	"storj.io/uplink/private/testuplink"
 )
 
 const (
@@ -276,6 +277,8 @@ func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
 		UserAgent: "rclone",
 	}
 
+	ctx = testuplink.WithConcurrentSegmentUploadsDefaultConfig(ctx)
+
 	project, err = cfg.OpenProject(ctx, f.access)
 	if err != nil {
 		return nil, fmt.Errorf("storj: project: %w", err)
@@ -561,7 +561,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *swift.O
 	// returned as 0 bytes in the listing. Correct this here by
 	// making sure we read the full metadata for all 0 byte files.
 	// We don't read the metadata for directory marker objects.
-	if info != nil && info.Bytes == 0 && info.ContentType != "application/directory" {
+	if info != nil && info.Bytes == 0 && info.ContentType != "application/directory" && !o.fs.opt.NoLargeObjects {
 		err := o.readMetaData(ctx) // reads info and headers, returning an error
 		if err == fs.ErrorObjectNotFound {
 			// We have a dangling large object here so just return the original metadata
@@ -17,8 +17,9 @@ import (
 // This is a wrapped object which returns the Union Fs as its parent
 type Object struct {
 	*upstream.Object
-	fs *Fs // what this object is part of
-	co []upstream.Entry
+	fs          *Fs // what this object is part of
+	co          []upstream.Entry
+	writebackMu sync.Mutex
 }
 
 // Directory describes a union Directory
@@ -34,6 +35,13 @@ type entry interface {
 	candidates() []upstream.Entry
 }
 
+// Update o with the contents of newO excluding the lock
+func (o *Object) update(newO *Object) {
+	o.Object = newO.Object
+	o.fs = newO.fs
+	o.co = newO.co
+}
+
 // UnWrapUpstream returns the upstream Object that this Object is wrapping
 func (o *Object) UnWrapUpstream() *upstream.Object {
 	return o.Object
@@ -67,7 +75,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			return err
 		}
 		// Update current object
-		*o = *newO.(*Object)
+		o.update(newO.(*Object))
 		return nil
 	} else if err != nil {
 		return err
@@ -175,6 +183,25 @@ func (o *Object) SetTier(tier string) error {
 	return do.SetTier(tier)
 }
 
+// Open opens the file for read. Call Close() on the returned io.ReadCloser
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
+	// Need some sort of locking to prevent multiple downloads
+	o.writebackMu.Lock()
+	defer o.writebackMu.Unlock()
+
+	// FIXME what if correct object is already in o.co
+
+	newObj, err := o.Object.Writeback(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if newObj != nil {
+		o.Object = newObj
+		o.co = append(o.co, newObj) // FIXME should this append or overwrite or update?
+	}
+	return o.Object.Object.Open(ctx, options...)
+}
+
 // ModTime returns the modification date of the directory
 // It returns the latest ModTime of all candidates
 func (d *Directory) ModTime(ctx context.Context) (t time.Time) {
@@ -877,6 +877,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		opt:       *opt,
 		upstreams: usedUpstreams,
 	}
+	err = upstream.Prepare(f.upstreams)
+	if err != nil {
+		return nil, err
+	}
 	f.actionPolicy, err = policy.Get(opt.ActionPolicy)
 	if err != nil {
 		return nil, err
@@ -12,7 +12,7 @@ import (
 )
 
 var (
-	unimplementableFsMethods     = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "PublicLink", "PutUnchecked", "MergeDirs", "OpenWriterAt"}
+	unimplementableFsMethods     = []string{"UnWrap", "WrapFs", "SetWrapper", "UserInfo", "Disconnect", "PublicLink", "PutUnchecked", "MergeDirs", "OpenWriterAt", "OpenChunkWriter"}
 	unimplementableObjectMethods = []string{}
 )
@@ -16,6 +16,7 @@ import (
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/cache"
 	"github.com/rclone/rclone/fs/fspath"
+	"github.com/rclone/rclone/fs/operations"
 )
 
 var (
@@ -25,10 +26,6 @@ var (
 
 // Fs is a wrap of any fs and its configs
 type Fs struct {
-	// In order to ensure memory alignment on 32-bit architectures
-	// when this field is accessed through sync/atomic functions,
-	// it must be the first entry in the struct
-	cacheExpiry int64 // usage cache expiry time
 	fs.Fs
 	RootFs   fs.Fs
 	RootPath string
@@ -37,9 +34,12 @@ type Fs struct {
 	creatable bool
 	usage     *fs.Usage     // Cache the usage
 	cacheTime time.Duration // cache duration
+	cacheExpiry atomic.Int64 // usage cache expiry time
 	cacheMutex  sync.RWMutex
 	cacheOnce   sync.Once
 	cacheUpdate bool // if the cache is updating
+	writeback   bool // writeback to this upstream
+	writebackFs *Fs  // if non zero, writeback to this upstream
 }
 
 // Directory describes a wrapped Directory
@@ -73,14 +73,14 @@ func New(ctx context.Context, remote, root string, opt *common.Options) (*Fs, er
 		return nil, err
 	}
 	f := &Fs{
-		RootPath:    strings.TrimRight(root, "/"),
-		Opt:         opt,
-		writable:    true,
-		creatable:   true,
-		cacheExpiry: time.Now().Unix(),
-		cacheTime:   time.Duration(opt.CacheTime) * time.Second,
-		usage:       &fs.Usage{},
+		RootPath:  strings.TrimRight(root, "/"),
+		Opt:       opt,
+		writable:  true,
+		creatable: true,
+		cacheTime: time.Duration(opt.CacheTime) * time.Second,
+		usage:     &fs.Usage{},
 	}
+	f.cacheExpiry.Store(time.Now().Unix())
 	if strings.HasSuffix(fsPath, ":ro") {
 		f.writable = false
 		f.creatable = false
@@ -89,6 +89,9 @@ func New(ctx context.Context, remote, root string, opt *common.Options) (*Fs, er
 		f.writable = true
 		f.creatable = false
 		fsPath = fsPath[0 : len(fsPath)-3]
+	} else if strings.HasSuffix(fsPath, ":writeback") {
+		f.writeback = true
+		fsPath = fsPath[0 : len(fsPath)-len(":writeback")]
 	}
 	remote = configName + fsPath
 	rFs, err := cache.Get(ctx, remote)
@@ -106,6 +109,29 @@ func New(ctx context.Context, remote, root string, opt *common.Options) (*Fs, er
 	return f, err
 }
 
+// Prepare the configured upstreams as a group
+func Prepare(fses []*Fs) error {
+	writebacks := 0
+	var writebackFs *Fs
+	for _, f := range fses {
+		if f.writeback {
+			writebackFs = f
+			writebacks++
+		}
+	}
+	if writebacks == 0 {
+		return nil
+	} else if writebacks > 1 {
+		return fmt.Errorf("can only have 1 :writeback not %d", writebacks)
+	}
+	for _, f := range fses {
+		if !f.writeback {
+			f.writebackFs = writebackFs
+		}
+	}
+	return nil
+}
+
 // WrapDirectory wraps an fs.Directory to include the info
 // of the upstream Fs
 func (f *Fs) WrapDirectory(e fs.Directory) *Directory {
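The :writeback suffix parsed above is what users attach to exactly one upstream in the union config. An illustrative setup (remote names invented) where an object found only on slow: is copied back to fast: when it is opened:

    [myunion]
    type = union
    upstreams = slow: fast:writeback

Prepare enforces the single-writeback constraint and points every other upstream's writebackFs at it, which is what the Writeback method below relies on.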
@@ -296,9 +322,31 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
 	return do.Metadata(ctx)
 }
 
+// Writeback writes the object back and returns a new object
+//
+// If it returns nil, nil then the original object is OK
+func (o *Object) Writeback(ctx context.Context) (*Object, error) {
+	if o.f.writebackFs == nil {
+		return nil, nil
+	}
+	newObj, err := operations.Copy(ctx, o.f.writebackFs.Fs, nil, o.Object.Remote(), o.Object)
+	if err != nil {
+		return nil, err
+	}
+	// newObj could be nil here
+	if newObj == nil {
+		fs.Errorf(o, "nil Object returned from operations.Copy")
+		return nil, nil
+	}
+	return &Object{
+		Object: newObj,
+		f:      o.f,
+	}, err
+}
+
 // About gets quota information from the Fs
 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
-	if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
+	if f.cacheExpiry.Load() <= time.Now().Unix() {
 		err := f.updateUsage()
 		if err != nil {
 			return nil, ErrUsageFieldNotSupported
@@ -313,7 +361,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 //
 // This is returned as 0..math.MaxInt64-1 leaving math.MaxInt64 as a sentinel
 func (f *Fs) GetFreeSpace() (int64, error) {
-	if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
+	if f.cacheExpiry.Load() <= time.Now().Unix() {
 		err := f.updateUsage()
 		if err != nil {
 			return math.MaxInt64 - 1, ErrUsageFieldNotSupported
@@ -331,7 +379,7 @@ func (f *Fs) GetFreeSpace() (int64, error) {
 //
 // This is returned as 0..math.MaxInt64-1 leaving math.MaxInt64 as a sentinel
 func (f *Fs) GetUsedSpace() (int64, error) {
-	if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
+	if f.cacheExpiry.Load() <= time.Now().Unix() {
 		err := f.updateUsage()
 		if err != nil {
 			return 0, ErrUsageFieldNotSupported
@@ -347,7 +395,7 @@ func (f *Fs) GetUsedSpace() (int64, error) {
 
 // GetNumObjects get the number of objects of the fs
 func (f *Fs) GetNumObjects() (int64, error) {
-	if atomic.LoadInt64(&f.cacheExpiry) <= time.Now().Unix() {
+	if f.cacheExpiry.Load() <= time.Now().Unix() {
 		err := f.updateUsage()
 		if err != nil {
 			return 0, ErrUsageFieldNotSupported
@@ -402,7 +450,7 @@ func (f *Fs) updateUsageCore(lock bool) error {
 		defer f.cacheMutex.Unlock()
 	}
 	// Store usage
-	atomic.StoreInt64(&f.cacheExpiry, time.Now().Add(f.cacheTime).Unix())
+	f.cacheExpiry.Store(time.Now().Add(f.cacheTime).Unix())
 	f.usage = usage
 	return nil
 }
@@ -121,7 +121,7 @@ func (o *Object) uploadChunks(ctx context.Context, in0 io.Reader, size int64, pa
 
 	getBody := func() (io.ReadCloser, error) {
 		// RepeatableReader{} plays well with accounting so rewinding doesn't make the progress buggy
-		if _, err := in.Seek(0, io.SeekStart); err == nil {
+		if _, err := in.Seek(0, io.SeekStart); err != nil {
 			return nil, err
 		}
@@ -203,7 +203,7 @@ func (o *Object) purgeUploadedChunks(ctx context.Context, uploadDir string) erro
 	resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)
 
 	// directory doesn't exist, no need to purge
-	if resp.StatusCode == http.StatusNotFound {
+	if resp != nil && resp.StatusCode == http.StatusNotFound {
 		return false, nil
 	}
@@ -17,7 +17,6 @@ import (
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
|
||||
"github.com/rclone/rclone/backend/zoho/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
@@ -1169,31 +1168,8 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	if o.id == "" {
 		return nil, errors.New("can't download - no id")
 	}
-	var start, end int64 = 0, o.size
-	partialContent := false
-	for _, option := range options {
-		switch x := option.(type) {
-		case *fs.SeekOption:
-			start = x.Offset
-			partialContent = true
-		case *fs.RangeOption:
-			if x.Start >= 0 {
-				start = x.Start
-				if x.End > 0 && x.End < o.size {
-					end = x.End + 1
-				}
-			} else {
-				// {-1, 20} should load the last 20 characters [len-20:len]
-				start = o.size - x.End
-			}
-			partialContent = true
-		default:
-			if option.Mandatory() {
-				fs.Logf(nil, "Unsupported mandatory option: %v", option)
-			}
-		}
-	}
 	var resp *http.Response
+	fs.FixRangeOption(options, o.size)
 	opts := rest.Opts{
 		Method: "GET",
 		Path:   "/download/" + o.id,
@@ -1206,20 +1182,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	if err != nil {
 		return nil, err
 	}
-	if partialContent && resp.StatusCode == 200 && resp.Header.Get("Content-Range") == "" {
-		if start > 0 {
-			// We need to read and discard the beginning of the data...
-			_, err = io.CopyN(io.Discard, resp.Body, start)
-			if err != nil {
-				if resp != nil {
-					_ = resp.Body.Close()
-				}
-				return nil, err
-			}
-		}
-		// ... and return a limited reader for the remaining of the data
-		return readers.NewLimitedReadCloser(resp.Body, end-start), nil
-	}
 	return resp.Body, nil
 }
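The deleted code re-implemented HTTP range handling on the client side; fs.FixRangeOption normalises any SeekOption and open-ended RangeOption in the options slice into concrete ranges, so the rest client can emit a proper Range header and the body can be returned as-is. A hedged sketch of the resulting backend pattern (fields abbreviated):

// Sketch of the pattern the new code follows.
fs.FixRangeOption(options, o.size)
opts := rest.Opts{
	Method:  "GET",
	Path:    "/download/" + o.id,
	Options: options, // the rest client turns these into request headers
}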
@@ -6,3 +6,4 @@
 <abhi18av@users.noreply.github.com>
 <ankur0493@gmail.com>
 <agupta@egnyte.com>
+<ricci@disroot.org>
@@ -25,6 +25,7 @@ docs = [
 	"flags.md",
 	"docker.md",
 	"bisync.md",
+	"release_signing.md",
 
 	# Keep these alphabetical by full name
 	"fichier.md",
@@ -61,12 +62,15 @@ docs = [
 	"opendrive.md",
 	"oracleobjectstorage.md",
 	"qingstor.md",
+	"quatrix.md",
 	"sia.md",
 	"swift.md",
 	"pcloud.md",
 	"pikpak.md",
 	"premiumizeme.md",
+	"protondrive.md",
 	"putio.md",
-	"protondrive.md",
 	"seafile.md",
 	"sftp.md",
 	"smb.md",
@@ -22,8 +22,8 @@ var (
 func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
-	flags.BoolVarP(cmdFlags, &jsonOutput, "json", "", false, "Format output as JSON")
-	flags.BoolVarP(cmdFlags, &fullOutput, "full", "", false, "Full numbers instead of human-readable")
+	flags.BoolVarP(cmdFlags, &jsonOutput, "json", "", false, "Format output as JSON", "")
+	flags.BoolVarP(cmdFlags, &fullOutput, "full", "", false, "Full numbers instead of human-readable", "")
 }
 
 // printValue formats uv to be output
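The mechanical change running through these cmd files is the extra trailing string on every flags helper: the flag-registration functions gained a final groups parameter used to group flags in the help output, with "" meaning ungrouped. For example (the group name here is illustrative, not from the diff):

flags.BoolVarP(cmdFlags, &jsonOutput, "json", "", false, "Format output as JSON", "Important")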
@@ -95,6 +95,7 @@ see complete list in [documentation](https://rclone.org/overview/#optional-features),
 `,
 	Annotations: map[string]string{
 		"versionIntroduced": "v1.41",
+		// "groups": "",
 	},
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(1, 1, command, args)
@@ -18,8 +18,8 @@ var (
 func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
-	flags.BoolVarP(cmdFlags, &noAutoBrowser, "auth-no-open-browser", "", false, "Do not automatically open auth link in default browser")
-	flags.StringVarP(cmdFlags, &template, "template", "", "", "The path to a custom Go template for generating HTML responses")
+	flags.BoolVarP(cmdFlags, &noAutoBrowser, "auth-no-open-browser", "", false, "Do not automatically open auth link in default browser", "")
+	flags.StringVarP(cmdFlags, &template, "template", "", "", "The path to a custom Go template for generating HTML responses", "")
 }
 
 var commandDefinition = &cobra.Command{
@@ -36,6 +36,7 @@ link in default browser automatically.
 Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.`,
 	Annotations: map[string]string{
 		"versionIntroduced": "v1.27",
+		// "groups": "",
 	},
 	RunE: func(command *cobra.Command, args []string) error {
 		cmd.CheckArgs(1, 3, command, args)
@@ -24,8 +24,8 @@ var (
 func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
-	flags.StringArrayVarP(cmdFlags, &options, "option", "o", options, "Option in the form name=value or name")
-	flags.BoolVarP(cmdFlags, &useJSON, "json", "", useJSON, "Always output in JSON format")
+	flags.StringArrayVarP(cmdFlags, &options, "option", "o", options, "Option in the form name=value or name", "")
+	flags.BoolVarP(cmdFlags, &useJSON, "json", "", useJSON, "Always output in JSON format", "")
 }
 
 var commandDefinition = &cobra.Command{
@@ -60,6 +60,7 @@ Note to run these commands on a running backend then see
 `,
 	Annotations: map[string]string{
 		"versionIntroduced": "v1.52",
+		"groups":            "Important",
 	},
 	RunE: func(command *cobra.Command, args []string) error {
 		cmd.CheckArgs(2, 1e6, command, args)
@@ -614,6 +614,8 @@ func (b *bisyncTest) runBisync(ctx context.Context, args []string) (err error) {
 			opt.DryRun = true
 		case "force":
 			opt.Force = true
+		case "create-empty-src-dirs":
+			opt.CreateEmptySrcDirs = true
 		case "remove-empty-dirs":
 			opt.RemoveEmptyDirs = true
 		case "check-sync-only":
@@ -1163,6 +1165,10 @@ func (b *bisyncTest) newReplacer(mangle bool) *strings.Replacer {
 		b.workDir + slash, "{workdir/}",
 		b.path1, "{path1/}",
 		b.path2, "{path2/}",
+		"//?/" + strings.TrimSuffix(strings.Replace(b.path1, slash, "/", -1), "/"), "{path1}", // fix windows-specific issue
+		"//?/" + strings.TrimSuffix(strings.Replace(b.path2, slash, "/", -1), "/"), "{path2}",
+		strings.TrimSuffix(b.path1, slash), "{path1}", // ensure it's still recognized without trailing slash
+		strings.TrimSuffix(b.path2, slash), "{path2}",
 		b.sessionName, "{session}",
 	}
 	if fixSlash {
@@ -27,18 +27,21 @@ import (
 
 // Options keep bisync options
 type Options struct {
-	Resync          bool
-	CheckAccess     bool
-	CheckFilename   string
-	CheckSync       CheckSyncMode
-	RemoveEmptyDirs bool
-	MaxDelete       int // percentage from 0 to 100
-	Force           bool
-	FiltersFile     string
-	Workdir         string
-	DryRun          bool
-	NoCleanup       bool
-	SaveQueues      bool // save extra debugging files (test only flag)
+	Resync                bool
+	CheckAccess           bool
+	CheckFilename         string
+	CheckSync             CheckSyncMode
+	CreateEmptySrcDirs    bool
+	RemoveEmptyDirs       bool
+	MaxDelete             int // percentage from 0 to 100
+	Force                 bool
+	FiltersFile           string
+	Workdir               string
+	DryRun                bool
+	NoCleanup             bool
+	SaveQueues            bool // save extra debugging files (test only flag)
+	IgnoreListingChecksum bool
+	Resilient             bool
 }
 
 // Default values
@@ -98,16 +101,19 @@ var Opt Options
 func init() {
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
-	flags.BoolVarP(cmdFlags, &Opt.Resync, "resync", "1", Opt.Resync, "Performs the resync run. Path1 files may overwrite Path2 versions. Consider using --verbose or --dry-run first.")
-	flags.BoolVarP(cmdFlags, &Opt.CheckAccess, "check-access", "", Opt.CheckAccess, makeHelp("Ensure expected {CHECKFILE} files are found on both Path1 and Path2 filesystems, else abort."))
-	flags.StringVarP(cmdFlags, &Opt.CheckFilename, "check-filename", "", Opt.CheckFilename, makeHelp("Filename for --check-access (default: {CHECKFILE})"))
-	flags.BoolVarP(cmdFlags, &Opt.Force, "force", "", Opt.Force, "Bypass --max-delete safety check and run the sync. Consider using with --verbose")
-	flags.FVarP(cmdFlags, &Opt.CheckSync, "check-sync", "", "Controls comparison of final listings: true|false|only (default: true)")
-	flags.BoolVarP(cmdFlags, &Opt.RemoveEmptyDirs, "remove-empty-dirs", "", Opt.RemoveEmptyDirs, "Remove empty directories at the final cleanup step.")
-	flags.StringVarP(cmdFlags, &Opt.FiltersFile, "filters-file", "", Opt.FiltersFile, "Read filtering patterns from a file")
-	flags.StringVarP(cmdFlags, &Opt.Workdir, "workdir", "", Opt.Workdir, makeHelp("Use custom working dir - useful for testing. (default: {WORKDIR})"))
-	flags.BoolVarP(cmdFlags, &tzLocal, "localtime", "", tzLocal, "Use local time in listings (default: UTC)")
-	flags.BoolVarP(cmdFlags, &Opt.NoCleanup, "no-cleanup", "", Opt.NoCleanup, "Retain working files (useful for troubleshooting and testing).")
+	flags.BoolVarP(cmdFlags, &Opt.Resync, "resync", "1", Opt.Resync, "Performs the resync run. Path1 files may overwrite Path2 versions. Consider using --verbose or --dry-run first.", "")
+	flags.BoolVarP(cmdFlags, &Opt.CheckAccess, "check-access", "", Opt.CheckAccess, makeHelp("Ensure expected {CHECKFILE} files are found on both Path1 and Path2 filesystems, else abort."), "")
+	flags.StringVarP(cmdFlags, &Opt.CheckFilename, "check-filename", "", Opt.CheckFilename, makeHelp("Filename for --check-access (default: {CHECKFILE})"), "")
+	flags.BoolVarP(cmdFlags, &Opt.Force, "force", "", Opt.Force, "Bypass --max-delete safety check and run the sync. Consider using with --verbose", "")
+	flags.FVarP(cmdFlags, &Opt.CheckSync, "check-sync", "", "Controls comparison of final listings: true|false|only (default: true)", "")
+	flags.BoolVarP(cmdFlags, &Opt.CreateEmptySrcDirs, "create-empty-src-dirs", "", Opt.CreateEmptySrcDirs, "Sync creation and deletion of empty directories. (Not compatible with --remove-empty-dirs)", "")
+	flags.BoolVarP(cmdFlags, &Opt.RemoveEmptyDirs, "remove-empty-dirs", "", Opt.RemoveEmptyDirs, "Remove ALL empty directories at the final cleanup step.", "")
+	flags.StringVarP(cmdFlags, &Opt.FiltersFile, "filters-file", "", Opt.FiltersFile, "Read filtering patterns from a file", "")
+	flags.StringVarP(cmdFlags, &Opt.Workdir, "workdir", "", Opt.Workdir, makeHelp("Use custom working dir - useful for testing. (default: {WORKDIR})"), "")
+	flags.BoolVarP(cmdFlags, &tzLocal, "localtime", "", tzLocal, "Use local time in listings (default: UTC)", "")
+	flags.BoolVarP(cmdFlags, &Opt.NoCleanup, "no-cleanup", "", Opt.NoCleanup, "Retain working files (useful for troubleshooting and testing).", "")
+	flags.BoolVarP(cmdFlags, &Opt.IgnoreListingChecksum, "ignore-listing-checksum", "", Opt.IgnoreListingChecksum, "Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)", "")
+	flags.BoolVarP(cmdFlags, &Opt.Resilient, "resilient", "", Opt.Resilient, "Allow future runs to retry after certain less-serious errors, instead of requiring --resync. Use at your own risk!", "")
 }
 
 // bisync command definition
@@ -117,6 +123,7 @@ var commandDefinition = &cobra.Command{
 	Long: longHelp,
 	Annotations: map[string]string{
 		"versionIntroduced": "v1.58",
+		"groups":            "Filter,Copy,Important",
 	},
 	RunE: func(command *cobra.Command, args []string) error {
 		cmd.CheckArgs(2, 2, command, args)
@@ -209,9 +216,13 @@ func (opt *Options) applyFilters(ctx context.Context) (context.Context, error) {
 	}
 
 	if opt.Resync {
-		fs.Infof(nil, "Storing filters file hash to %s", hashFile)
-		if err := os.WriteFile(hashFile, []byte(gotHash), bilib.PermSecure); err != nil {
-			return ctx, err
+		if opt.DryRun {
+			fs.Infof(nil, "Skipped storing filters file hash to %s as --dry-run is set", hashFile)
+		} else {
+			fs.Infof(nil, "Storing filters file hash to %s", hashFile)
+			if err := os.WriteFile(hashFile, []byte(gotHash), bilib.PermSecure); err != nil {
+				return ctx, err
+			}
 		}
 	}
@@ -3,13 +3,18 @@
 package bisync
 
 import (
+	"bytes"
 	"context"
+	"fmt"
 	"path/filepath"
 	"sort"
+	"strings"
 
 	"github.com/rclone/rclone/cmd/bisync/bilib"
+	"github.com/rclone/rclone/cmd/check"
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/filter"
 	"github.com/rclone/rclone/fs/operations"
 )
@@ -90,6 +95,47 @@ func (ds *deltaSet) printStats() {
|
||||
ds.msg, nAll, nNew, nNewer, nOlder, nDeleted)
|
||||
}
|
||||
|
||||
// check potential conflicts (to avoid renaming if already identical)
|
||||
func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter.Filter, fs1, fs2 fs.Fs) (bilib.Names, error) {
|
||||
matches := bilib.Names{}
|
||||
if filterCheck.HaveFilesFrom() {
|
||||
fs.Debugf(nil, "There are potential conflicts to check.")
|
||||
|
||||
opt, close, checkopterr := check.GetCheckOpt(b.fs1, b.fs2)
|
||||
if checkopterr != nil {
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
fs.Debugf(nil, "GetCheckOpt error: %v", checkopterr)
|
||||
return matches, checkopterr
|
||||
}
|
||||
defer close()
|
||||
|
||||
opt.Match = new(bytes.Buffer)
|
||||
|
||||
// TODO: consider using custom CheckFn to act like cryptcheck, if either fs is a crypt remote and -c has been passed
|
||||
// note that cryptCheck() is not currently exported
|
||||
|
||||
fs.Infof(nil, "Checking potential conflicts...")
|
||||
check := operations.Check(ctxCheck, opt)
|
||||
fs.Infof(nil, "Finished checking the potential conflicts. %s", check)
|
||||
|
||||
//reset error count, because we don't want to count check errors as bisync errors
|
||||
accounting.Stats(ctxCheck).ResetErrors()
|
||||
|
||||
//return the list of identical files to check against later
|
||||
if len(fmt.Sprint(opt.Match)) > 0 {
|
||||
matches = bilib.ToNames(strings.Split(fmt.Sprint(opt.Match), "\n"))
|
||||
}
|
||||
if matches.NotEmpty() {
|
||||
fs.Debugf(nil, "The following potential conflicts were determined to be identical. %v", matches)
|
||||
} else {
|
||||
fs.Debugf(nil, "None of the conflicts were determined to be identical.")
|
||||
}
|
||||
|
||||
}
|
||||
return matches, nil
|
||||
}
|
||||
|
||||
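The Match buffer wired up in checkconflicts receives one identical-file path per line from operations.Check; those names are what later exempt a "conflict" from being renamed. A standalone sketch of just that parsing step, using a plain map in place of bilib.Names:

package main

import (
    "bytes"
    "fmt"
    "strings"
)

func main() {
    // operations.Check writes one matching (identical) path per line.
    match := bytes.NewBufferString("file5.txt\nsubdir/file9.txt\n")

    // Split into a set, skipping the trailing empty element.
    matches := map[string]bool{}
    for _, name := range strings.Split(match.String(), "\n") {
        if name != "" {
            matches[name] = true
        }
    }
    fmt.Println(matches["file5.txt"]) // true -> leave this "conflict" alone
}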
// findDeltas
func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing, newListing, msg string) (ds *deltaSet, err error) {
    var old, now *fileList
@@ -183,6 +229,52 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change

    ctxMove := b.opt.setDryRun(ctx)

    // efficient isDir check
    // we load the listing just once and store only the dirs
    dirs1, dirs1Err := b.listDirsOnly(1)
    if dirs1Err != nil {
        b.critical = true
        b.retryable = true
        fs.Debugf(nil, "Error generating dirsonly list for path1: %v", dirs1Err)
        return
    }

    dirs2, dirs2Err := b.listDirsOnly(2)
    if dirs2Err != nil {
        b.critical = true
        b.retryable = true
        fs.Debugf(nil, "Error generating dirsonly list for path2: %v", dirs2Err)
        return
    }

    // build a list of only the "deltaOther"s so we don't have to check more files than necessary
    // this is essentially the same as running rclone check with a --files-from filter, then exempting the --match results from being renamed
    // we therefore avoid having to list the same directory more than once.

    // we are intentionally overriding DryRun here because we need to perform the check, even during a dry run, or the results would be inaccurate.
    // check is a read-only operation by its nature, so it's already "dry" in that sense.
    ctxNew, ciCheck := fs.AddConfig(ctx)
    ciCheck.DryRun = false

    ctxCheck, filterCheck := filter.AddConfig(ctxNew)

    for _, file := range ds1.sort() {
        d1 := ds1.deltas[file]
        if d1.is(deltaOther) {
            d2 := ds2.deltas[file]
            if d2.is(deltaOther) {
                if err := filterCheck.AddFile(file); err != nil {
                    fs.Debugf(nil, "Non-critical error adding file to list of potential conflicts to check: %s", err)
                } else {
                    fs.Debugf(nil, "Added file to list of potential conflicts to check: %s", file)
                }
            }
        }
    }

    //if there are potential conflicts to check, check them all here (outside the loop) in one fell swoop
    matches, err := b.checkconflicts(ctxCheck, filterCheck, b.fs1, b.fs2)

    for _, file := range ds1.sort() {
        p1 := path1 + file
        p2 := path2 + file
@@ -199,22 +291,34 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
            handled.Add(file)
        } else if d2.is(deltaOther) {
            b.indent("!WARNING", file, "New or changed in both paths")
            b.indent("!Path1", p1+"..path1", "Renaming Path1 copy")
            if err = operations.MoveFile(ctxMove, b.fs1, b.fs1, file+"..path1", file); err != nil {
                err = fmt.Errorf("path1 rename failed for %s: %w", p1, err)
                b.critical = true
                return
            }
            b.indent("!Path1", p2+"..path1", "Queue copy to Path2")
            copy1to2.Add(file + "..path1")

            b.indent("!Path2", p2+"..path2", "Renaming Path2 copy")
            if err = operations.MoveFile(ctxMove, b.fs2, b.fs2, file+"..path2", file); err != nil {
                err = fmt.Errorf("path2 rename failed for %s: %w", file, err)
                return
            //if files are identical, leave them alone instead of renaming
            if dirs1.has(file) && dirs2.has(file) {
                fs.Debugf(nil, "This is a directory, not a file. Skipping equality check and will not rename: %s", file)
            } else {
                equal := matches.Has(file)
                if equal {
                    fs.Infof(nil, "Files are equal! Skipping: %s", file)
                } else {
                    fs.Debugf(nil, "Files are NOT equal: %s", file)
                    b.indent("!Path1", p1+"..path1", "Renaming Path1 copy")
                    if err = operations.MoveFile(ctxMove, b.fs1, b.fs1, file+"..path1", file); err != nil {
                        err = fmt.Errorf("path1 rename failed for %s: %w", p1, err)
                        b.critical = true
                        return
                    }
                    b.indent("!Path1", p2+"..path1", "Queue copy to Path2")
                    copy1to2.Add(file + "..path1")

                    b.indent("!Path2", p2+"..path2", "Renaming Path2 copy")
                    if err = operations.MoveFile(ctxMove, b.fs2, b.fs2, file+"..path2", file); err != nil {
                        err = fmt.Errorf("path2 rename failed for %s: %w", file, err)
                        return
                    }
                    b.indent("!Path2", p1+"..path2", "Queue copy to Path1")
                    copy2to1.Add(file + "..path2")
                }
            }
            b.indent("!Path2", p1+"..path2", "Queue copy to Path1")
            copy2to1.Add(file + "..path2")
            handled.Add(file)
        }
    } else {
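When the two copies genuinely differ, each side is renamed with a path suffix and queued to be copied across, so both versions survive on both paths. A trivial sketch of the naming convention (hypothetical helper, for illustration only):

package main

import "fmt"

// conflictNames shows the suffixes bisync attaches when a file changed on
// both sides and the two copies are not identical.
func conflictNames(file string) (path1Copy, path2Copy string) {
    return file + "..path1", file + "..path2"
}

func main() {
    a, b := conflictNames("file5.txt")
    fmt.Println(a, b) // file5.txt..path1 file5.txt..path2
}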
@@ -258,6 +362,9 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
        if err != nil {
            return
        }

        //copy empty dirs from path2 to path1 (if --create-empty-src-dirs)
        b.syncEmptyDirs(ctx, b.fs1, copy2to1, dirs2, "make")
    }

    if copy1to2.NotEmpty() {
@@ -267,6 +374,9 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
        if err != nil {
            return
        }

        //copy empty dirs from path1 to path2 (if --create-empty-src-dirs)
        b.syncEmptyDirs(ctx, b.fs2, copy1to2, dirs1, "make")
    }

    if delete1.NotEmpty() {
@@ -276,6 +386,9 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
        if err != nil {
            return
        }

        //propagate deletions of empty dirs from path2 to path1 (if --create-empty-src-dirs)
        b.syncEmptyDirs(ctx, b.fs1, delete1, dirs1, "remove")
    }

    if delete2.NotEmpty() {
@@ -285,6 +398,9 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
        if err != nil {
            return
        }

        //propagate deletions of empty dirs from path1 to path2 (if --create-empty-src-dirs)
        b.syncEmptyDirs(ctx, b.fs2, delete2, dirs2, "remove")
    }

    return
@@ -27,11 +27,16 @@ var rcHelp = makeHelp(`This takes the following parameters
- checkFilename - file name for checkAccess (default: {CHECKFILE})
- maxDelete - abort sync if percentage of deleted files is above
  this threshold (default: {MAXDELETE})
- force - maxDelete safety check and run the sync
- force - Bypass maxDelete safety check and run the sync
- checkSync - |true| by default, |false| disables comparison of final listings,
  |only| will skip sync, only compare listings from the last run
- createEmptySrcDirs - Sync creation and deletion of empty directories.
  (Not compatible with --remove-empty-dirs)
- removeEmptyDirs - remove empty directories at the final cleanup step
- filtersFile - read filtering patterns from a file
- ignoreListingChecksum - Do not use checksums for listings
- resilient - Allow future runs to retry after certain less-serious errors, instead of requiring resync.
  Use at your own risk!
- workdir - server directory for history files (default: {WORKDIR})
- noCleanup - retain working files
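Assuming the call is registered as sync/bisync (as the rc.go changes below suggest) and an rclone rc daemon is listening on its default address, a bisync run with these parameters could be triggered as sketched here; the parameter names follow the list above, everything else is illustrative:

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
)

func main() {
    // Parameter names follow the rcHelp list above; path1/path2 are the
    // two bisync roots. Values here are placeholders.
    params := map[string]any{
        "path1":              "/tmp/path1",
        "path2":              "/tmp/path2",
        "dryRun":             true,
        "resilient":          true,
        "createEmptySrcDirs": true,
    }
    body, err := json.Marshal(params)
    if err != nil {
        panic(err)
    }
    // Assumes `rclone rcd` is listening on the default localhost:5572
    // and that the call is registered as "sync/bisync".
    resp, err := http.Post("http://localhost:5572/sync/bisync", "application/json", bytes.NewReader(body))
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    fmt.Println(resp.Status)
}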
@@ -43,10 +43,11 @@ var tzLocal = false

// fileInfo describes a file
type fileInfo struct {
    size int64
    time time.Time
    hash string
    id   string
    size  int64
    time  time.Time
    hash  string
    id    string
    flags string
}

// fileList represents a listing
@@ -76,17 +77,18 @@ func (ls *fileList) get(file string) *fileInfo {
    return ls.info[file]
}

func (ls *fileList) put(file string, size int64, time time.Time, hash, id string) {
func (ls *fileList) put(file string, size int64, time time.Time, hash, id string, flags string) {
    fi := ls.get(file)
    if fi != nil {
        fi.size = size
        fi.time = time
    } else {
        fi = &fileInfo{
            size: size,
            time: time,
            hash: hash,
            id:   id,
            size:  size,
            time:  time,
            hash:  hash,
            id:    id,
            flags: flags,
        }
        ls.info[file] = fi
        ls.list = append(ls.list, file)
@@ -152,7 +154,11 @@ func (ls *fileList) save(ctx context.Context, listing string) error {
        id = "-"
    }

    flags := "-"
    flags := fi.flags
    if flags == "" {
        flags = "-"
    }

    _, err = fmt.Fprintf(file, lineFormat, flags, fi.size, hash, id, time, remote)
    if err != nil {
        _ = file.Close()
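The golden listings later in this diff show the resulting row shape: flags, size, hash, id, modtime, quoted name. A sketch that prints a file row and a directory row in roughly that style (the exact lineFormat string is internal to bisync, so this only approximates it):

package main

import "fmt"

func main() {
    // Approximation of bisync's listing row: flags, size, hash, id, modtime, name.
    const approxFormat = "%s %8d %s %s %s %q\n"
    fmt.Printf(approxFormat, "-", 19, "md5:7fe98ed88552b828777d8630900346b8", "-",
        "2001-01-02T00:00:00.000000000+0000", "file10.txt")
    // Directories get flags "d" and size 0 (not -1, which would look like a Google Doc).
    fmt.Printf(approxFormat, "d", 0, "-", "-",
        "2000-01-01T00:00:00.000000000+0000", "subdir")
}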
@@ -217,7 +223,7 @@ func (b *bisyncRun) loadListing(listing string) (*fileList, error) {
        }
    }

    if flags != "-" || id != "-" || sizeErr != nil || timeErr != nil || hashErr != nil || nameErr != nil {
    if (flags != "-" && flags != "d") || id != "-" || sizeErr != nil || timeErr != nil || hashErr != nil || nameErr != nil {
        fs.Logf(listing, "Ignoring incorrect line: %q", line)
        continue
    }
@@ -229,7 +235,7 @@ func (b *bisyncRun) loadListing(listing string) (*fileList, error) {
        }
    }

    ls.put(nameVal, sizeVal, timeVal.In(TZ), hashVal, id)
    ls.put(nameVal, sizeVal, timeVal.In(TZ), hashVal, id, flags)
}

return ls, nil
@@ -253,15 +259,20 @@ func (b *bisyncRun) makeListing(ctx context.Context, f fs.Fs, listing string) (l
    ci := fs.GetConfig(ctx)
    depth := ci.MaxDepth
    hashType := hash.None
    if !ci.IgnoreChecksum {
        // Currently bisync just honors --ignore-checksum
    if !b.opt.IgnoreListingChecksum {
        // Currently bisync just honors --ignore-listing-checksum
        // (note that this is different from --ignore-checksum)
        // TODO add full support for checksums and related flags
        hashType = f.Hashes().GetOne()
    }
    ls = newFileList()
    ls.hash = hashType
    var lock sync.Mutex
    err = walk.ListR(ctx, f, "", false, depth, walk.ListObjects, func(entries fs.DirEntries) error {
    listType := walk.ListObjects
    if b.opt.CreateEmptySrcDirs {
        listType = walk.ListAll
    }
    err = walk.ListR(ctx, f, "", false, depth, listType, func(entries fs.DirEntries) error {
        var firstErr error
        entries.ForObject(func(o fs.Object) {
            //tr := accounting.Stats(ctx).NewCheckingTransfer(o) // TODO
@@ -276,12 +287,27 @@ func (b *bisyncRun) makeListing(ctx context.Context, f fs.Fs, listing string) (l
                }
            }
            time := o.ModTime(ctx).In(TZ)
            id := "" // TODO
            id := ""     // TODO
            flags := "-" // "-" for a file and "d" for a directory
            lock.Lock()
            ls.put(o.Remote(), o.Size(), time, hashVal, id)
            ls.put(o.Remote(), o.Size(), time, hashVal, id, flags)
            lock.Unlock()
            //tr.Done(ctx, nil) // TODO
        })
        if b.opt.CreateEmptySrcDirs {
            entries.ForDir(func(o fs.Directory) {
                var (
                    hashVal string
                )
                time := o.ModTime(ctx).In(TZ)
                id := ""     // TODO
                flags := "d" // "-" for a file and "d" for a directory
                lock.Lock()
                //record size as 0 instead of -1, so bisync doesn't think it's a google doc
                ls.put(o.Remote(), 0, time, hashVal, id, flags)
                lock.Unlock()
            })
        }
        return firstErr
    })
    if err == nil {
@@ -300,5 +326,53 @@ func (b *bisyncRun) checkListing(ls *fileList, listing, msg string) error {
    }
    fs.Errorf(nil, "Empty %s listing. Cannot sync to an empty directory: %s", msg, listing)
    b.critical = true
    b.retryable = true
    return fmt.Errorf("empty %s listing: %s", msg, listing)
}
// listingNum should be 1 for path1 or 2 for path2
func (b *bisyncRun) loadListingNum(listingNum int) (*fileList, error) {
    listingpath := b.basePath + ".path1.lst-new"
    if listingNum == 2 {
        listingpath = b.basePath + ".path2.lst-new"
    }

    if b.opt.DryRun {
        listingpath = strings.Replace(listingpath, ".lst-", ".lst-dry-", 1)
    }

    fs.Debugf(nil, "loading listing for path %d at: %s", listingNum, listingpath)
    return b.loadListing(listingpath)
}

func (b *bisyncRun) listDirsOnly(listingNum int) (*fileList, error) {
    var fulllisting *fileList
    var dirsonly = newFileList()
    var err error

    if !b.opt.CreateEmptySrcDirs {
        return dirsonly, err
    }

    fulllisting, err = b.loadListingNum(listingNum)

    if err != nil {
        b.critical = true
        b.retryable = true
        fs.Debugf(nil, "Error loading listing to generate dirsonly list: %v", err)
        return dirsonly, err
    }

    for _, obj := range fulllisting.list {
        info := fulllisting.get(obj)

        if info.flags == "d" {
            fs.Debugf(nil, "found a dir: %s", obj)
            dirsonly.put(obj, info.size, info.time, info.hash, info.id, info.flags)
        } else {
            fs.Debugf(nil, "not a dir: %s", obj)
        }
    }

    return dirsonly, err
}
@@ -25,13 +25,14 @@ var ErrBisyncAborted = errors.New("bisync aborted")

// bisyncRun keeps bisync runtime state
type bisyncRun struct {
    fs1      fs.Fs
    fs2      fs.Fs
    abort    bool
    critical bool
    basePath string
    workDir  string
    opt      *Options
    fs1       fs.Fs
    fs2       fs.Fs
    abort     bool
    critical  bool
    retryable bool
    basePath  string
    workDir   string
    opt       *Options
}

// Bisync handles lock file, performs bisync run and checks exit status
@@ -123,14 +124,19 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
    }

    if b.critical {
        if bilib.FileExists(listing1) {
            _ = os.Rename(listing1, listing1+"-err")
        if b.retryable && b.opt.Resilient {
            fs.Errorf(nil, "Bisync critical error: %v", err)
            fs.Errorf(nil, "Bisync aborted. Error is retryable without --resync due to --resilient mode.")
        } else {
            if bilib.FileExists(listing1) {
                _ = os.Rename(listing1, listing1+"-err")
            }
            if bilib.FileExists(listing2) {
                _ = os.Rename(listing2, listing2+"-err")
            }
            fs.Errorf(nil, "Bisync critical error: %v", err)
            fs.Errorf(nil, "Bisync aborted. Must run --resync to recover.")
        }
        if bilib.FileExists(listing2) {
            _ = os.Rename(listing2, listing2+"-err")
        }
        fs.Errorf(nil, "Bisync critical error: %v", err)
        fs.Errorf(nil, "Bisync aborted. Must run --resync to recover.")
        return ErrBisyncAborted
    }
    if b.abort {
@@ -152,6 +158,7 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
    fs.Infof(nil, "Validating listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2))
    if err = b.checkSync(listing1, listing2); err != nil {
        b.critical = true
        b.retryable = true
    }
    return err
}
@@ -176,6 +183,7 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
    var fctx context.Context
    if fctx, err = b.opt.applyFilters(octx); err != nil {
        b.critical = true
        b.retryable = true
        return
    }

@@ -188,6 +196,7 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
    if !bilib.FileExists(listing1) || !bilib.FileExists(listing2) {
        // On prior critical error abort, the prior listings are renamed to .lst-err to lock out further runs
        b.critical = true
        b.retryable = true
        return errors.New("cannot find prior Path1 or Path2 listings, likely due to critical error on prior run")
    }

@@ -215,6 +224,7 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
        err = b.checkAccess(ds1.checkFiles, ds2.checkFiles)
        if err != nil {
            b.critical = true
            b.retryable = true
            return
        }
    }
@@ -255,6 +265,7 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
        changes1, changes2, err = b.applyDeltas(octx, ds1, ds2)
        if err != nil {
            b.critical = true
            // b.retryable = true // not sure about this one
            return err
        }
    }
@@ -283,6 +294,7 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
    }
    if err != nil {
        b.critical = true
        b.retryable = true
        return err
    }

@@ -310,6 +322,7 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
    }
    if err != nil {
        b.critical = true
        b.retryable = true
        return err
    }
}
@@ -341,6 +354,39 @@ func (b *bisyncRun) resync(octx, fctx context.Context, listing1, listing2 string
        return err
    }

    // Check access health on the Path1 and Path2 filesystems
    // enforce even though this is --resync
    if b.opt.CheckAccess {
        fs.Infof(nil, "Checking access health")

        ds1 := &deltaSet{
            checkFiles: bilib.Names{},
        }

        ds2 := &deltaSet{
            checkFiles: bilib.Names{},
        }

        for _, file := range filesNow1.list {
            if filepath.Base(file) == b.opt.CheckFilename {
                ds1.checkFiles.Add(file)
            }
        }

        for _, file := range filesNow2.list {
            if filepath.Base(file) == b.opt.CheckFilename {
                ds2.checkFiles.Add(file)
            }
        }

        err = b.checkAccess(ds1.checkFiles, ds2.checkFiles)
        if err != nil {
            b.critical = true
            b.retryable = true
            return err
        }
    }

    copy2to1 := []string{}
    for _, file := range filesNow2.list {
        if !filesNow1.has(file) {
@@ -367,11 +413,34 @@ func (b *bisyncRun) resync(octx, fctx context.Context, listing1, listing2 string
        // prevent overwriting Google Doc files (their size is -1)
        filterSync.Opt.MinSize = 0
    }
    if err = sync.Sync(ctxSync, b.fs2, b.fs1, false); err != nil {
    if err = sync.CopyDir(ctxSync, b.fs2, b.fs1, b.opt.CreateEmptySrcDirs); err != nil {
        b.critical = true
        return err
    }

    if b.opt.CreateEmptySrcDirs {
        // copy Path2 back to Path1, for empty dirs
        // the fastCopy above cannot include directories, because it relies on --files-from for filtering,
        // so instead we'll copy them here, relying on fctx for our filtering.

        // This preserves the original resync order for backward compatibility. It is essentially:
        // rclone copy Path2 Path1 --ignore-existing
        // rclone copy Path1 Path2 --create-empty-src-dirs
        // rclone copy Path2 Path1 --create-empty-src-dirs

        // although if we were starting from scratch, it might be cleaner and faster to just do:
        // rclone copy Path2 Path1 --create-empty-src-dirs
        // rclone copy Path1 Path2 --create-empty-src-dirs

        fs.Infof(nil, "Resynching Path2 to Path1 (for empty dirs)")

        //note copy (not sync) and dst comes before src
        if err = sync.CopyDir(ctxSync, b.fs1, b.fs2, b.opt.CreateEmptySrcDirs); err != nil {
            b.critical = true
            return err
        }
    }

    fs.Infof(nil, "Resync updating listings")
    if _, err = b.makeListing(fctx, b.fs1, listing1); err != nil {
        b.critical = true
@@ -3,6 +3,7 @@ package bisync
import (
    "context"
    "fmt"
    "sort"

    "github.com/rclone/rclone/cmd/bisync/bilib"
    "github.com/rclone/rclone/fs"
@@ -23,7 +24,7 @@ func (b *bisyncRun) fastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.
        }
    }

    return sync.CopyDir(ctxCopy, fdst, fsrc, false)
    return sync.CopyDir(ctxCopy, fdst, fsrc, b.opt.CreateEmptySrcDirs)
}

func (b *bisyncRun) fastDelete(ctx context.Context, f fs.Fs, files bilib.Names, queueName string) error {
@@ -32,7 +33,14 @@ func (b *bisyncRun) fastDelete(ctx context.Context, f fs.Fs, files bilib.Names,
    }

    transfers := fs.GetConfig(ctx).Transfers
    ctxRun := b.opt.setDryRun(ctx)

    ctxRun, filterDelete := filter.AddConfig(b.opt.setDryRun(ctx))

    for _, file := range files.ToList() {
        if err := filterDelete.AddFile(file); err != nil {
            return err
        }
    }

    objChan := make(fs.ObjectsChan, transfers)
    errChan := make(chan error, 1)
@@ -53,6 +61,36 @@ func (b *bisyncRun) fastDelete(ctx context.Context, f fs.Fs, files bilib.Names,
    return err
}
// operation should be "make" or "remove"
func (b *bisyncRun) syncEmptyDirs(ctx context.Context, dst fs.Fs, candidates bilib.Names, dirsList *fileList, operation string) {
    if b.opt.CreateEmptySrcDirs && (!b.opt.Resync || operation == "make") {

        candidatesList := candidates.ToList()
        if operation == "remove" {
            // reverse the sort order to ensure we remove subdirs before parent dirs
            sort.Sort(sort.Reverse(sort.StringSlice(candidatesList)))
        }

        for _, s := range candidatesList {
            var direrr error
            if dirsList.has(s) { //make sure it's a dir, not a file
                if operation == "remove" {
                    //note: we need to use Rmdirs instead of Rmdir because directories will fail to delete if they have other empty dirs inside of them.
                    direrr = operations.Rmdirs(ctx, dst, s, false)
                } else if operation == "make" {
                    direrr = operations.Mkdir(ctx, dst, s)
                } else {
                    direrr = fmt.Errorf("invalid operation. Expected 'make' or 'remove', received '%q'", operation)
                }

                if direrr != nil {
                    fs.Debugf(nil, "Error syncing directory: %v", direrr)
                }
            }
        }
    }
}
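The reverse sort above is the whole trick for safe removal: deeper paths sort after their parents lexicographically, so reversing the order visits children first. Isolated, under no assumptions beyond the standard library:

package main

import (
    "fmt"
    "sort"
)

func main() {
    dirs := []string{"a", "a/b", "a/b/c"}
    // Reverse-lexicographic order puts deeper paths first,
    // so subdirs are removed before the parents that contain them.
    sort.Sort(sort.Reverse(sort.StringSlice(dirs)))
    fmt.Println(dirs) // [a/b/c a/b a]
}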
func (b *bisyncRun) saveQueue(files bilib.Names, jobName string) error {
    if !b.opt.SaveQueues {
        return nil
@@ -26,6 +26,7 @@ func rcBisync(ctx context.Context, in rc.Params) (out rc.Params, err error) {

    if dryRun, err := in.GetBool("dryRun"); err == nil {
        ci.DryRun = dryRun
        opt.DryRun = dryRun
    } else if rc.NotErrParamNotFound(err) {
        return nil, err
    }
@@ -48,12 +49,21 @@ func rcBisync(ctx context.Context, in rc.Params) (out rc.Params, err error) {
    if opt.Force, err = in.GetBool("force"); rc.NotErrParamNotFound(err) {
        return
    }
    if opt.CreateEmptySrcDirs, err = in.GetBool("createEmptySrcDirs"); rc.NotErrParamNotFound(err) {
        return
    }
    if opt.RemoveEmptyDirs, err = in.GetBool("removeEmptyDirs"); rc.NotErrParamNotFound(err) {
        return
    }
    if opt.NoCleanup, err = in.GetBool("noCleanup"); rc.NotErrParamNotFound(err) {
        return
    }
    if opt.IgnoreListingChecksum, err = in.GetBool("ignoreListingChecksum"); rc.NotErrParamNotFound(err) {
        return
    }
    if opt.Resilient, err = in.GetBool("resilient"); rc.NotErrParamNotFound(err) {
        return
    }

    if opt.CheckFilename, err = in.GetString("checkFilename"); rc.NotErrParamNotFound(err) {
        return
@@ -69,6 +79,9 @@ func rcBisync(ctx context.Context, in rc.Params) (out rc.Params, err error) {
    if rc.NotErrParamNotFound(err) {
        return nil, err
    }
    if checkSync == "" {
        checkSync = "true"
    }
    if err := opt.CheckSync.Set(checkSync); err != nil {
        return nil, err
    }
@@ -4,7 +4,7 @@
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file10.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file11.txt"
- 13 md5:fb3ecfb2800400fb01b0bfd39903e9fb - 2001-01-02T00:00:00.000000000+0000 "file2.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-03-04T00:00:00.000000000+0000 "file5.txt..path1"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file5.txt..path2"
- 39 md5:0860a03592626642f8fd6c8bfb447d2a - 2001-03-04T00:00:00.000000000+0000 "file5.txt..path1"
- 39 md5:979a803b15d27df0c31ad7d29006d10b - 2001-01-02T00:00:00.000000000+0000 "file5.txt..path2"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file6.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file7.txt"

@@ -4,5 +4,5 @@
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file11.txt"
- 13 md5:fb3ecfb2800400fb01b0bfd39903e9fb - 2001-01-02T00:00:00.000000000+0000 "file2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file3.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-03-04T00:00:00.000000000+0000 "file5.txt"
- 39 md5:0860a03592626642f8fd6c8bfb447d2a - 2001-03-04T00:00:00.000000000+0000 "file5.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file7.txt"

@@ -4,7 +4,7 @@
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file10.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file11.txt"
- 13 md5:fb3ecfb2800400fb01b0bfd39903e9fb - 2001-01-02T00:00:00.000000000+0000 "file2.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-03-04T00:00:00.000000000+0000 "file5.txt..path1"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file5.txt..path2"
- 39 md5:0860a03592626642f8fd6c8bfb447d2a - 2001-03-04T00:00:00.000000000+0000 "file5.txt..path1"
- 39 md5:979a803b15d27df0c31ad7d29006d10b - 2001-01-02T00:00:00.000000000+0000 "file5.txt..path2"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file6.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file7.txt"

@@ -4,5 +4,5 @@
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file10.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2000-01-01T00:00:00.000000000+0000 "file4.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file5.txt"
- 39 md5:979a803b15d27df0c31ad7d29006d10b - 2001-01-02T00:00:00.000000000+0000 "file5.txt"
- 19 md5:7fe98ed88552b828777d8630900346b8 - 2001-01-02T00:00:00.000000000+0000 "file6.txt"

@@ -68,6 +68,11 @@ INFO : - Path2 File was deleted - file7.txt
INFO : - Path2 File was deleted - file8.txt
INFO : Path2: 7 changes: 1 new, 3 newer, 0 older, 3 deleted
INFO : Applying changes
INFO : Checking potential conflicts...
ERROR : file5.txt: md5 differ
NOTICE: Local file system at {path2}: 1 differences found
NOTICE: Local file system at {path2}: 1 errors while checking
INFO : Finished checking the potential conflicts. 1 differences found
INFO : - Path1 Queue copy to Path2 - {path2/}file11.txt
INFO : - Path1 Queue copy to Path2 - {path2/}file2.txt
INFO : - Path2 Queue delete - {path2/}file4.txt

@@ -1 +1 @@
This file is newer
This file is newer and not equal to 5R

@@ -1 +1 @@
This file is newer
This file is newer and not equal to 5L

@@ -39,10 +39,12 @@ Bisync error: bisync aborted
(10) : move-listings path2-missing

(11) : test 3. put the remote subdir .chk_file back, run resync.
(12) : copy-file {path1/}subdir/.chk_file {path2/}
(12) : copy-file {path1/}subdir/.chk_file {path2/}subdir/
(13) : bisync check-access resync check-filename=.chk_file
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying unique Path2 files to Path1
INFO : Checking access health
INFO : Found 2 matching ".chk_file" files on both paths
INFO : Resynching Path1 to Path2
INFO : Resync updating listings
INFO : Bisync successful

@@ -20,7 +20,7 @@ bisync check-access check-filename=.chk_file
move-listings path2-missing

test 3. put the remote subdir .chk_file back, run resync.
copy-file {path1/}subdir/.chk_file {path2/}
copy-file {path1/}subdir/.chk_file {path2/}subdir/
bisync check-access resync check-filename=.chk_file

test 4. run sync with check-access. should pass.

@@ -0,0 +1 @@
"subdir"
@@ -0,0 +1 @@
"subdir"
cmd/bisync/testdata/test_createemptysrcdirs/golden/_testdir_path1.._testdir_path2.path1.lst
@@ -0,0 +1,7 @@
# bisync listing v1 from test
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.txt"
cmd/bisync/testdata/test_createemptysrcdirs/golden/_testdir_path1.._testdir_path2.path2.lst
@@ -0,0 +1,7 @@
# bisync listing v1 from test
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2001-01-02T00:00:00.000000000+0000 "file1.txt"
@@ -0,0 +1 @@
"subdir"
cmd/bisync/testdata/test_createemptysrcdirs/golden/test.log
@@ -0,0 +1,142 @@
(01) : test createemptysrcdirs


(02) : test initial bisync
(03) : touch-glob 2001-01-02 {datadir/} placeholder.txt
(04) : copy-as {datadir/}placeholder.txt {path1/} file1.txt
(05) : copy-as {datadir/}placeholder.txt {path1/} file1.copy1.txt
(06) : copy-as {datadir/}placeholder.txt {path1/} file1.copy2.txt
(07) : copy-as {datadir/}placeholder.txt {path1/} file1.copy3.txt
(08) : copy-as {datadir/}placeholder.txt {path1/} file1.copy4.txt
(09) : copy-as {datadir/}placeholder.txt {path1/} file1.copy5.txt
(10) : bisync resync
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying unique Path2 files to Path1
INFO : Resynching Path1 to Path2
INFO : Resync updating listings
INFO : Bisync successful

(11) : test 1. Create an empty dir on Path1 by creating subdir/placeholder.txt and then deleting the placeholder
(12) : copy-as {datadir/}placeholder.txt {path1/} subdir/placeholder.txt
(13) : touch-glob 2001-01-02 {path1/} subdir
(14) : delete-file {path1/}subdir/placeholder.txt

(15) : test 2. Run bisync without --create-empty-src-dirs
(16) : bisync
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Path1 checking for diffs
INFO : Path2 checking for diffs
INFO : No changes found
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

(17) : test 3. Confirm the subdir exists only on Path1 and not Path2
(18) : list-dirs {path1/}
subdir/
(19) : list-dirs {path2/}

(20) : test 4.Run bisync WITH --create-empty-src-dirs
(21) : bisync create-empty-src-dirs
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Path1 checking for diffs
INFO : - Path1 File is new - subdir
INFO : Path1: 1 changes: 1 new, 0 newer, 0 older, 0 deleted
INFO : Path2 checking for diffs
INFO : Applying changes
INFO : - Path1 Queue copy to Path2 - {path2/}subdir
INFO : - Path1 Do queued copies to - Path2
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

(22) : test 5. Confirm the subdir exists on both paths
(23) : list-dirs {path1/}
subdir/
(24) : list-dirs {path2/}
subdir/

(25) : test 6. Delete the empty dir on Path1 using purge-children (and also add files so the path isn't empty)
(26) : purge-children {path1/}
(27) : copy-as {datadir/}placeholder.txt {path1/} file1.txt
(28) : copy-as {datadir/}placeholder.txt {path1/} file1.copy1.txt
(29) : copy-as {datadir/}placeholder.txt {path1/} file1.copy2.txt
(30) : copy-as {datadir/}placeholder.txt {path1/} file1.copy3.txt
(31) : copy-as {datadir/}placeholder.txt {path1/} file1.copy4.txt
(32) : copy-as {datadir/}placeholder.txt {path1/} file1.copy5.txt

(33) : test 7. Run bisync without --create-empty-src-dirs
(34) : bisync
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Path1 checking for diffs
INFO : - Path1 File was deleted - RCLONE_TEST
INFO : - Path1 File was deleted - subdir
INFO : Path1: 2 changes: 0 new, 0 newer, 0 older, 2 deleted
INFO : Path2 checking for diffs
INFO : - Path2 File was deleted - subdir
INFO : Path2: 1 changes: 0 new, 0 newer, 0 older, 1 deleted
INFO : Applying changes
INFO : - Path2 Queue delete - {path2/}RCLONE_TEST
INFO : - Do queued deletes on - Path2
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

(35) : test 8. Confirm the subdir exists only on Path2 and not Path1
(36) : list-dirs {path1/}
(37) : list-dirs {path2/}
subdir/

(38) : test 9. Reset, do the delete again, and run bisync WITH --create-empty-src-dirs
(39) : bisync resync create-empty-src-dirs
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying unique Path2 files to Path1
INFO : - Path2 Resync will copy to Path1 - subdir
INFO : - Path2 Resync is doing queued copies to - Path1
INFO : Resynching Path1 to Path2
INFO : Resynching Path2 to Path1 (for empty dirs)
INFO : Resync updating listings
INFO : Bisync successful
(40) : list-dirs {path1/}
subdir/
(41) : list-dirs {path2/}
subdir/

(42) : purge-children {path1/}
(43) : copy-as {datadir/}placeholder.txt {path1/} file1.txt
(44) : copy-as {datadir/}placeholder.txt {path1/} file1.copy1.txt
(45) : copy-as {datadir/}placeholder.txt {path1/} file1.copy2.txt
(46) : copy-as {datadir/}placeholder.txt {path1/} file1.copy3.txt
(47) : copy-as {datadir/}placeholder.txt {path1/} file1.copy4.txt
(48) : copy-as {datadir/}placeholder.txt {path1/} file1.copy5.txt
(49) : list-dirs {path1/}
(50) : list-dirs {path2/}
subdir/

(51) : bisync create-empty-src-dirs
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Path1 checking for diffs
INFO : - Path1 File was deleted - subdir
INFO : Path1: 1 changes: 0 new, 0 newer, 0 older, 1 deleted
INFO : Path2 checking for diffs
INFO : Applying changes
INFO : - Path2 Queue delete - {path2/}subdir
INFO : - Do queued deletes on - Path2
INFO : subdir: Removing directory
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

(52) : test 10. Confirm the subdir has been removed on both paths
(53) : list-dirs {path1/}
(54) : list-dirs {path2/}

(55) : test 11. bisync again (because if we leave subdir in listings, test will fail due to mismatched modtime)
(56) : bisync create-empty-src-dirs
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Path1 checking for diffs
INFO : Path2 checking for diffs
INFO : No changes found
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
cmd/bisync/testdata/test_createemptysrcdirs/initial/RCLONE_TEST
@@ -0,0 +1 @@
This file is used for testing the health of rclone accesses to the local/remote file system. Do not delete.
Some files were not shown because too many files have changed in this diff.