Mirror of https://github.com/rclone/rclone.git (synced 2025-12-06 00:03:32 +00:00)

Compare commits: fix-6433-w ... fix-vfs-la (127 commits)
| SHA1 |
|---|
| b7cba2835d |
| a61d219bcd |
| 652d3cdee4 |
| bb1fc5b86d |
| efd3c6449b |
| 0ac5795f8c |
| 2f77651f64 |
| 8daacc2b99 |
| 87fa9f8e46 |
| 1392793334 |
| 0e427216db |
| 0c56c46523 |
| 617c5d5e1b |
| ec2024b907 |
| 458845ce89 |
| 57bde20acd |
| b0248e8070 |
| b285efb476 |
| be6f29930b |
| 653bc23728 |
| 47b04580db |
| 919e28b8bf |
| 3a3bc5a1ae |
| 133c006c37 |
| e455940f71 |
| 65528fd009 |
| 691159fe94 |
| 09858c0c5a |
| 5fd0abb2b9 |
| 36c37ffec1 |
| 6a5b7664f7 |
| ebac854512 |
| cafce96185 |
| 92ffcf9f86 |
| 64cdbb67b5 |
| 528fc899fb |
| d452f502c3 |
| 5d6b8141ec |
| 776e5ea83a |
| c9acc06a49 |
| a2dca02594 |
| 210331bf61 |
| 5b5fdc6bc5 |
| 0de74864b6 |
| 7042a11875 |
| 028832ce73 |
| c7c9356af5 |
| 3292c112c5 |
| 126d71b332 |
| df9be72a82 |
| 6aa8f7409a |
| 10c884552c |
| 2617610741 |
| 53dd174f3d |
| 65987f5970 |
| 1fc864fb32 |
| 22abcc9fd2 |
| 178cf821de |
| f4a571786c |
| c0a8ffcbef |
| 76eeca9eae |
| 8114744bce |
| db5d582404 |
| 01dbbff62e |
| afa61e702c |
| 546dc82793 |
| d9c4d95ab3 |
| 0fb1b75a02 |
| 38f1f5b177 |
| 0d2a62a927 |
| b75c207208 |
| dff223f195 |
| d2fef05fe4 |
| 188b9f8cf1 |
| daf3162bcf |
| 5e59e7f442 |
| fce22c0065 |
| bb3272e837 |
| cb5b5635c7 |
| 66ed0ca726 |
| b16e50851a |
| 90d23139f6 |
| 5ea9398b63 |
| 3f804224f4 |
| cf0bf159ab |
| 6654b66114 |
| 9bf78d0373 |
| 0c1fb8b2b7 |
| 966654e23a |
| 13b65104eb |
| 4a35aff33c |
| 09b6d939f5 |
| 4e79de106a |
| b437d9461a |
| 910af597a1 |
| c10965ecfb |
| 5efb880772 |
| 6c3b7d5820 |
| c5109408c0 |
| a3c06b9bbe |
| 2aa264b33c |
| 4e078765f9 |
| 7fbc928a19 |
| 27096323db |
| 7e547822d6 |
| 67625b1dbd |
| 88086643f7 |
| 5f13d84135 |
| 07efdb55fa |
| fb6ddd680c |
| bc09105d2e |
| 4f374bc264 |
| 1c99661d8c |
| 04b54bbb1e |
| 90cda2d6c2 |
| dbd9ce78e6 |
| cbc18e2693 |
| 67c675d7ad |
| c080b39e47 |
| 8504da496b |
| 67240bd541 |
| 6ce0168ba5 |
| 67f5f04a77 |
| 91f8894285 |
| 655d63b4fd |
| d3d843a11d |
| 57803bee22 |
.github/workflows/build.yml | 46 (vendored)
@@ -30,7 +30,7 @@ jobs:
         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '1.19.x'
+            go: '1.19'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -41,14 +41,14 @@ jobs:

           - job_name: linux_386
             os: ubuntu-latest
-            go: '1.19.x'
+            go: '1.19'
             goarch: 386
             gotags: cmount
             quicktest: true

           - job_name: mac_amd64
             os: macos-11
-            go: '1.19.x'
+            go: '1.19'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -57,14 +57,14 @@ jobs:

           - job_name: mac_arm64
             os: macos-11
-            go: '1.19.x'
+            go: '1.19'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true

           - job_name: windows
             os: windows-latest
-            go: '1.19.x'
+            go: '1.19'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -74,20 +74,20 @@ jobs:

           - job_name: other_os
             os: ubuntu-latest
-            go: '1.19.x'
+            go: '1.19'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true

           - job_name: go1.17
             os: ubuntu-latest
-            go: '1.17.x'
+            go: '1.17'
             quicktest: true
             racequicktest: true

           - job_name: go1.18
             os: ubuntu-latest
-            go: '1.18.x'
+            go: '1.18'
             quicktest: true
             racequicktest: true

@@ -97,14 +97,13 @@ jobs:
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Install Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v3
        with:
-          stable: 'false'
          go-version: ${{ matrix.go }}
+          check-latest: true

@@ -162,7 +161,7 @@ jobs:
          env

      - name: Go module cache
-        uses: actions/cache@v2
+        uses: actions/cache@v3
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -226,7 +225,7 @@ jobs:

    steps:
      - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3

      - name: Code quality test
        uses: golangci/golangci-lint-action@v3
@@ -234,6 +233,19 @@ jobs:
          # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
          version: latest

+      # Run govulncheck on the latest go version, the one we build binaries with
+      - name: Install Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: 1.19
+          check-latest: true
+
+      - name: Install govulncheck
+        run: go install golang.org/x/vuln/cmd/govulncheck@latest
+
+      - name: Scan for vulnerabilities
+        run: govulncheck ./...
+
  android:
    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
    timeout-minutes: 30
@@ -242,18 +254,18 @@ jobs:

    steps:
      - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      # Upgrade together with NDK version
      - name: Set up Go
-        uses: actions/setup-go@v1
+        uses: actions/setup-go@v3
        with:
-          go-version: 1.19.x
+          go-version: 1.19

      - name: Go module cache
-        uses: actions/cache@v2
+        uses: actions/cache@v3
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}

@@ -12,7 +12,7 @@ jobs:
    name: Build image job
    steps:
      - name: Checkout master
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Build and publish image
@@ -11,7 +11,7 @@ jobs:
    name: Build image job
    steps:
      - name: Checkout master
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Get actual patch version
@@ -40,7 +40,7 @@ jobs:
    name: Build docker plugin job
    steps:
      - name: Checkout master
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Build and publish docker plugin

@@ -20,7 +20,7 @@ issues:
  exclude-use-default: false

  # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
-  max-per-linter: 0
+  max-issues-per-linter: 0

  # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
  max-same-issues: 0

MANUAL.html | 2343 (generated; diff suppressed because it is too large)
MANUAL.txt | 2676 (generated; diff suppressed because it is too large)
@@ -45,10 +45,10 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
 * HTTP [:page_facing_up:](https://rclone.org/http/)
 * Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
-* Hubic [:page_facing_up:](https://rclone.org/hubic/)
 * Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
 * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
 * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
+* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
 * Koofr [:page_facing_up:](https://rclone.org/koofr/)
 * Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
 * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
@@ -62,17 +62,20 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
 * OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
 * Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
+* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
 * ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
 * pCloud [:page_facing_up:](https://rclone.org/pcloud/)
 * premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
 * put.io [:page_facing_up:](https://rclone.org/putio/)
 * QingStor [:page_facing_up:](https://rclone.org/qingstor/)
+* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
 * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
 * RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
 * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
 * Seafile [:page_facing_up:](https://rclone.org/seafile/)
 * SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
 * SFTP [:page_facing_up:](https://rclone.org/sftp/)
+* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
 * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
 * Storj [:page_facing_up:](https://rclone.org/storj/)
 * SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
@@ -53,6 +53,14 @@ doing that so it may be necessary to roll back dependencies to the
 version specified by `make updatedirect` in order to get rclone to
 build.

+## Tidy beta
+
+At some point after the release run
+
+    bin/tidy-beta v1.55
+
+where the version number is that of a couple ago to remove old beta binaries.
+
 ## Making a point release

 If rclone needs a point release due to some horrendous bug:
@@ -24,7 +24,6 @@ import (
     _ "github.com/rclone/rclone/backend/hdfs"
     _ "github.com/rclone/rclone/backend/hidrive"
     _ "github.com/rclone/rclone/backend/http"
-    _ "github.com/rclone/rclone/backend/hubic"
     _ "github.com/rclone/rclone/backend/internetarchive"
     _ "github.com/rclone/rclone/backend/jottacloud"
     _ "github.com/rclone/rclone/backend/koofr"
@@ -35,6 +34,7 @@ import (
     _ "github.com/rclone/rclone/backend/netstorage"
     _ "github.com/rclone/rclone/backend/onedrive"
     _ "github.com/rclone/rclone/backend/opendrive"
+    _ "github.com/rclone/rclone/backend/oracleobjectstorage"
     _ "github.com/rclone/rclone/backend/pcloud"
     _ "github.com/rclone/rclone/backend/premiumizeme"
     _ "github.com/rclone/rclone/backend/putio"
@@ -44,6 +44,7 @@ import (
     _ "github.com/rclone/rclone/backend/sftp"
     _ "github.com/rclone/rclone/backend/sharefile"
     _ "github.com/rclone/rclone/backend/sia"
+    _ "github.com/rclone/rclone/backend/smb"
     _ "github.com/rclone/rclone/backend/storj"
     _ "github.com/rclone/rclone/backend/sugarsync"
     _ "github.com/rclone/rclone/backend/swift"
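The three hunks above work because rclone backends register themselves from an `init()` function, so wiring a backend in or out is a one-line change to this blank-import block. A minimal sketch of that registration pattern, collapsed into a single file for brevity (the `backends` map and `Register` function are stand-ins for rclone's `fs.Register` machinery):

```go
package main

import "fmt"

// backends is a registry of constructors, keyed by backend name.
var backends = map[string]func() string{}

// Register is what each backend package would call from its init().
func Register(name string, newFs func() string) {
	backends[name] = newFs
}

// In rclone this init() lives in the backend's own package, so a blank
// import (`_ "path/to/backend"`) is enough to make it available.
func init() {
	Register("smb", func() string { return "smb backend" })
}

func main() {
	for name := range backends {
		fmt.Println("registered:", name)
	}
}
```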
@@ -12,9 +12,9 @@ import (
     "errors"
     "fmt"
     "io"
-    "io/ioutil"
     "net/http"
     "net/url"
+    "os"
     "path"
     "strconv"
     "strings"
@@ -595,7 +595,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
     )
     switch {
     case opt.UseEmulator:
-        credential, err := azblob.NewSharedKeyCredential(emulatorAccount, emulatorAccountKey)
+        var actualEmulatorAccount = emulatorAccount
+        if opt.Account != "" {
+            actualEmulatorAccount = opt.Account
+        }
+        var actualEmulatorKey = emulatorAccountKey
+        if opt.Key != "" {
+            actualEmulatorKey = opt.Key
+        }
+        credential, err := azblob.NewSharedKeyCredential(actualEmulatorAccount, actualEmulatorKey)
     if err != nil {
         return nil, fmt.Errorf("failed to parse credentials: %w", err)
     }
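The hunk above lets a configured account and key override the emulator defaults instead of being silently ignored. A reduced sketch of that fallback, with illustrative constants standing in for the well-known Azurite defaults that rclone hard-codes (the key value here is a placeholder, not the real one):

```go
package main

import "fmt"

// Illustrative defaults; rclone hard-codes the well-known Azurite values.
const (
	emulatorAccount    = "devstoreaccount1"
	emulatorAccountKey = "placeholder-base64-key"
)

// emulatorCredentials prefers explicitly configured values and falls
// back to the emulator defaults, mirroring the pattern in the diff.
func emulatorCredentials(account, key string) (string, string) {
	actualAccount := emulatorAccount
	if account != "" {
		actualAccount = account
	}
	actualKey := emulatorAccountKey
	if key != "" {
		actualKey = key
	}
	return actualAccount, actualKey
}

func main() {
	a, k := emulatorCredentials("", "")
	fmt.Println(a, k) // nothing configured, so the defaults win
}
```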
@@ -717,7 +725,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
         return nil, fmt.Errorf("failed to make azure storage url from account and endpoint: %w", err)
     }
     // Try loading service principal credentials from file.
-    loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServicePrincipalFile))
+    loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServicePrincipalFile))
     if err != nil {
         return nil, fmt.Errorf("error opening service principal credentials file: %w", err)
     }
@@ -1677,6 +1685,26 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
         }
     }

+    // Apply upload options (also allows one to overwrite content-type)
+    for _, option := range options {
+        key, value := option.Header()
+        lowerKey := strings.ToLower(key)
+        switch lowerKey {
+        case "":
+            // ignore
+        case "cache-control":
+            httpHeaders.CacheControl = value
+        case "content-disposition":
+            httpHeaders.ContentDisposition = value
+        case "content-encoding":
+            httpHeaders.ContentEncoding = value
+        case "content-language":
+            httpHeaders.ContentLanguage = value
+        case "content-type":
+            httpHeaders.ContentType = value
+        }
+    }
+
     uploadParts := maxUploadParts
     if uploadParts < 1 {
         uploadParts = 1
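The loop added above translates rclone's generic upload options into Azure blob HTTP headers by lowercasing the key and dispatching on it. A stripped-down sketch of the same dispatch, using a local `HeaderOption` type as a stand-in for rclone's `fs.OpenOption` and a plain struct for the azblob header set:

```go
package main

import (
	"fmt"
	"strings"
)

// HeaderOption stands in for fs.OpenOption, which exposes a
// header key/value pair via Header().
type HeaderOption struct{ Key, Value string }

func (o HeaderOption) Header() (string, string) { return o.Key, o.Value }

// BlobHeaders mirrors the subset of blob HTTP headers set in the diff.
type BlobHeaders struct {
	CacheControl, ContentDisposition, ContentEncoding, ContentLanguage, ContentType string
}

// applyOptions copies recognised headers into the blob headers and
// silently ignores empty keys and anything it does not know about.
func applyOptions(h *BlobHeaders, options []HeaderOption) {
	for _, option := range options {
		key, value := option.Header()
		switch strings.ToLower(key) {
		case "":
			// ignore
		case "cache-control":
			h.CacheControl = value
		case "content-disposition":
			h.ContentDisposition = value
		case "content-encoding":
			h.ContentEncoding = value
		case "content-language":
			h.ContentLanguage = value
		case "content-type":
			h.ContentType = value
		}
	}
}

func main() {
	var h BlobHeaders
	applyOptions(&h, []HeaderOption{{"Content-Type", "text/plain"}})
	fmt.Printf("%+v\n", h)
}
```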
@@ -9,7 +9,6 @@ import (
     "encoding/json"
     "fmt"
     "io"
-    "io/ioutil"
     "net/http"

     "github.com/Azure/go-autorest/autorest/adal"
@@ -97,7 +96,7 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
         return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
     }
     defer func() { // resp and Body should not be nil
-        _, err = io.Copy(ioutil.Discard, resp.Body)
+        _, err = io.Copy(io.Discard, resp.Body)
         if err != nil {
             fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
         }
@@ -112,12 +111,12 @@ func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
     case 200, 201, 202:
         break
     default:
-        body, _ := ioutil.ReadAll(resp.Body)
+        body, _ := io.ReadAll(resp.Body)
         fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
         return result, httpError{Response: resp}
     }

-    b, err := ioutil.ReadAll(resp.Body)
+    b, err := io.ReadAll(resp.Body)
     if err != nil {
         return result, fmt.Errorf("couldn't read IMDS response: %w", err)
     }
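Most of the remaining hunks in this compare are the same mechanical migration: `io/ioutil` was deprecated in Go 1.16 and its helpers moved into `io` and `os`. A sketch of the one-to-one replacements used throughout these diffs (note one behavioural difference: `ioutil.ReadDir` returned `[]fs.FileInfo` while `os.ReadDir` returns the cheaper `[]fs.DirEntry`):

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	// ioutil.ReadAll -> io.ReadAll
	data, _ := io.ReadAll(strings.NewReader("hello"))

	// ioutil.NopCloser -> io.NopCloser; ioutil.Discard -> io.Discard
	rc := io.NopCloser(strings.NewReader("drained"))
	_, _ = io.Copy(io.Discard, rc)

	// ioutil.TempFile -> os.CreateTemp; ioutil.TempDir -> os.MkdirTemp
	f, _ := os.CreateTemp("", "example-")
	defer os.Remove(f.Name())

	// ioutil.WriteFile/ReadFile/ReadDir -> os.WriteFile/os.ReadFile/os.ReadDir
	_ = os.WriteFile(f.Name(), data, 0o600)
	back, _ := os.ReadFile(f.Name())
	fmt.Println(string(back))
}
```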
@@ -17,9 +17,9 @@ import (
     "errors"
     "fmt"
     "io"
-    "io/ioutil"
     "net/http"
     "net/url"
+    "os"
     "path"
     "strconv"
     "strings"
@@ -183,7 +183,7 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
 }

 func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
-    file, err := ioutil.ReadFile(configFile)
+    file, err := os.ReadFile(configFile)
     if err != nil {
         return nil, fmt.Errorf("box: failed to read Box config: %w", err)
     }
backend/cache/cache_internal_test.go | 7 (vendored)
@@ -11,7 +11,6 @@ import (
     goflag "flag"
     "fmt"
     "io"
-    "io/ioutil"
     "log"
     "math/rand"
     "os"
@@ -167,7 +166,7 @@ func TestInternalVfsCache(t *testing.T) {
     li2 := [2]string{path.Join("test", "one"), path.Join("test", "second")}
     for _, r := range li2 {
         var err error
-        ci, err := ioutil.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
+        ci, err := os.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
         if err != nil || len(ci) == 0 {
             log.Printf("========== '%v' not in cache", r)
         } else {
@@ -841,7 +840,7 @@ func newRun() *run {
     }

     if uploadDir == "" {
-        r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
+        r.tmpUploadDir, err = os.MkdirTemp("", "rclonecache-tmp")
         if err != nil {
             panic(fmt.Sprintf("Failed to create temp dir: %v", err))
         }
@@ -984,7 +983,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
     chunk := int64(1024)
     cnt := size / chunk
     left := size % chunk
-    f, err := ioutil.TempFile("", "rclonecache-tempfile")
+    f, err := os.CreateTemp("", "rclonecache-tempfile")
     require.NoError(t, err)

     for i := 0; i < int(cnt); i++ {
backend/cache/plex.go | 4 (vendored)
@@ -8,7 +8,7 @@ import (
     "crypto/tls"
     "encoding/json"
     "fmt"
-    "io/ioutil"
+    "io"
     "net/http"
     "net/url"
     "strings"
@@ -167,7 +167,7 @@ func (p *plexConnector) listenWebsocket() {
             continue
         }
         var data []byte
-        data, err = ioutil.ReadAll(resp.Body)
+        data, err = io.ReadAll(resp.Body)
         if err != nil {
             continue
         }
backend/cache/storage_persistent.go | 5 (vendored)
@@ -9,7 +9,6 @@ import (
     "encoding/binary"
     "encoding/json"
     "fmt"
-    "io/ioutil"
     "os"
     "path"
     "strconv"
@@ -473,7 +472,7 @@ func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error
     var data []byte

     fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
-    data, err := ioutil.ReadFile(fp)
+    data, err := os.ReadFile(fp)
     if err != nil {
         return nil, err
     }
@@ -486,7 +485,7 @@ func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error {
     _ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm)

     filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10))
-    err := ioutil.WriteFile(filePath, data, os.ModePerm)
+    err := os.WriteFile(filePath, data, os.ModePerm)
     if err != nil {
         return err
     }
@@ -12,7 +12,6 @@ import (
     "fmt"
     gohash "hash"
     "io"
-    "io/ioutil"
     "math/rand"
     "path"
     "regexp"
@@ -1038,7 +1037,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
     if err != nil {
         return err
     }
-    metadata, err := ioutil.ReadAll(reader)
+    metadata, err := io.ReadAll(reader)
     _ = reader.Close() // ensure file handle is freed on windows
     if err != nil {
         return err
@@ -1097,7 +1096,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
     if err != nil {
         return "", err
     }
-    data, err := ioutil.ReadAll(reader)
+    data, err := io.ReadAll(reader)
     _ = reader.Close() // ensure file handle is freed on windows
     if err != nil {
         return "", err
@@ -5,7 +5,7 @@ import (
     "context"
     "flag"
     "fmt"
-    "io/ioutil"
+    "io"
     "path"
     "regexp"
     "strings"
@@ -413,7 +413,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
         if r == nil {
             return
         }
-        data, err := ioutil.ReadAll(r)
+        data, err := io.ReadAll(r)
         assert.NoError(t, err)
         assert.Equal(t, contents, string(data))
         _ = r.Close()
@@ -538,7 +538,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
     assert.NoError(t, err)
     var chunkContents []byte
     assert.NotPanics(t, func() {
-        chunkContents, err = ioutil.ReadAll(r)
+        chunkContents, err = io.ReadAll(r)
         _ = r.Close()
     })
     assert.NoError(t, err)
@@ -573,7 +573,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
     r, err = willyChunk.Open(ctx)
     assert.NoError(t, err)
     assert.NotPanics(t, func() {
-        _, err = ioutil.ReadAll(r)
+        _, err = io.ReadAll(r)
         _ = r.Close()
     })
     assert.NoError(t, err)
@@ -672,7 +672,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
     assert.NoError(t, err, "open "+description)
     assert.NotNil(t, r, "open stream of "+description)
     if err == nil && r != nil {
-        data, err := ioutil.ReadAll(r)
+        data, err := io.ReadAll(r)
         assert.NoError(t, err, "read all of "+description)
         assert.Equal(t, contents, string(data), description+" contents is ok")
         _ = r.Close()
@@ -758,8 +758,8 @@ func testFutureProof(t *testing.T, f *Fs) {
     assert.Error(t, err)

     // Rcat must fail
-    in := ioutil.NopCloser(bytes.NewBufferString("abc"))
-    robj, err := operations.Rcat(ctx, f, file, in, modTime)
+    in := io.NopCloser(bytes.NewBufferString("abc"))
+    robj, err := operations.Rcat(ctx, f, file, in, modTime, nil)
     assert.Nil(t, robj)
     assert.NotNil(t, err)
     if err != nil {
@@ -854,7 +854,7 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
     r, err := dstFile.Open(ctx)
     assert.NoError(t, err)
     assert.NotNil(t, r)
-    data, err := ioutil.ReadAll(r)
+    data, err := io.ReadAll(r)
     assert.NoError(t, err)
     assert.Equal(t, contents, string(data))
     _ = r.Close()
@@ -13,7 +13,6 @@ import (
     "errors"
     "fmt"
     "io"
-    "io/ioutil"
     "os"
     "regexp"
     "strings"
@@ -29,6 +28,7 @@ import (
     "github.com/rclone/rclone/fs/config/configstruct"
     "github.com/rclone/rclone/fs/fspath"
     "github.com/rclone/rclone/fs/hash"
+    "github.com/rclone/rclone/fs/log"
     "github.com/rclone/rclone/fs/object"
     "github.com/rclone/rclone/fs/operations"
 )
@@ -367,13 +367,16 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
     if err != nil {
         return nil, err
     }
-    meta := readMetadata(ctx, mo)
-    if meta == nil {
-        return nil, errors.New("error decoding metadata")
+    meta, err := readMetadata(ctx, mo)
+    if err != nil {
+        return nil, fmt.Errorf("error decoding metadata: %w", err)
     }
     // Create our Object
     o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode))
-    return f.newObject(o, mo, meta), err
+    if err != nil {
+        return nil, err
+    }
+    return f.newObject(o, mo, meta), nil
 }

 // checkCompressAndType checks if an object is compressible and determines it's mime type
@@ -464,7 +467,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
     }

     fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file")
-    tempFile, err := ioutil.TempFile("", "rclone-press-")
+    tempFile, err := os.CreateTemp("", "rclone-press-")
     defer func() {
         // these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish
         // to ignore them
@@ -542,8 +545,8 @@ func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, o
     }

     // Transfer the data
-    o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx), options)
-    //o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx))
+    o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
+    //o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx))
     if err != nil {
         if o != nil {
             removeErr := o.Remove(ctx)
@@ -677,7 +680,7 @@ func (f *Fs) putWithCustomFunctions(ctx context.Context, in io.Reader, src fs.Ob
         }
         return nil, err
     }
-    return f.newObject(dataObject, mo, meta), err
+    return f.newObject(dataObject, mo, meta), nil
 }

 // Put in to the remote path with the modTime given of the given size
@@ -1040,24 +1043,19 @@ func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mim
 }

 // This function will read the metadata from a metadata object.
-func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata) {
+func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) {
     // Open our meradata object
     rc, err := mo.Open(ctx)
     if err != nil {
-        return nil
+        return nil, err
     }
-    defer func() {
-        err := rc.Close()
-        if err != nil {
-            fs.Errorf(mo, "Error closing object: %v", err)
-        }
-    }()
+    defer fs.CheckClose(rc, &err)
     jr := json.NewDecoder(rc)
     meta = new(ObjectMetadata)
     if err = jr.Decode(meta); err != nil {
-        return nil
+        return nil, err
     }
-    return meta
+    return meta, nil
 }

 // Remove removes this object
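The hunk above converts a nil-on-failure helper into one that returns a wrapped error, and swaps a hand-rolled deferred close for rclone's `fs.CheckClose`, which records a close error into the function's named error return. A self-contained sketch of that pattern, with a local `checkClose` helper standing in for `fs.CheckClose`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

type objectMetadata struct {
	Mode int    `json:"mode"`
	MD5  string `json:"md5"`
}

// checkClose mimics fs.CheckClose: close c and record the close error
// into *err only if no earlier error is already set.
func checkClose(c io.Closer, err *error) {
	cerr := c.Close()
	if *err == nil {
		*err = cerr
	}
}

// readMetadata decodes JSON metadata, returning an error instead of a
// bare nil so callers can tell "missing" apart from "corrupt".
func readMetadata(rc io.ReadCloser) (meta *objectMetadata, err error) {
	defer checkClose(rc, &err)
	meta = new(objectMetadata)
	if err = json.NewDecoder(rc).Decode(meta); err != nil {
		return nil, err
	}
	return meta, nil
}

func main() {
	rc := io.NopCloser(strings.NewReader(`{"mode":1,"md5":"abc"}`))
	meta, err := readMetadata(rc)
	fmt.Println(meta, err)
}
```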
@@ -1102,6 +1100,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
     origName := o.Remote()
     if o.meta.Mode != Uncompressed || compressible {
         newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType)
+        if err != nil {
+            return err
+        }
         if newObject.Object.Remote() != o.Object.Remote() {
             if removeErr := o.Object.Remove(ctx); removeErr != nil {
                 return removeErr
@@ -1115,9 +1116,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
         }
         // If we are, just update the object and metadata
         newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType)
-    }
-    if err != nil {
-        return err
+        if err != nil {
+            return err
+        }
     }
     // Update object metadata and return
     o.Object = newObject.Object
@@ -1128,6 +1129,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

 // This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified.
 func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object {
+    if o == nil {
+        log.Trace(nil, "newObject(%#v, %#v, %#v) called with nil o", o, mo, meta)
+    }
     return &Object{
         Object: o,
         f:      f,
@@ -1140,6 +1144,9 @@ func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object

 // This initializes the variables of a press Object with only the size. The metadata will be loaded later on demand.
 func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *Object {
+    if o == nil {
+        log.Trace(nil, "newObjectSizeAndNameOnly(%#v, %#v, %#v) called with nil o", o, moName, size)
+    }
     return &Object{
         Object: o,
         f:      f,
@@ -1167,7 +1174,7 @@ func (o *Object) loadMetadataIfNotLoaded(ctx context.Context) (err error) {
         return err
     }
     if o.meta == nil {
-        o.meta = readMetadata(ctx, o.mo)
+        o.meta, err = readMetadata(ctx, o.mo)
     }
     return err
 }
@@ -8,7 +8,6 @@ import (
     "errors"
     "fmt"
     "io"
-    "io/ioutil"
     "strings"
     "testing"

@@ -1073,7 +1072,7 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
     source := newRandomSource(copySize)
     encrypted, err := c.newEncrypter(source, nil)
     assert.NoError(t, err)
-    decrypted, err := c.newDecrypter(ioutil.NopCloser(encrypted))
+    decrypted, err := c.newDecrypter(io.NopCloser(encrypted))
     assert.NoError(t, err)
     sink := newRandomSource(copySize)
     n, err := io.CopyBuffer(sink, decrypted, buf)
@@ -1144,15 +1143,15 @@ func TestEncryptData(t *testing.T) {
     buf := bytes.NewBuffer(test.in)
     encrypted, err := c.EncryptData(buf)
     assert.NoError(t, err)
-    out, err := ioutil.ReadAll(encrypted)
+    out, err := io.ReadAll(encrypted)
     assert.NoError(t, err)
     assert.Equal(t, test.expected, out)

     // Check we can decode the data properly too...
     buf = bytes.NewBuffer(out)
-    decrypted, err := c.DecryptData(ioutil.NopCloser(buf))
+    decrypted, err := c.DecryptData(io.NopCloser(buf))
     assert.NoError(t, err)
-    out, err = ioutil.ReadAll(decrypted)
+    out, err = io.ReadAll(decrypted)
     assert.NoError(t, err)
     assert.Equal(t, test.in, out)
     }
@@ -1187,7 +1186,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
     fh, err := c.newEncrypter(in, nil)
     assert.NoError(t, err)

-    n, err := io.CopyN(ioutil.Discard, fh, 1e6)
+    n, err := io.CopyN(io.Discard, fh, 1e6)
     assert.Equal(t, io.ErrUnexpectedEOF, err)
     assert.Equal(t, int64(32), n)
 }
@@ -1257,12 +1256,12 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {

     in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
     in1 := bytes.NewBuffer(file16)
-    in := ioutil.NopCloser(io.MultiReader(in1, in2))
+    in := io.NopCloser(io.MultiReader(in1, in2))

     fh, err := c.newDecrypter(in)
     assert.NoError(t, err)

-    n, err := io.CopyN(ioutil.Discard, fh, 1e6)
+    n, err := io.CopyN(io.Discard, fh, 1e6)
     assert.Equal(t, io.ErrUnexpectedEOF, err)
     assert.Equal(t, int64(16), n)
 }
@@ -1274,14 +1273,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {

     // Make random data
     const dataSize = 150000
-    plaintext, err := ioutil.ReadAll(newRandomSource(dataSize))
+    plaintext, err := io.ReadAll(newRandomSource(dataSize))
     assert.NoError(t, err)

     // Encrypt the data
     buf := bytes.NewBuffer(plaintext)
     encrypted, err := c.EncryptData(buf)
     assert.NoError(t, err)
-    ciphertext, err := ioutil.ReadAll(encrypted)
+    ciphertext, err := io.ReadAll(encrypted)
     assert.NoError(t, err)

     trials := []int{0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65,
@@ -1300,7 +1299,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
         end = len(ciphertext)
         }
     }
-    reader = ioutil.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
+    reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
     return reader, nil
 }

@@ -1490,7 +1489,7 @@ func TestDecrypterRead(t *testing.T) {
         assert.NoError(t, err, what)
         continue
     }
-    _, err = ioutil.ReadAll(fh)
+    _, err = io.ReadAll(fh)
     var expectedErr error
     switch {
     case i == fileHeaderSize:
@@ -1514,7 +1513,7 @@ func TestDecrypterRead(t *testing.T) {
     cd := newCloseDetector(in)
     fh, err := c.newDecrypter(cd)
     assert.NoError(t, err)
-    _, err = ioutil.ReadAll(fh)
+    _, err = io.ReadAll(fh)
     assert.Error(t, err, "potato")
     assert.Equal(t, 0, cd.closed)

@@ -1524,13 +1523,13 @@ func TestDecrypterRead(t *testing.T) {
     copy(file16copy, file16)
     for i := range file16copy {
         file16copy[i] ^= 0xFF
-        fh, err := c.newDecrypter(ioutil.NopCloser(bytes.NewBuffer(file16copy)))
+        fh, err := c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
         if i < fileMagicSize {
             assert.Error(t, err, ErrorEncryptedBadMagic.Error())
             assert.Nil(t, fh)
         } else {
             assert.NoError(t, err)
-            _, err = ioutil.ReadAll(fh)
+            _, err = io.ReadAll(fh)
             assert.Error(t, err, ErrorEncryptedFileBadHeader.Error())
         }
         file16copy[i] ^= 0xFF
@@ -1565,7 +1564,7 @@ func TestDecrypterClose(t *testing.T) {
     assert.Equal(t, 0, cd.closed)

     // close after reading
-    out, err := ioutil.ReadAll(fh)
+    out, err := io.ReadAll(fh)
     assert.NoError(t, err)
     assert.Equal(t, []byte{1}, out)
     assert.Equal(t, io.EOF, fh.err)
@@ -14,9 +14,9 @@ import (
     "errors"
     "fmt"
     "io"
-    "io/ioutil"
     "mime"
     "net/http"
+    "os"
     "path"
     "regexp"
     "sort"
@@ -1108,7 +1108,7 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm

     // try loading service account credentials from env variable, then from a file
     if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
-        loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
+        loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
         if err != nil {
             return nil, fmt.Errorf("error opening service account credentials file: %w", err)
         }
@@ -3800,7 +3800,7 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
         data = data[:limit]
     }

-    return ioutil.NopCloser(bytes.NewReader(data)), nil
+    return io.NopCloser(bytes.NewReader(data)), nil
 }

 func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
@@ -7,7 +7,6 @@ import (
     "errors"
     "fmt"
     "io"
-    "io/ioutil"
     "mime"
     "os"
     "path"
@@ -78,7 +77,7 @@ var additionalMimeTypes = map[string]string{
 // Load the example export formats into exportFormats for testing
 func TestInternalLoadExampleFormats(t *testing.T) {
     fetchFormatsOnce.Do(func() {})
-    buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
+    buf, err := os.ReadFile(filepath.FromSlash("test/about.json"))
     var about struct {
         ExportFormats map[string][]string `json:"exportFormats,omitempty"`
         ImportFormats map[string][]string `json:"importFormats,omitempty"`
@@ -20,7 +20,6 @@ import (
     "errors"
     "fmt"
     "io"
-    "io/ioutil"
     "net/http"
     "net/url"
     "path"
@@ -1186,7 +1185,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
         return nil, errors.New("can't download - no id")
     }
     if o.contentType == emptyMimeType {
-        return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
+        return io.NopCloser(bytes.NewReader([]byte{})), nil
     }
     fs.FixRangeOption(options, o.size)
     resp, err := o.fs.rpc(ctx, "getFile", params{
@@ -15,7 +15,7 @@ import (
     "sync"
     "time"

-    "github.com/jlaffaye/ftp"
+    "github.com/rclone/ftp"
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/accounting"
     "github.com/rclone/rclone/fs/config"
@@ -124,6 +124,11 @@ So for |concurrency 3| you'd use |--checkers 2 --transfers 2
         Help:     "Use MDTM to set modification time (VsFtpd quirk)",
         Default:  false,
         Advanced: true,
     }, {
+        Name:     "force_list_hidden",
+        Help:     "Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.",
+        Default:  false,
+        Advanced: true,
+    }, {
         Name:    "idle_timeout",
         Default: fs.Duration(60 * time.Second),
@@ -205,6 +210,7 @@ type Options struct {
     DisableMLSD     bool        `config:"disable_mlsd"`
     DisableUTF8     bool        `config:"disable_utf8"`
     WritingMDTM     bool        `config:"writing_mdtm"`
+    ForceListHidden bool        `config:"force_list_hidden"`
     IdleTimeout     fs.Duration `config:"idle_timeout"`
     CloseTimeout    fs.Duration `config:"close_timeout"`
     ShutTimeout     fs.Duration `config:"shut_timeout"`
@@ -330,14 +336,44 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
     fs.Debugf(f, "Connecting to FTP server")

     // Make ftp library dial with fshttp dialer optionally using TLS
+    initialConnection := true
     dial := func(network, address string) (conn net.Conn, err error) {
+        fs.Debugf(f, "dial(%q,%q)", network, address)
+        defer func() {
+            fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
+        }()
         conn, err = fshttp.NewDialer(ctx).Dial(network, address)
-        if f.tlsConf != nil && err == nil {
-            conn = tls.Client(conn, f.tlsConf)
+        if err != nil {
+            return nil, err
         }
-        return
+        // Connect using cleartext only for non TLS
+        if f.tlsConf == nil {
+            return conn, nil
+        }
+        // Initial connection only needs to be cleartext for explicit TLS
+        if f.opt.ExplicitTLS && initialConnection {
+            initialConnection = false
+            return conn, nil
+        }
+        // Upgrade connection to TLS
+        tlsConn := tls.Client(conn, f.tlsConf)
+        // Do the initial handshake - tls.Client doesn't do it for us
+        // If we do this then connections to proftpd/pureftpd lock up
+        // See: https://github.com/rclone/rclone/issues/6426
+        // See: https://github.com/jlaffaye/ftp/issues/282
+        if false {
+            err = tlsConn.HandshakeContext(ctx)
+            if err != nil {
+                _ = conn.Close()
+                return nil, err
+            }
+        }
+        return tlsConn, nil
     }
-    ftpConfig := []ftp.DialOption{
-        ftp.DialWithContext(ctx),
-        ftp.DialWithDialFunc(dial),
-    }
+    ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dial)}

     if f.opt.TLS {
         // Our dialer takes care of TLS but ftp library also needs tlsConf
@@ -345,12 +381,6 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
         ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
     } else if f.opt.ExplicitTLS {
         ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
-        // Initial connection needs to be cleartext for explicit TLS
-        conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
-        if err != nil {
-            return nil, err
-        }
-        ftpConfig = append(ftpConfig, ftp.DialWithNetConn(conn))
     }
     if f.opt.DisableEPSV {
         ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
@@ -367,6 +397,9 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
     if f.opt.WritingMDTM {
         ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true))
     }
+    if f.opt.ForceListHidden {
+        ftpConfig = append(ftpConfig, ftp.DialWithForceListHidden(true))
+    }
     if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
         ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
     }
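The dial rework above is the heart of the FTP changes: for explicit TLS the first (control) connection must stay cleartext, and later connections are wrapped with `tls.Client` without forcing an immediate handshake, since forcing it hung proftpd/pureftpd per the issues linked in the diff. A reduced sketch of that state machine, using `net.Dial` as a stand-in for rclone's `fshttp.NewDialer`:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net"
)

// newDialFunc returns a dial function that leaves the very first
// connection cleartext when explicit TLS is requested and wraps every
// later connection with tls.Client. The TLS handshake is deferred to
// the first read/write rather than being forced here.
func newDialFunc(tlsConf *tls.Config, explicitTLS bool) func(network, address string) (net.Conn, error) {
	initialConnection := true
	return func(network, address string) (net.Conn, error) {
		conn, err := net.Dial(network, address) // stand-in for fshttp.NewDialer(ctx).Dial
		if err != nil {
			return nil, err
		}
		if tlsConf == nil {
			return conn, nil // plain FTP: never upgrade
		}
		if explicitTLS && initialConnection {
			initialConnection = false
			return conn, nil // control connection starts cleartext
		}
		return tls.Client(conn, tlsConf), nil // lazy TLS upgrade
	}
}

func main() {
	dial := newDialFunc(&tls.Config{ServerName: "ftp.example.com"}, true)
	fmt.Printf("dial func ready: %T\n", dial)
}
```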
@@ -34,9 +34,9 @@ func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
 // test that big file uploads do not cause network i/o timeout
 func (f *Fs) testUploadTimeout(t *testing.T) {
     const (
-        fileSize    = 100000000             // 100 MiB
-        idleTimeout = 40 * time.Millisecond // small because test server is local
-        maxTime     = 10 * time.Second      // prevent test hangup
+        fileSize    = 100000000       // 100 MiB
+        idleTimeout = 1 * time.Second // small because test server is local
+        maxTime     = 10 * time.Second // prevent test hangup
     )

     if testing.Short() {
@@ -19,8 +19,8 @@ import (
     "errors"
     "fmt"
     "io"
-    "io/ioutil"
     "net/http"
+    "os"
     "path"
     "strconv"
     "strings"
@@ -487,7 +487,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

     // try loading service account credentials from env variable, then from a file
     if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
-        loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
+        loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
         if err != nil {
             return nil, fmt.Errorf("error opening service account credentials file: %w", err)
         }
@@ -3,7 +3,7 @@ package googlephotos
 import (
     "context"
     "fmt"
-    "io/ioutil"
+    "io"
     "net/http"
     "path"
     "testing"
@@ -99,7 +99,7 @@ func TestIntegration(t *testing.T) {
     t.Run("ObjectOpen", func(t *testing.T) {
         in, err := dstObj.Open(ctx)
         require.NoError(t, err)
-        buf, err := ioutil.ReadAll(in)
+        buf, err := io.ReadAll(in)
         require.NoError(t, err)
         require.NoError(t, in.Close())
         assert.True(t, len(buf) > 1000)
@@ -5,7 +5,6 @@ import (
     "errors"
     "fmt"
     "io"
-    "io/ioutil"
     "path"
     "time"

@@ -118,7 +117,7 @@ func (o *Object) updateHashes(ctx context.Context) error {
     defer func() {
         _ = r.Close()
     }()
-    if _, err = io.Copy(ioutil.Discard, r); err != nil {
+    if _, err = io.Copy(io.Discard, r); err != nil {
         fs.Infof(o, "update failed (copy): %v", err)
         return err
     }
@@ -13,7 +13,6 @@ import (
     "net/http"
     "net/url"
     "path"
-    "strconv"
     "strings"
     "sync"
     "time"
@@ -305,7 +304,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
         fs:     f,
         remote: remote,
     }
-    err := o.stat(ctx)
+    err := o.head(ctx)
     if err != nil {
         return nil, err
     }
@@ -317,15 +316,6 @@ func (f *Fs) url(remote string) string {
     return f.endpointURL + rest.URLPathEscape(remote)
 }

-// parse s into an int64, on failure return def
-func parseInt64(s string, def int64) int64 {
-    n, e := strconv.ParseInt(s, 10, 64)
-    if e != nil {
-        return def
-    }
-    return n
-}
-
 // Errors returned by parseName
 var (
     errURLJoinFailed = errors.New("URLJoin failed")
@@ -500,7 +490,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
         fs:     f,
         remote: remote,
     }
-    switch err := file.stat(ctx); err {
+    switch err := file.head(ctx); err {
     case nil:
         add(file)
     case fs.ErrorNotAFile:
@@ -579,8 +569,8 @@ func (o *Object) url() string {
     return o.fs.url(o.remote)
 }

-// stat updates the info field in the Object
-func (o *Object) stat(ctx context.Context) error {
+// head sends a HEAD request to update info fields in the Object
+func (o *Object) head(ctx context.Context) error {
     if o.fs.opt.NoHead {
         o.size = -1
         o.modTime = timeUnset
@@ -601,13 +591,19 @@ func (o *Object) stat(ctx context.Context) error {
     if err != nil {
         return fmt.Errorf("failed to stat: %w", err)
     }
+    return o.decodeMetadata(ctx, res)
+}
+
+// decodeMetadata updates info fields in the Object according to HTTP response headers
+func (o *Object) decodeMetadata(ctx context.Context, res *http.Response) error {
     t, err := http.ParseTime(res.Header.Get("Last-Modified"))
     if err != nil {
         t = timeUnset
     }
-    o.size = parseInt64(res.Header.Get("Content-Length"), -1)
     o.modTime = t
     o.contentType = res.Header.Get("Content-Type")
+    o.size = rest.ParseSizeFromHeaders(res.Header)

     // If NoSlash is set then check ContentType to see if it is a directory
     if o.fs.opt.NoSlash {
         mediaType, _, err := mime.ParseMediaType(o.contentType)
@@ -653,6 +649,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
     if err != nil {
         return nil, fmt.Errorf("Open failed: %w", err)
     }
+    if err = o.decodeMetadata(ctx, res); err != nil {
+        return nil, fmt.Errorf("decodeMetadata failed: %w", err)
+    }
     return res.Body, nil
 }

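The http backend refactor above splits header parsing out of the renamed `head` method so that a plain GET (the `no_head` case) can populate size, mod time, and content type from the response it already has. A sketch of that header decoding using only the standard library, where `parseSize` stands in for rclone's `rest.ParseSizeFromHeaders`:

```go
package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

var timeUnset = time.Unix(0, 0)

// parseSize stands in for rest.ParseSizeFromHeaders: Content-Length if
// present and valid, otherwise -1 meaning "unknown".
func parseSize(h http.Header) int64 {
	n, err := strconv.ParseInt(h.Get("Content-Length"), 10, 64)
	if err != nil {
		return -1
	}
	return n
}

// decodeMetadata fills object fields from response headers, falling
// back to sentinel values when a header is missing or malformed.
func decodeMetadata(h http.Header) (size int64, modTime time.Time, contentType string) {
	modTime, err := http.ParseTime(h.Get("Last-Modified"))
	if err != nil {
		modTime = timeUnset
	}
	return parseSize(h), modTime, h.Get("Content-Type")
}

func main() {
	h := http.Header{}
	h.Set("Last-Modified", "Wed, 21 Oct 2015 07:28:00 GMT")
	h.Set("Content-Length", "10")
	h.Set("Content-Type", "text/plain")
	size, t, ct := decodeMetadata(h)
	fmt.Println(size, t.UTC(), ct)
}
```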
@@ -3,7 +3,7 @@ package http
 import (
     "context"
     "fmt"
-    "io/ioutil"
+    "io"
     "net/http"
     "net/http/httptest"
     "net/url"
@@ -41,12 +41,12 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
     // verify the file path is correct, and also check which line endings
     // are used to get sizes right ("\n" except on Windows, but even there
     // we may have "\n" or "\r\n" depending on git crlf setting)
-    fileList, err := ioutil.ReadDir(filesPath)
+    fileList, err := os.ReadDir(filesPath)
     require.NoError(t, err)
     require.Greater(t, len(fileList), 0)
     for _, file := range fileList {
         if !file.IsDir() {
-            data, _ := ioutil.ReadFile(filepath.Join(filesPath, file.Name()))
+            data, _ := os.ReadFile(filepath.Join(filesPath, file.Name()))
             if strings.HasSuffix(string(data), "\r\n") {
                 lineEndSize = 2
             }
@@ -194,31 +194,66 @@ func TestNewObject(t *testing.T) {
 }

 func TestOpen(t *testing.T) {
-    f, tidy := prepare(t)
+    m, tidy := prepareServer(t)
     defer tidy()

-    o, err := f.NewObject(context.Background(), "four/under four.txt")
-    require.NoError(t, err)
+    for _, head := range []bool{false, true} {
+        if !head {
+            m.Set("no_head", "true")
+        }
+        f, err := NewFs(context.Background(), remoteName, "", m)
+        require.NoError(t, err)

-    // Test normal read
-    fd, err := o.Open(context.Background())
-    require.NoError(t, err)
-    data, err := ioutil.ReadAll(fd)
-    require.NoError(t, err)
-    require.NoError(t, fd.Close())
-    if lineEndSize == 2 {
-        assert.Equal(t, "beetroot\r\n", string(data))
-    } else {
-        assert.Equal(t, "beetroot\n", string(data))
+        for _, rangeRead := range []bool{false, true} {
+            o, err := f.NewObject(context.Background(), "four/under four.txt")
+            require.NoError(t, err)
+
+            if !head {
+                // Test mod time is still indeterminate
+                tObj := o.ModTime(context.Background())
+                assert.Equal(t, time.Duration(0), time.Unix(0, 0).Sub(tObj))
+
+                // Test file size is still indeterminate
+                assert.Equal(t, int64(-1), o.Size())
+            }
+
+            var data []byte
+            if !rangeRead {
+                // Test normal read
+                fd, err := o.Open(context.Background())
+                require.NoError(t, err)
+                data, err = io.ReadAll(fd)
+                require.NoError(t, err)
+                require.NoError(t, fd.Close())
+                if lineEndSize == 2 {
+                    assert.Equal(t, "beetroot\r\n", string(data))
+                } else {
+                    assert.Equal(t, "beetroot\n", string(data))
+                }
+            } else {
+                // Test with range request
+                fd, err := o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
+                require.NoError(t, err)
+                data, err = io.ReadAll(fd)
+                require.NoError(t, err)
+                require.NoError(t, fd.Close())
+                assert.Equal(t, "eetro", string(data))
+            }
+
+            fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
+            require.NoError(t, err)
+            tFile := fi.ModTime()
+
+            // Test the time is always correct on the object after file open
+            tObj := o.ModTime(context.Background())
+            fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
+
+            if !rangeRead {
+                // Test the file size
+                assert.Equal(t, int64(len(data)), o.Size())
+            }
+        }
     }
-
-    // Test with range request
-    fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
-    require.NoError(t, err)
-    data, err = ioutil.ReadAll(fd)
-    require.NoError(t, err)
-    require.NoError(t, fd.Close())
-    assert.Equal(t, "eetro", string(data))
 }

 func TestMimeType(t *testing.T) {
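The rewritten `TestOpen` above turns one linear test into a small matrix over `head` and `rangeRead`. The diff uses plain nested loops; a hedged sketch of the same idea with named subtests, which is an optional refinement that makes failures attributable to a single combination:

```go
package http_test

import (
	"fmt"
	"testing"
)

// TestOpenMatrix sketches the loop structure: every combination of the
// two flags becomes its own named subtest, e.g. "head=false,rangeRead=true".
func TestOpenMatrix(t *testing.T) {
	for _, head := range []bool{false, true} {
		for _, rangeRead := range []bool{false, true} {
			name := fmt.Sprintf("head=%v,rangeRead=%v", head, rangeRead)
			t.Run(name, func(t *testing.T) {
				// Exercise the code path selected by (head, rangeRead);
				// the real test recreates the Fs with no_head set when !head.
				_ = head && rangeRead
			})
		}
	}
}
```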
@@ -1,62 +0,0 @@
-package hubic
-
-import (
-    "context"
-    "net/http"
-    "time"
-
-    "github.com/ncw/swift/v2"
-    "github.com/rclone/rclone/fs"
-)
-
-// auth is an authenticator for swift
-type auth struct {
-    f *Fs
-}
-
-// newAuth creates a swift authenticator
-func newAuth(f *Fs) *auth {
-    return &auth{
-        f: f,
-    }
-}
-
-// Request constructs an http.Request for authentication
-//
-// returns nil for not needed
-func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
-    const retries = 10
-    for try := 1; try <= retries; try++ {
-        err = a.f.getCredentials(context.TODO())
-        if err == nil {
-            break
-        }
-        time.Sleep(100 * time.Millisecond)
-        fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
-    }
-    return nil, err
-}
-
-// Response parses the result of an http request
-func (a *auth) Response(ctx context.Context, resp *http.Response) error {
-    return nil
-}
-
-// The public storage URL - set Internal to true to read
-// internal/service net URL
-func (a *auth) StorageUrl(Internal bool) string { // nolint
-    return a.f.credentials.Endpoint
-}
-
-// The access token
-func (a *auth) Token() string {
-    return a.f.credentials.Token
-}
-
-// The CDN url if available
-func (a *auth) CdnUrl() string { // nolint
-    return ""
-}
-
-// Check the interfaces are satisfied
-var _ swift.Authenticator = (*auth)(nil)
@@ -1,200 +0,0 @@
|
||||
// Package hubic provides an interface to the Hubic object storage
|
||||
// system.
|
||||
package hubic
|
||||
|
||||
// This uses the normal swift mechanism to update the credentials and
|
||||
// ignores the expires field returned by the Hubic API. This may need
|
||||
// to be revisited after some actual experience.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
swiftLib "github.com/ncw/swift/v2"
|
||||
"github.com/rclone/rclone/backend/swift"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/lib/oauthutil"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
const (
|
||||
rcloneClientID = "api_hubic_svWP970PvSWbw5G3PzrAqZ6X2uHeZBPI"
|
||||
rcloneEncryptedClientSecret = "leZKCcqy9movLhDWLVXX8cSLp_FzoiAPeEJOIOMRw1A5RuC4iLEPDYPWVF46adC_MVonnLdVEOTHVstfBOZ_lY4WNp8CK_YWlpRZ9diT5YI"
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
// Description of how to auth for this app
|
||||
oauthConfig = &oauth2.Config{
|
||||
Scopes: []string{
|
||||
"credentials.r", // Read OpenStack credentials
|
||||
},
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: "https://api.hubic.com/oauth/auth/",
|
||||
TokenURL: "https://api.hubic.com/oauth/token/",
|
||||
},
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.RedirectLocalhostURL,
|
||||
}
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "hubic",
|
||||
Description: "Hubic",
|
||||
NewFs: NewFs,
|
||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
return oauthutil.ConfigOut("", &oauthutil.Options{
|
||||
OAuth2Config: oauthConfig,
|
||||
})
|
||||
},
|
||||
Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
|
||||
})
|
||||
}
|
||||
|
||||
// credentials is the JSON returned from the Hubic API to read the
|
||||
// OpenStack credentials
|
||||
type credentials struct {
    Token    string `json:"token"`    // OpenStack token
    Endpoint string `json:"endpoint"` // OpenStack endpoint
    Expires  string `json:"expires"`  // Expires date - e.g. "2015-11-09T14:24:56+01:00"
}

// Fs represents a remote hubic
type Fs struct {
    fs.Fs                     // wrapped Fs
    features    *fs.Features  // optional features
    client      *http.Client  // client for oauth api
    credentials credentials   // returned from the Hubic API
    expires     time.Time     // time credentials expire
}

// Object describes a swift object
type Object struct {
    *swift.Object
}

// Return a string version
func (o *Object) String() string {
    if o == nil {
        return "<nil>"
    }
    return o.Object.String()
}

// ------------------------------------------------------------

// String converts this Fs to a string
func (f *Fs) String() string {
    if f.Fs == nil {
        return "Hubic"
    }
    return fmt.Sprintf("Hubic %s", f.Fs.String())
}

// getCredentials reads the OpenStack Credentials using the Hubic API
//
// The credentials are read into the Fs
func (f *Fs) getCredentials(ctx context.Context) (err error) {
    req, err := http.NewRequestWithContext(ctx, "GET", "https://api.hubic.com/1.0/account/credentials", nil)
    if err != nil {
        return err
    }
    resp, err := f.client.Do(req)
    if err != nil {
        return err
    }
    defer fs.CheckClose(resp.Body, &err)
    if resp.StatusCode < 200 || resp.StatusCode > 299 {
        body, _ := ioutil.ReadAll(resp.Body)
        bodyStr := strings.TrimSpace(strings.ReplaceAll(string(body), "\n", " "))
        return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
    }
    decoder := json.NewDecoder(resp.Body)
    var result credentials
    err = decoder.Decode(&result)
    if err != nil {
        return err
    }
    // fs.Debugf(f, "Got credentials %+v", result)
    if result.Token == "" || result.Endpoint == "" || result.Expires == "" {
        return errors.New("couldn't read token, endpoint and expires from credentials")
    }
    f.credentials = result
    expires, err := time.Parse(time.RFC3339, result.Expires)
    if err != nil {
        return err
    }
    f.expires = expires
    fs.Debugf(f, "Got swift credentials (expiry %v in %v)", f.expires, time.Until(f.expires))
    return nil
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
    client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
    if err != nil {
        return nil, fmt.Errorf("failed to configure Hubic: %w", err)
    }

    f := &Fs{
        client: client,
    }

    // Make the swift Connection
    ci := fs.GetConfig(ctx)
    c := &swiftLib.Connection{
        Auth:           newAuth(f),
        ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
        Timeout:        10 * ci.Timeout,        // Use the timeouts in the transport
        Transport:      fshttp.NewTransport(ctx),
    }
    err = c.Authenticate(ctx)
    if err != nil {
        return nil, fmt.Errorf("error authenticating swift connection: %w", err)
    }

    // Parse config into swift.Options struct
    opt := new(swift.Options)
    err = configstruct.Set(m, opt)
    if err != nil {
        return nil, err
    }

    // Make inner swift Fs from the connection
    swiftFs, err := swift.NewFsWithConnection(ctx, opt, name, root, c, true)
    if err != nil && err != fs.ErrorIsFile {
        return nil, err
    }
    f.Fs = swiftFs
    f.features = f.Fs.Features().Wrap(f)
    return f, err
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
    return f.features
}

// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
    return f.Fs
}

// Check the interfaces are satisfied
var (
    _ fs.Fs        = (*Fs)(nil)
    _ fs.UnWrapper = (*Fs)(nil)
)
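
A minimal sketch of how the expires field above is meant to be consumed: refresh lazily, and a little early, so a token is never used at the instant it lapses. The helper names here are hypothetical, not part of the Hubic backend:

// credentialsValid reports whether the cached swift credentials can still be used.
func (f *Fs) credentialsValid() bool {
    // refresh slightly early to avoid racing the expiry time
    return f.credentials.Token != "" && time.Until(f.expires) > 30*time.Second
}

// getCredentialsIfNeeded fetches new credentials only when the cached ones lapse.
func (f *Fs) getCredentialsIfNeeded(ctx context.Context) error {
    if f.credentialsValid() {
        return nil
    }
    return f.getCredentials(ctx)
}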
@@ -1,19 +0,0 @@
// Test Hubic filesystem interface
package hubic_test

import (
    "testing"

    "github.com/rclone/rclone/backend/hubic"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName:          "TestHubic:",
        NilObject:           (*hubic.Object)(nil),
        SkipFsCheckWrap:     true,
        SkipObjectCheckWrap: true,
    })
}
@@ -12,7 +12,6 @@ import (
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "math/rand"
    "net/http"
    "net/url"
@@ -822,7 +821,7 @@ func (f *Fs) allocatePathRaw(file string, absolute bool) string {
func grantTypeFilter(req *http.Request) {
    if legacyTokenURL == req.URL.String() {
        // read the entire body
        refreshBody, err := ioutil.ReadAll(req.Body)
        refreshBody, err := io.ReadAll(req.Body)
        if err != nil {
            return
        }
@@ -832,7 +831,7 @@ func grantTypeFilter(req *http.Request) {
        refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))

        // set the new ReadCloser (with a dummy Close())
        req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
        req.Body = io.NopCloser(bytes.NewReader(refreshBody))
    }
}

@@ -1789,7 +1788,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
    var tempFile *os.File

    // create the cache file
    tempFile, err = ioutil.TempFile("", cachePrefix)
    tempFile, err = os.CreateTemp("", cachePrefix)
    if err != nil {
        return
    }
@@ -1817,7 +1816,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
    } else {
        // that's a small file, just read it into memory
        var inData []byte
        inData, err = ioutil.ReadAll(teeReader)
        inData, err = io.ReadAll(teeReader)
        if err != nil {
            return
        }
@@ -1914,7 +1913,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

    // copy the already uploaded bytes into the trash :)
    var result api.UploadResponse
    _, err = io.CopyN(ioutil.Discard, in, response.ResumePos)
    _, err = io.CopyN(io.Discard, in, response.ResumePos)
    if err != nil {
        return err
    }

@@ -7,7 +7,6 @@ import (
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "path"
    "path/filepath"
@@ -124,8 +123,8 @@ routine so this flag shouldn't normally be used.`,
            Help: `Don't check to see if the files change during upload.

Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy
- source file is being updated" if the file changes during upload.
are being uploaded and aborts with a message which starts "can't copy -
source file is being updated" if the file changes during upload.

However on some file systems this modification time check may fail (e.g.
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
@@ -521,11 +520,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
        name := fi.Name()
        mode := fi.Mode()
        newRemote := f.cleanRemote(dir, name)
        // Don't include non directory if not included
        // we leave directory filtering to the layer above
        if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
            continue
        }
        // Follow symlinks if required
        if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
            localPath := filepath.Join(fsDirPath, name)
@@ -542,6 +536,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
            }
            mode = fi.Mode()
        }
        // Don't include non directory if not included
        // we leave directory filtering to the layer above
        if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
            continue
        }
        if fi.IsDir() {
            // Ignore directories which are symlinks. These are junction points under windows which
            // are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
@@ -646,7 +645,7 @@ func (f *Fs) readPrecision() (precision time.Duration) {
    precision = time.Second

    // Create temporary file and test it
    fd, err := ioutil.TempFile("", "rclone")
    fd, err := os.CreateTemp("", "rclone")
    if err != nil {
        // If failed return 1s
        // fmt.Println("Failed to create temp file", err)
@@ -1073,7 +1072,7 @@ func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err
    if err != nil {
        return nil, err
    }
    return readers.NewLimitedReadCloser(ioutil.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
    return readers.NewLimitedReadCloser(io.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
}

// Open an object for read
@@ -1400,30 +1399,27 @@ func (o *Object) writeMetadata(metadata fs.Metadata) (err error) {
}

func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
    if runtime.GOOS == "windows" {
        if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
    if runtime.GOOS != "windows" || !strings.HasPrefix(s, "\\") {
        if !filepath.IsAbs(s) {
            s2, err := filepath.Abs(s)
            if err == nil {
                s = s2
            }
        } else {
            s = filepath.Clean(s)
        }
    }
    if runtime.GOOS == "windows" {
        s = filepath.ToSlash(s)
        vol := filepath.VolumeName(s)
        s = vol + enc.FromStandardPath(s[len(vol):])
        s = filepath.FromSlash(s)

        if !noUNC {
            // Convert to UNC
            s = file.UNCPath(s)
        }
        return s
    }
    if !filepath.IsAbs(s) {
        s2, err := filepath.Abs(s)
        if err == nil {
            s = s2
        }
    }
    s = enc.FromStandardPath(s)
    return s
}

@@ -4,7 +4,7 @@ import (
    "bytes"
    "context"
    "fmt"
    "io/ioutil"
    "io"
    "os"
    "path"
    "path/filepath"
@@ -150,7 +150,7 @@ func TestSymlink(t *testing.T) {
    // Check reading the object
    in, err := o.Open(ctx)
    require.NoError(t, err)
    contents, err := ioutil.ReadAll(in)
    contents, err := io.ReadAll(in)
    require.NoError(t, err)
    require.Equal(t, "file.txt", string(contents))
    require.NoError(t, in.Close())
@@ -158,7 +158,7 @@ func TestSymlink(t *testing.T) {
    // Check reading the object with range
    in, err = o.Open(ctx, &fs.RangeOption{Start: 2, End: 5})
    require.NoError(t, err)
    contents, err = ioutil.ReadAll(in)
    contents, err = io.ReadAll(in)
    require.NoError(t, err)
    require.Equal(t, "file.txt"[2:5+1], string(contents))
    require.NoError(t, in.Close())

@@ -5,19 +5,41 @@ package local

import (
    "fmt"
    "sync"
    "time"

    "github.com/rclone/rclone/fs"
    "golang.org/x/sys/unix"
)

var (
    statxCheckOnce         sync.Once
    readMetadataFromFileFn func(o *Object, m *fs.Metadata) (err error)
)

// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
    statxCheckOnce.Do(func() {
        // Check statx() is available as it was only introduced in kernel 4.11
        // If not, fall back to fstatat() which was introduced in 2.6.16 which is guaranteed for all Go versions
        var stat unix.Statx_t
        if unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
            readMetadataFromFileFn = readMetadataFromFileStatx
        } else {
            readMetadataFromFileFn = readMetadataFromFileFstatat
        }
    })
    return readMetadataFromFileFn(o, m)
}
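
The statxCheckOnce probe above generalizes to any syscall that may be absent at runtime: attempt it once against a known-good target and key only off ENOSYS, since any other result proves the syscall itself is wired up. A self-contained sketch of the same pattern, with assumed names:

package main

import (
    "sync"

    "golang.org/x/sys/unix"
)

var (
    probeOnce sync.Once
    haveStatx bool
)

// haveStatxSyscall probes once whether statx() exists on this kernel.
// Any error other than ENOSYS (including EINVAL etc.) still means the
// syscall is available.
func haveStatxSyscall() bool {
    probeOnce.Do(func() {
        var st unix.Statx_t
        haveStatx = unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &st) != unix.ENOSYS
    })
    return haveStatx
}

func main() {
    println(haveStatxSyscall())
}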

// Read the metadata from the file into metadata where possible
func readMetadataFromFileStatx(o *Object, m *fs.Metadata) (err error) {
    flags := unix.AT_SYMLINK_NOFOLLOW
    if o.fs.opt.FollowSymlinks {
        flags = 0
    }
    var stat unix.Statx_t
    // statx() was added to Linux in kernel 4.11
    err = unix.Statx(unix.AT_FDCWD, o.path, flags, (0 |
        unix.STATX_TYPE | // Want stx_mode & S_IFMT
        unix.STATX_MODE | // Want stx_mode & ~S_IFMT
@@ -45,3 +67,36 @@ func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
    setTime("btime", stat.Btime)
    return nil
}

// Read the metadata from the file into metadata where possible
func readMetadataFromFileFstatat(o *Object, m *fs.Metadata) (err error) {
    flags := unix.AT_SYMLINK_NOFOLLOW
    if o.fs.opt.FollowSymlinks {
        flags = 0
    }
    var stat unix.Stat_t
    // fstatat() was added to Linux in kernel 2.6.16
    // Go only supports 2.6.32 or later
    err = unix.Fstatat(unix.AT_FDCWD, o.path, &stat, flags)
    if err != nil {
        return err
    }
    m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
    m.Set("uid", fmt.Sprintf("%d", stat.Uid))
    m.Set("gid", fmt.Sprintf("%d", stat.Gid))
    if stat.Rdev != 0 {
        m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
    }
    setTime := func(key string, t unix.Timespec) {
        // The types of t.Sec and t.Nsec vary from int32 to int64 on
        // different Linux architectures so we need to cast them to
        // int64 here and hence need to quiet the linter about
        // unnecessary casts.
        //
        // nolint: unconvert
        m.Set(key, time.Unix(int64(t.Sec), int64(t.Nsec)).Format(metadataTimeFormat))
    }
    setTime("atime", stat.Atim)
    setTime("mtime", stat.Mtim)
    return nil
}

@@ -1,7 +1,6 @@
package local

import (
    "io/ioutil"
    "os"
    "sync"
    "testing"
@@ -13,7 +12,7 @@ import (

// Check we can remove an open file
func TestRemove(t *testing.T) {
    fd, err := ioutil.TempFile("", "rclone-remove-test")
    fd, err := os.CreateTemp("", "rclone-remove-test")
    require.NoError(t, err)
    name := fd.Name()
    defer func() {

@@ -69,6 +69,11 @@ func (w *BinWriter) WritePu64(val int64) {
    w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}

// WriteP64 writes a signed long as unsigned varint
func (w *BinWriter) WriteP64(val int64) {
    w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}

// WriteString writes a zero-terminated string
func (w *BinWriter) WriteString(str string) {
    buf := []byte(str)

@@ -18,7 +18,6 @@ import (

    "encoding/hex"
    "encoding/json"
    "io/ioutil"
    "net/http"
    "net/url"

@@ -91,8 +90,13 @@ func init() {
            Help:     "User name (usually email).",
            Required: true,
        }, {
            Name: "pass",
            Help: "Password.",
            Name: "pass",
            Help: `Password.

This must be an app password - rclone will not work with your normal
password. See the Configuration section in the docs for how to make an
app password.
`,
            Required:   true,
            IsPassword: true,
        }, {
@@ -641,12 +645,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
        return nil, -1, err
    }

    mTime := int64(item.Mtime)
    if mTime < 0 {
        fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
        mTime = 0
    }
    modTime := time.Unix(mTime, 0)
    modTime := time.Unix(int64(item.Mtime), 0)

    isDir, err := f.isDir(item.Kind, remote)
    if err != nil {
@@ -1660,7 +1659,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

    // Attempt to put by calculating hash in memory
    if trySpeedup && size <= int64(o.fs.opt.SpeedupMaxMem) {
        fileBuf, err = ioutil.ReadAll(in)
        fileBuf, err = io.ReadAll(in)
        if err != nil {
            return err
        }
@@ -1703,7 +1702,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
    if size <= mrhash.Size {
        // Optimize upload: skip extra request if data fits in the hash buffer.
        if fileBuf == nil {
            fileBuf, err = ioutil.ReadAll(wrapIn)
            fileBuf, err = io.ReadAll(wrapIn)
        }
        if fileHash == nil && err == nil {
            fileHash = mrhash.Sum(fileBuf)
@@ -2058,7 +2057,7 @@ func (o *Object) addFileMetaData(ctx context.Context, overwrite bool) error {
    req.WritePu16(0) // revision
    req.WriteString(o.fs.opt.Enc.FromStandardPath(o.absPath()))
    req.WritePu64(o.size)
    req.WritePu64(o.modTime.Unix())
    req.WriteP64(o.modTime.Unix())
    req.WritePu32(0)
    req.Write(o.mrHash)

@@ -2214,7 +2213,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
    fs.Debugf(o, "Server returned full content instead of range")
    if start > 0 {
        // Discard the beginning of the data
        _, err = io.CopyN(ioutil.Discard, wrapStream, start)
        _, err = io.CopyN(io.Discard, wrapStream, start)
        if err != nil {
            closeBody(res)
            return nil, err

@@ -8,7 +8,6 @@ import (
    "encoding/hex"
    "fmt"
    "io"
    "io/ioutil"
    "path"
    "strings"
    "sync"
@@ -575,7 +574,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
        }
        data = data[:limit]
    }
    return ioutil.NopCloser(bytes.NewBuffer(data)), nil
    return io.NopCloser(bytes.NewBuffer(data)), nil
}

// Update the object with the contents of the io.Reader, modTime and size
@@ -583,7 +582,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
    bucket, bucketPath := o.split()
    data, err := ioutil.ReadAll(in)
    data, err := io.ReadAll(in)
    if err != nil {
        return fmt.Errorf("failed to update memory object: %w", err)
    }

@@ -12,7 +12,6 @@ import (
    "fmt"
    gohash "hash"
    "io"
    "io/ioutil"
    "math/rand"
    "net/http"
    "net/url"
@@ -972,7 +971,7 @@ func (o *Object) netStorageUploadRequest(ctx context.Context, in io.Reader, src
        URL = o.fs.url(src.Remote())
    }
    if strings.HasSuffix(URL, ".rclonelink") {
        bits, err := ioutil.ReadAll(in)
        bits, err := io.ReadAll(in)
        if err != nil {
            return err
        }
@@ -1058,7 +1057,7 @@ func (o *Object) netStorageDownloadRequest(ctx context.Context, options []fs.Ope
    if strings.HasSuffix(URL, ".rclonelink") && o.target != "" {
        fs.Infof(nil, "Converting a symlink to the rclonelink file on download %q", URL)
        reader := strings.NewReader(o.target)
        readcloser := ioutil.NopCloser(reader)
        readcloser := io.NopCloser(reader)
        return readcloser, nil
    }

@@ -891,6 +891,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    }).Fill(ctx, f)
    f.srv.SetErrorHandler(errorHandler)

    // Disable change polling in China region
    // See: https://github.com/rclone/rclone/issues/6444
    if f.opt.Region == regionCN {
        f.features.ChangeNotify = nil
    }

    // Renew the token in the background
    f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
        _, _, err := f.readMetaDataForPath(ctx, "")
backend/oracleobjectstorage/client.go (new file, 158 lines)
@@ -0,0 +1,158 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package oracleobjectstorage

import (
    "context"
    "crypto/rsa"
    "errors"
    "net/http"
    "os"

    "github.com/oracle/oci-go-sdk/v65/common"
    "github.com/oracle/oci-go-sdk/v65/common/auth"
    "github.com/oracle/oci-go-sdk/v65/objectstorage"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/fs/fshttp"
)

func getConfigurationProvider(opt *Options) (common.ConfigurationProvider, error) {
    switch opt.Provider {
    case instancePrincipal:
        return auth.InstancePrincipalConfigurationProvider()
    case userPrincipal:
        if opt.ConfigFile != "" && !fileExists(opt.ConfigFile) {
            fs.Errorf(userPrincipal, "oci config file doesn't exist at %v", opt.ConfigFile)
        }
        return common.CustomProfileConfigProvider(opt.ConfigFile, opt.ConfigProfile), nil
    case resourcePrincipal:
        return auth.ResourcePrincipalConfigurationProvider()
    case noAuth:
        fs.Infof("client", "using no auth provider")
        return getNoAuthConfiguration()
    default:
    }
    return common.DefaultConfigProvider(), nil
}

func newObjectStorageClient(ctx context.Context, opt *Options) (*objectstorage.ObjectStorageClient, error) {
    p, err := getConfigurationProvider(opt)
    if err != nil {
        return nil, err
    }
    client, err := objectstorage.NewObjectStorageClientWithConfigurationProvider(p)
    if err != nil {
        fs.Errorf(opt.Provider, "failed to create object storage client, %v", err)
        return nil, err
    }
    if opt.Region != "" {
        client.SetRegion(opt.Region)
    }
    modifyClient(ctx, opt, &client.BaseClient)
    return &client, err
}

func fileExists(filePath string) bool {
    if _, err := os.Stat(filePath); errors.Is(err, os.ErrNotExist) {
        return false
    }
    return true
}

func modifyClient(ctx context.Context, opt *Options, client *common.BaseClient) {
    client.HTTPClient = getHTTPClient(ctx)
    if opt.Provider == noAuth {
        client.Signer = getNoAuthSigner()
    }
}

// getHTTPClient makes an http client according to the global options
// this has rclone specific options support like dump headers, body etc.
func getHTTPClient(ctx context.Context) *http.Client {
    return fshttp.NewClient(ctx)
}

var retryErrorCodes = []int{
    408, // Request Timeout
    429, // Rate exceeded.
    500, // Get occasional 500 Internal Server Error
    503, // Service Unavailable
    504, // Gateway Time-out
}

func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
    if fserrors.ContextError(ctx, &err) {
        return false, err
    }
    // If this is an ocierr object, try and extract more useful information to determine if we should retry
    if ociError, ok := err.(common.ServiceError); ok {
        // Simple case, check the original embedded error in case it's generically retryable
        if fserrors.ShouldRetry(err) {
            return true, err
        }
        // If it is a timeout then we want to retry that
        if ociError.GetCode() == "RequestTimeout" {
            return true, err
        }
    }
    // Ok, not an oci error, check for generic failure conditions
    return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
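
shouldRetry is shaped to be the return value of a pacer callback, so OCI service errors and generic HTTP failures share one retry policy. A typical call site looks like the sketch below; GetNamespace stands in for any client call here, and the surrounding f.pacer/f.srv fields follow the pattern used elsewhere in this backend:

var resp objectstorage.GetNamespaceResponse
err := f.pacer.Call(func() (bool, error) {
    var err error
    // any OCI call works the same way: hand the HTTP response and error
    // to shouldRetry and let the pacer decide whether to go again
    resp, err = f.srv.GetNamespace(ctx, objectstorage.GetNamespaceRequest{})
    return shouldRetry(ctx, resp.HTTPResponse(), err)
})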

func getNoAuthConfiguration() (common.ConfigurationProvider, error) {
    return &noAuthConfigurator{}, nil
}

func getNoAuthSigner() common.HTTPRequestSigner {
    return &noAuthSigner{}
}

type noAuthConfigurator struct {
}

type noAuthSigner struct {
}

func (n *noAuthSigner) Sign(*http.Request) error {
    return nil
}

func (n *noAuthConfigurator) PrivateRSAKey() (*rsa.PrivateKey, error) {
    return nil, nil
}

func (n *noAuthConfigurator) KeyID() (string, error) {
    return "", nil
}

func (n *noAuthConfigurator) TenancyOCID() (string, error) {
    return "", nil
}

func (n *noAuthConfigurator) UserOCID() (string, error) {
    return "", nil
}

func (n *noAuthConfigurator) KeyFingerprint() (string, error) {
    return "", nil
}

func (n *noAuthConfigurator) Region() (string, error) {
    return "", nil
}

func (n *noAuthConfigurator) AuthType() (common.AuthConfig, error) {
    return common.AuthConfig{
        AuthType:         common.UnknownAuthenticationType,
        IsFromConfigFile: false,
        OboToken:         nil,
    }, nil
}

// Check the interfaces are satisfied
var (
    _ common.ConfigurationProvider = &noAuthConfigurator{}
    _ common.HTTPRequestSigner     = &noAuthSigner{}
)
backend/oracleobjectstorage/command.go (new file, 228 lines)
@@ -0,0 +1,228 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package oracleobjectstorage

import (
    "context"
    "fmt"
    "strings"
    "time"

    "github.com/oracle/oci-go-sdk/v65/common"
    "github.com/oracle/oci-go-sdk/v65/objectstorage"
    "github.com/rclone/rclone/fs"
)

// ------------------------------------------------------------
// Command Interface Implementation
// ------------------------------------------------------------

const (
    operationRename        = "rename"
    operationListMultiPart = "list-multipart-uploads"
    operationCleanup       = "cleanup"
)

var commandHelp = []fs.CommandHelp{{
    Name:  operationRename,
    Short: "change the name of an object",
    Long: `This command can be used to rename an object.

Usage Examples:

    rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
`,
    Opts: nil,
}, {
    Name:  operationListMultiPart,
    Short: "List the unfinished multipart uploads",
    Long: `This command lists the unfinished multipart uploads in JSON format.

    rclone backend list-multipart-uploads oos:bucket/path/to/object

It returns a dictionary of buckets with values as lists of unfinished
multipart uploads.

You can call it with no bucket in which case it lists all buckets, with
a bucket or with a bucket and path.

    {
      "test-bucket": [
        {
          "namespace": "test-namespace",
          "bucket": "test-bucket",
          "object": "600m.bin",
          "uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
          "timeCreated": "2022-07-29T06:21:16.595Z",
          "storageTier": "Standard"
        }
      ]
`,
}, {
    Name:  operationCleanup,
    Short: "Remove unfinished multipart uploads.",
    Long: `This command removes unfinished multipart uploads of age greater than
max-age which defaults to 24 hours.

Note that you can use -i/--dry-run with this command to see what it
would do.

    rclone backend cleanup oos:bucket/path/to/object
    rclone backend cleanup -o max-age=7w oos:bucket/path/to/object

Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
`,
    Opts: map[string]string{
        "max-age": "Max age of upload to delete",
    },
},
}

/*
Command the backend to run a named command

The command run is name
args may be used to read arguments from
opts may be used to read optional arguments from

The result should be capable of being JSON encoded
If it is a string or a []string it will be shown to the user
otherwise it will be JSON encoded and shown to the user like that
*/
func (f *Fs) Command(ctx context.Context, commandName string, args []string,
    opt map[string]string) (result interface{}, err error) {
    // fs.Debugf(f, "command %v, args: %v, opts:%v", commandName, args, opt)
    switch commandName {
    case operationRename:
        if len(args) < 2 {
            return nil, fmt.Errorf("path to object or its new name to rename is empty")
        }
        remote := args[0]
        newName := args[1]
        return f.rename(ctx, remote, newName)
    case operationListMultiPart:
        return f.listMultipartUploadsAll(ctx)
    case operationCleanup:
        maxAge := 24 * time.Hour
        if opt["max-age"] != "" {
            maxAge, err = fs.ParseDuration(opt["max-age"])
            if err != nil {
                return nil, fmt.Errorf("bad max-age: %w", err)
            }
        }
        return nil, f.cleanUp(ctx, maxAge)
    default:
        return nil, fs.ErrorCommandNotFound
    }
}
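
For the cleanup branch, max-age accepts the usual rclone duration suffixes via fs.ParseDuration; a quick illustration of how the option value flows through:

maxAge, err := fs.ParseDuration("7w") // rclone durations: 2h, 7d, 7w, ...
if err == nil {
    fmt.Println(maxAge) // 1176h0m0s, i.e. 7 weeks
}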

func (f *Fs) rename(ctx context.Context, remote, newName string) (interface{}, error) {
    if remote == "" {
        return nil, fmt.Errorf("path to object file cannot be empty")
    }
    if newName == "" {
        return nil, fmt.Errorf("the object's new name cannot be empty")
    }
    o := &Object{
        fs:     f,
        remote: remote,
    }
    bucketName, objectPath := o.split()
    err := o.readMetaData(ctx)
    if err != nil {
        fs.Errorf(f, "failed to read object:%v %v ", objectPath, err)
        if strings.HasPrefix(objectPath, bucketName) {
            fs.Errorf(f, "warn: ensure object path: %v is relative to bucket:%v and doesn't include the bucket name",
                objectPath, bucketName)
        }
        return nil, fs.ErrorNotAFile
    }
    details := objectstorage.RenameObjectDetails{
        SourceName: common.String(objectPath),
        NewName:    common.String(newName),
    }
    request := objectstorage.RenameObjectRequest{
        NamespaceName:       common.String(f.opt.Namespace),
        BucketName:          common.String(bucketName),
        RenameObjectDetails: details,
        OpcClientRequestId:  nil,
        RequestMetadata:     common.RequestMetadata{},
    }
    var response objectstorage.RenameObjectResponse
    err = f.pacer.Call(func() (bool, error) {
        response, err = f.srv.RenameObject(ctx, request)
        return shouldRetry(ctx, response.HTTPResponse(), err)
    })
    if err != nil {
        return nil, err
    }
    fs.Infof(f, "success: renamed object-path: %v to %v", objectPath, newName)
    return "renamed successfully", nil
}

func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string][]*objectstorage.MultipartUpload,
    err error) {
    uploadsMap = make(map[string][]*objectstorage.MultipartUpload)
    bucket, directory := f.split("")
    if bucket != "" {
        uploads, err := f.listMultipartUploads(ctx, bucket, directory)
        if err != nil {
            return uploadsMap, err
        }
        uploadsMap[bucket] = uploads
        return uploadsMap, nil
    }
    entries, err := f.listBuckets(ctx)
    if err != nil {
        return uploadsMap, err
    }
    for _, entry := range entries {
        bucket := entry.Remote()
        uploads, listErr := f.listMultipartUploads(ctx, bucket, "")
        if listErr != nil {
            err = listErr
            fs.Errorf(f, "%v", err)
        }
        uploadsMap[bucket] = uploads
    }
    return uploadsMap, err
}

// listMultipartUploads lists all outstanding multipart uploads for (bucket, key)
//
// Note that rather lazily we treat key as a prefix, so it matches
// directories and objects. This could surprise the user if they ask
// for "dir" and it returns "dirKey"
func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory string) (
    uploads []*objectstorage.MultipartUpload, err error) {

    uploads = []*objectstorage.MultipartUpload{}
    req := objectstorage.ListMultipartUploadsRequest{
        NamespaceName: common.String(f.opt.Namespace),
        BucketName:    common.String(bucketName),
    }

    var response objectstorage.ListMultipartUploadsResponse
    for {
        err = f.pacer.Call(func() (bool, error) {
            response, err = f.srv.ListMultipartUploads(ctx, req)
            return shouldRetry(ctx, response.HTTPResponse(), err)
        })
        if err != nil {
            // fs.Debugf(f, "failed to list multi part uploads %v", err)
            return uploads, err
        }
        for index, item := range response.Items {
            if directory != "" && item.Object != nil && !strings.HasPrefix(*item.Object, directory) {
                continue
            }
            uploads = append(uploads, &response.Items[index])
        }
        if response.OpcNextPage == nil {
            break
        }
        req.Page = response.OpcNextPage
    }
    return uploads, nil
}
backend/oracleobjectstorage/copy.go (new file, 155 lines)
@@ -0,0 +1,155 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package oracleobjectstorage

import (
    "context"
    "fmt"
    "strings"
    "time"

    "github.com/oracle/oci-go-sdk/v65/common"
    "github.com/oracle/oci-go-sdk/v65/objectstorage"
    "github.com/rclone/rclone/fs"
)

// ------------------------------------------------------------
// Copier is an optional interface implemented by Fs
//------------------------------------------------------------

// Copy src to this remote using server-side copy operations.
// This is stored with the remote path given
// It returns the destination Object and a possible error
// Will only be called if src.Fs().Name() == f.Name()
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
    // fs.Debugf(f, "copying %v to %v", src.Remote(), remote)
    srcObj, ok := src.(*Object)
    if !ok {
        // fs.Debugf(src, "Can't copy - not same remote type")
        return nil, fs.ErrorCantCopy
    }
    // Temporary Object under construction
    dstObj := &Object{
        fs:     f,
        remote: remote,
    }
    err := f.copy(ctx, dstObj, srcObj)
    if err != nil {
        return nil, err
    }
    return f.NewObject(ctx, remote)
}

// copy does a server-side copy from dstObj <- srcObj
//
// If newInfo is nil then the metadata will be copied otherwise it
// will be replaced with newInfo
func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object) (err error) {
    srcBucket, srcPath := srcObj.split()
    dstBucket, dstPath := dstObj.split()
    if dstBucket != srcBucket {
        exists, err := f.bucketExists(ctx, dstBucket)
        if err != nil {
            return err
        }
        if !exists {
            err = f.makeBucket(ctx, dstBucket)
            if err != nil {
                return err
            }
        }
    }
    copyObjectDetails := objectstorage.CopyObjectDetails{
        SourceObjectName:          common.String(srcPath),
        DestinationRegion:         common.String(dstObj.fs.opt.Region),
        DestinationNamespace:      common.String(dstObj.fs.opt.Namespace),
        DestinationBucket:         common.String(dstBucket),
        DestinationObjectName:     common.String(dstPath),
        DestinationObjectMetadata: metadataWithOpcPrefix(srcObj.meta),
    }
    req := objectstorage.CopyObjectRequest{
        NamespaceName:     common.String(srcObj.fs.opt.Namespace),
        BucketName:        common.String(srcBucket),
        CopyObjectDetails: copyObjectDetails,
    }
    var resp objectstorage.CopyObjectResponse
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.srv.CopyObject(ctx, req)
        return shouldRetry(ctx, resp.HTTPResponse(), err)
    })
    if err != nil {
        return err
    }
    workRequestID := resp.OpcWorkRequestId
    timeout := time.Duration(f.opt.CopyTimeout)
    dstName := dstObj.String()
    // https://docs.oracle.com/en-us/iaas/Content/Object/Tasks/copyingobjects.htm
    // To enable server side copy object, customers will have to
    // grant policy to objectstorage service to manage object-family
    // Allow service objectstorage-<region_identifier> to manage object-family in tenancy
    // Another option to avoid the policy is to download and reupload the file.
    // This download upload will work for maximum file size limit of 5GB
    err = copyObjectWaitForWorkRequest(ctx, workRequestID, dstName, timeout, f.srv)
    if err != nil {
        return err
    }
    return err
}

func copyObjectWaitForWorkRequest(ctx context.Context, wID *string, entityType string, timeout time.Duration,
    client *objectstorage.ObjectStorageClient) error {

    stateConf := &StateChangeConf{
        Pending: []string{
            string(objectstorage.WorkRequestStatusAccepted),
            string(objectstorage.WorkRequestStatusInProgress),
            string(objectstorage.WorkRequestStatusCanceling),
        },
        Target: []string{
            string(objectstorage.WorkRequestSummaryStatusCompleted),
            string(objectstorage.WorkRequestSummaryStatusCanceled),
            string(objectstorage.WorkRequestStatusFailed),
        },
        Refresh: func() (interface{}, string, error) {
            getWorkRequestRequest := objectstorage.GetWorkRequestRequest{}
            getWorkRequestRequest.WorkRequestId = wID
            workRequestResponse, err := client.GetWorkRequest(context.Background(), getWorkRequestRequest)
            wr := &workRequestResponse.WorkRequest
            return workRequestResponse, string(wr.Status), err
        },
        Timeout: timeout,
    }

    wrr, e := stateConf.WaitForStateContext(ctx, entityType)
    if e != nil {
        return fmt.Errorf("work request did not succeed, workId: %s, entity: %s. Message: %s", *wID, entityType, e)
    }

    wr := wrr.(objectstorage.GetWorkRequestResponse).WorkRequest
    if wr.Status == objectstorage.WorkRequestStatusFailed {
        errorMessage, _ := getObjectStorageErrorFromWorkRequest(ctx, wID, client)
        return fmt.Errorf("work request did not succeed, workId: %s, entity: %s. Message: %s", *wID, entityType, errorMessage)
    }

    return nil
}
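
The waiter above is a small state machine: poll Refresh until the status leaves Pending and lands in Target, or the timeout fires. Reduced to its essentials, a caller supplies only three pieces (a sketch assuming the StateChangeConf type defined elsewhere in this backend, with refreshWorkRequest standing in for a real refresh closure):

stateConf := &StateChangeConf{
    Pending: []string{string(objectstorage.WorkRequestStatusInProgress)},
    Target:  []string{string(objectstorage.WorkRequestSummaryStatusCompleted)},
    Refresh: refreshWorkRequest, // func() (interface{}, string, error) - assumed
    Timeout: time.Minute,
}
_, err := stateConf.WaitForStateContext(ctx, "object copy")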

func getObjectStorageErrorFromWorkRequest(ctx context.Context, workRequestID *string, client *objectstorage.ObjectStorageClient) (string, error) {
    req := objectstorage.ListWorkRequestErrorsRequest{}
    req.WorkRequestId = workRequestID
    res, err := client.ListWorkRequestErrors(ctx, req)

    if err != nil {
        return "", err
    }

    allErrs := make([]string, 0)
    for _, errs := range res.Items {
        allErrs = append(allErrs, *errs.Message)
    }

    errorMessage := strings.Join(allErrs, "\n")
    return errorMessage, nil
}
backend/oracleobjectstorage/object.go (new file, 621 lines)
@@ -0,0 +1,621 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package oracleobjectstorage

import (
    "context"
    "encoding/base64"
    "encoding/hex"
    "fmt"
    "io"
    "net/http"
    "regexp"
    "strconv"
    "strings"
    "time"

    "github.com/ncw/swift/v2"
    "github.com/oracle/oci-go-sdk/v65/common"
    "github.com/oracle/oci-go-sdk/v65/objectstorage"
    "github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/lib/atexit"
)

// ------------------------------------------------------------
// Object Interface Implementation
// ------------------------------------------------------------

const (
    metaMtime   = "mtime"     // the meta key to store mtime in - e.g. X-Amz-Meta-Mtime
    metaMD5Hash = "md5chksum" // the meta key to store md5hash in
    // StandardTier object storage tier
    ociMetaPrefix = "opc-meta-"
)

var archive = "archive"
var infrequentAccess = "infrequentaccess"
var standard = "standard"

var storageTierMap = map[string]*string{
    archive:          &archive,
    infrequentAccess: &infrequentAccess,
    standard:         &standard,
}

var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)

// Object describes an oci bucket object
type Object struct {
    fs           *Fs               // what this object is part of
    remote       string            // The remote path
    md5          string            // MD5 hash if known
    bytes        int64             // Size of the object
    lastModified time.Time         // The modified time of the object if known
    meta         map[string]string // The object metadata if known - may be nil
    mimeType     string            // Content-Type of the object

    // Metadata as pointers to strings as they often won't be present
    storageTier *string // e.g. Standard
}

// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
    return o.fs.split(o.remote)
}

// readMetaData gets the metadata if it hasn't already been fetched
func (o *Object) readMetaData(ctx context.Context) (err error) {
    fs.Debugf(o, "trying to read metadata %v", o.remote)
    if o.meta != nil {
        return nil
    }
    info, err := o.headObject(ctx)
    if err != nil {
        return err
    }
    return o.decodeMetaDataHead(info)
}

// headObject gets the metadata from the object unconditionally
func (o *Object) headObject(ctx context.Context) (info *objectstorage.HeadObjectResponse, err error) {
    bucketName, objectPath := o.split()
    req := objectstorage.HeadObjectRequest{
        NamespaceName: common.String(o.fs.opt.Namespace),
        BucketName:    common.String(bucketName),
        ObjectName:    common.String(objectPath),
    }
    var response objectstorage.HeadObjectResponse
    err = o.fs.pacer.Call(func() (bool, error) {
        var err error
        response, err = o.fs.srv.HeadObject(ctx, req)
        return shouldRetry(ctx, response.HTTPResponse(), err)
    })
    if err != nil {
        if svcErr, ok := err.(common.ServiceError); ok {
            if svcErr.GetHTTPStatusCode() == http.StatusNotFound {
                return nil, fs.ErrorObjectNotFound
            }
        }
        return nil, err
    }
    o.fs.cache.MarkOK(bucketName)
    return &response, err
}

func (o *Object) decodeMetaDataHead(info *objectstorage.HeadObjectResponse) (err error) {
    return o.setMetaData(
        info.ContentLength,
        info.ContentMd5,
        info.ContentType,
        info.LastModified,
        info.StorageTier,
        info.OpcMeta)
}

func (o *Object) decodeMetaDataObject(info *objectstorage.GetObjectResponse) (err error) {
    return o.setMetaData(
        info.ContentLength,
        info.ContentMd5,
        info.ContentType,
        info.LastModified,
        info.StorageTier,
        info.OpcMeta)
}

func (o *Object) setMetaData(
    contentLength *int64,
    contentMd5 *string,
    contentType *string,
    lastModified *common.SDKTime,
    storageTier interface{},
    meta map[string]string) error {

    if contentLength != nil {
        o.bytes = *contentLength
    }
    if contentMd5 != nil {
        md5, err := o.base64ToMd5(*contentMd5)
        if err == nil {
            o.md5 = md5
        }
    }
    o.meta = meta
    if o.meta == nil {
        o.meta = map[string]string{}
    }
    // Read MD5 from metadata if present
    if md5sumBase64, ok := o.meta[metaMD5Hash]; ok {
        md5, err := o.base64ToMd5(md5sumBase64)
        if err == nil {
            o.md5 = md5
        }
    }
    if lastModified == nil {
        o.lastModified = time.Now()
        fs.Logf(o, "Failed to read last modified")
    } else {
        o.lastModified = lastModified.Time
    }
    if contentType != nil {
        o.mimeType = *contentType
    }
    if storageTier == nil || storageTier == "" {
        o.storageTier = storageTierMap[standard]
    } else {
        tier := strings.ToLower(fmt.Sprintf("%v", storageTier))
        o.storageTier = storageTierMap[tier]
    }
    return nil
}

func (o *Object) base64ToMd5(md5sumBase64 string) (md5 string, err error) {
    md5sumBytes, err := base64.StdEncoding.DecodeString(md5sumBase64)
    if err != nil {
        fs.Debugf(o, "Failed to read md5sum from metadata %q: %v", md5sumBase64, err)
        return "", err
    } else if len(md5sumBytes) != 16 {
        fs.Debugf(o, "failed to read md5sum from metadata %q: wrong length", md5sumBase64)
        return "", fmt.Errorf("failed to read md5sum from metadata %q: wrong length", md5sumBase64)
    }
    return hex.EncodeToString(md5sumBytes), nil
}
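
The conversion in base64ToMd5 bridges two representations of the same 16-byte digest: OCI returns Content-MD5 as base64 of the raw bytes, while rclone hashes are lower-case hex. A standalone illustration:

package main

import (
    "crypto/md5"
    "encoding/base64"
    "encoding/hex"
    "fmt"
)

func main() {
    sum := md5.Sum([]byte("hello"))                  // 16 raw digest bytes
    b64 := base64.StdEncoding.EncodeToString(sum[:]) // the form the API returns
    raw, _ := base64.StdEncoding.DecodeString(b64)
    fmt.Println(hex.EncodeToString(raw)) // 5d41402abc4b2a76b9719d911017c592
}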

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
    return o.fs
}

// Remote returns the remote path
func (o *Object) Remote() string {
    return o.remote
}

// Return a string version
func (o *Object) String() string {
    if o == nil {
        return "<nil>"
    }
    return o.remote
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
    return o.bytes
}

// GetTier returns storage class as string
func (o *Object) GetTier() string {
    if o.storageTier == nil || *o.storageTier == "" {
        return standard
    }
    return *o.storageTier
}

// SetTier performs changing storage class
func (o *Object) SetTier(tier string) (err error) {
    ctx := context.TODO()
    tier = strings.ToLower(tier)
    bucketName, bucketPath := o.split()
    tierEnum, ok := objectstorage.GetMappingStorageTierEnum(tier)
    if !ok {
        return fmt.Errorf("not a valid storage tier %v ", tier)
    }

    req := objectstorage.UpdateObjectStorageTierRequest{
        NamespaceName: common.String(o.fs.opt.Namespace),
        BucketName:    common.String(bucketName),
        UpdateObjectStorageTierDetails: objectstorage.UpdateObjectStorageTierDetails{
            ObjectName:  common.String(bucketPath),
            StorageTier: tierEnum,
        },
    }
    _, err = o.fs.srv.UpdateObjectStorageTier(ctx, req)
    if err != nil {
        return err
    }
    o.storageTier = storageTierMap[tier]
    return err
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
    err := o.readMetaData(ctx)
    if err != nil {
        fs.Logf(o, "Failed to read metadata: %v", err)
        return ""
    }
    return o.mimeType
}

// Hash returns the MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
    if t != hash.MD5 {
        return "", hash.ErrUnsupported
    }
    // Convert base64 encoded md5 into lower case hex
    if o.md5 == "" {
        err := o.readMetaData(ctx)
        if err != nil {
            return "", err
        }
    }
    return o.md5, nil
}

// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) (result time.Time) {
    if o.fs.ci.UseServerModTime {
        return o.lastModified
    }
    err := o.readMetaData(ctx)
    if err != nil {
        fs.Logf(o, "Failed to read metadata: %v", err)
        return time.Now()
    }
    // read mtime out of metadata if available
    d, ok := o.meta[metaMtime]
    if !ok || d == "" {
        return o.lastModified
    }
    modTime, err := swift.FloatStringToTime(d)
    if err != nil {
        fs.Logf(o, "Failed to read mtime from object: %v", err)
        return o.lastModified
    }
    return modTime
}
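
mtime round-trips through the swift library's float-string format (Unix seconds with an optional fractional part), which is what FloatStringToTime parses above. An illustrative round trip (output format assumed):

s := swift.TimeToFloatString(time.Unix(1660000000, 500000000)) // "1660000000.5"
t, err := swift.FloatStringToTime(s)                           // back to a time.Time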

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
    err := o.readMetaData(ctx)
    if err != nil {
        return err
    }
    o.meta[metaMtime] = swift.TimeToFloatString(modTime)
    _, err = o.fs.Copy(ctx, o, o.remote)
    return err
}

// Storable returns if this object is storable
func (o *Object) Storable() bool {
    return true
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
    bucketName, bucketPath := o.split()
    req := objectstorage.DeleteObjectRequest{
        NamespaceName: common.String(o.fs.opt.Namespace),
        BucketName:    common.String(bucketName),
        ObjectName:    common.String(bucketPath),
    }
    err := o.fs.pacer.Call(func() (bool, error) {
        resp, err := o.fs.srv.DeleteObject(ctx, req)
        return shouldRetry(ctx, resp.HTTPResponse(), err)
    })
    return err
}

// Open object file
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
    bucketName, bucketPath := o.split()
    req := objectstorage.GetObjectRequest{
        NamespaceName: common.String(o.fs.opt.Namespace),
        BucketName:    common.String(bucketName),
        ObjectName:    common.String(bucketPath),
    }
    o.applyGetObjectOptions(&req, options...)

    var resp objectstorage.GetObjectResponse
    err := o.fs.pacer.Call(func() (bool, error) {
        var err error
        resp, err = o.fs.srv.GetObject(ctx, req)
        return shouldRetry(ctx, resp.HTTPResponse(), err)
    })
    if err != nil {
        return nil, err
    }
    // read size from ContentLength or ContentRange
    bytes := resp.ContentLength
    if resp.ContentRange != nil {
        var contentRange = *resp.ContentRange
        slash := strings.IndexRune(contentRange, '/')
        if slash >= 0 {
            i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
            if err == nil {
                bytes = &i
            } else {
                fs.Debugf(o, "Failed to parse integer from %q: %v", contentRange, err)
            }
        } else {
            fs.Debugf(o, "Failed to find length in %q", contentRange)
        }
    }
    err = o.decodeMetaDataObject(&resp)
    if err != nil {
        return nil, err
    }
    o.bytes = *bytes
    return resp.HTTPResponse().Body, nil
}

// Update an object if it has changed
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
    bucketName, bucketPath := o.split()
    err = o.fs.makeBucket(ctx, bucketName)
    if err != nil {
        return err
    }

    // determine if we should upload single part or multipart
    size := src.Size()
    multipart := size >= int64(o.fs.opt.UploadCutoff)

    // Set the mtime in the metadata
    modTime := src.ModTime(ctx)
    metadata := map[string]string{
        metaMtime: swift.TimeToFloatString(modTime),
    }

    // read the md5sum if available
    // - for non-multipart
    //    - so we can add a ContentMD5
    //    - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
    // - for multipart provided checksums aren't disabled
    //    - so we can add the md5sum in the metadata as metaMD5Hash
    var md5sumBase64 string
    var md5sumHex string
    if !multipart || !o.fs.opt.DisableChecksum {
        md5sumHex, err = src.Hash(ctx, hash.MD5)
        if err == nil && matchMd5.MatchString(md5sumHex) {
            hashBytes, err := hex.DecodeString(md5sumHex)
            if err == nil {
                md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
                if multipart && !o.fs.opt.DisableChecksum {
                    // Set the md5sum as metadata on the object if
                    // - a multipart upload
                    // - the ETag is not an MD5, e.g. when using SSE/SSE-C
                    // provided checksums aren't disabled
                    metadata[metaMD5Hash] = md5sumBase64
                }
            }
        }
    }
    // Guess the content type
    mimeType := fs.MimeType(ctx, src)

    if multipart {
        chunkSize := int64(o.fs.opt.ChunkSize)
        uploadRequest := transfer.UploadRequest{
            NamespaceName:                       common.String(o.fs.opt.Namespace),
            BucketName:                          common.String(bucketName),
            ObjectName:                          common.String(bucketPath),
            ContentType:                         common.String(mimeType),
            PartSize:                            common.Int64(chunkSize),
            AllowMultipartUploads:               common.Bool(true),
            AllowParrallelUploads:               common.Bool(true),
            ObjectStorageClient:                 o.fs.srv,
            EnableMultipartChecksumVerification: common.Bool(!o.fs.opt.DisableChecksum),
            NumberOfGoroutines:                  common.Int(o.fs.opt.UploadConcurrency),
            Metadata:                            metadataWithOpcPrefix(metadata),
        }
        if o.fs.opt.StorageTier != "" {
            storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
            if !ok {
                return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
            }
            uploadRequest.StorageTier = storageTier
        }
        o.applyMultiPutOptions(&uploadRequest, options...)
        uploadStreamRequest := transfer.UploadStreamRequest{
            UploadRequest: uploadRequest,
            StreamReader:  in,
        }
        uploadMgr := transfer.NewUploadManager()
        var uploadID = ""

        defer atexit.OnError(&err, func() {
            if uploadID == "" {
                return
            }
            if o.fs.opt.LeavePartsOnError {
                return
            }
            fs.Debugf(o, "Cancelling multipart upload")
            errCancel := o.fs.abortMultiPartUpload(
                context.Background(),
                bucketName,
                bucketPath,
                uploadID)
            if errCancel != nil {
                fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
            }
        })()

        err = o.fs.pacer.Call(func() (bool, error) {
            uploadResponse, err := uploadMgr.UploadStream(ctx, uploadStreamRequest)
            var httpResponse *http.Response
            if err == nil {
                if uploadResponse.Type == transfer.MultipartUpload {
                    if uploadResponse.MultipartUploadResponse != nil {
                        httpResponse = uploadResponse.MultipartUploadResponse.HTTPResponse()
                    }
                } else {
                    if uploadResponse.SinglepartUploadResponse != nil {
                        httpResponse = uploadResponse.SinglepartUploadResponse.HTTPResponse()
                    }
                }
            }
            if err != nil {
                uploadID := ""
                if uploadResponse.MultipartUploadResponse != nil && uploadResponse.MultipartUploadResponse.UploadID != nil {
                    uploadID = *uploadResponse.MultipartUploadResponse.UploadID
                    fs.Debugf(o, "multipart streaming upload failed, aborting uploadID: %v, may retry", uploadID)
                    _ = o.fs.abortMultiPartUpload(ctx, bucketName, bucketPath, uploadID)
                }
            }
            return shouldRetry(ctx, httpResponse, err)
        })
        if err != nil {
            fs.Errorf(o, "multipart streaming upload failed %v", err)
            return err
        }
    } else {
        req := objectstorage.PutObjectRequest{
            NamespaceName: common.String(o.fs.opt.Namespace),
            BucketName:    common.String(bucketName),
            ObjectName:    common.String(bucketPath),
            ContentType:   common.String(mimeType),
            PutObjectBody: io.NopCloser(in),
            OpcMeta:       metadata,
        }
        if size >= 0 {
            req.ContentLength = common.Int64(size)
        }
        if o.fs.opt.StorageTier != "" {
            storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
            if !ok {
                return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
            }
            req.StorageTier = storageTier
        }
        o.applyPutOptions(&req, options...)
        err = o.fs.pacer.Call(func() (bool, error) {
            resp, err := o.fs.srv.PutObject(ctx, req)
            return shouldRetry(ctx, resp.HTTPResponse(), err)
        })
        if err != nil {
            fs.Errorf(o, "put object failed %v", err)
            return err
        }
    }
    // Read the metadata from the newly created object
    o.meta = nil // wipe old metadata
    return o.readMetaData(ctx)
}
|
||||
|
||||
func (o *Object) applyPutOptions(req *objectstorage.PutObjectRequest, options ...fs.OpenOption) {
|
||||
// Apply upload options
|
||||
for _, option := range options {
|
||||
key, value := option.Header()
|
||||
lowerKey := strings.ToLower(key)
|
||||
switch lowerKey {
|
||||
case "":
|
||||
// ignore
|
||||
case "cache-control":
|
||||
req.CacheControl = common.String(value)
|
||||
case "content-disposition":
|
||||
req.ContentDisposition = common.String(value)
|
||||
case "content-encoding":
|
||||
req.ContentEncoding = common.String(value)
|
||||
case "content-language":
|
||||
req.ContentLanguage = common.String(value)
|
||||
case "content-type":
|
||||
req.ContentType = common.String(value)
|
||||
default:
|
||||
if strings.HasPrefix(lowerKey, ociMetaPrefix) {
|
||||
req.OpcMeta[lowerKey] = value
|
||||
} else {
|
||||
fs.Errorf(o, "Don't know how to set key %q on upload", key)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (o *Object) applyGetObjectOptions(req *objectstorage.GetObjectRequest, options ...fs.OpenOption) {
|
||||
fs.FixRangeOption(options, o.bytes)
|
||||
for _, option := range options {
|
||||
switch option.(type) {
|
||||
case *fs.RangeOption, *fs.SeekOption:
|
||||
_, value := option.Header()
|
||||
req.Range = &value
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Apply upload options
|
||||
for _, option := range options {
|
||||
key, value := option.Header()
|
||||
lowerKey := strings.ToLower(key)
|
||||
switch lowerKey {
|
||||
case "":
|
||||
// ignore
|
||||
case "cache-control":
|
||||
req.HttpResponseCacheControl = common.String(value)
|
||||
case "content-disposition":
|
||||
req.HttpResponseContentDisposition = common.String(value)
|
||||
case "content-encoding":
|
||||
req.HttpResponseContentEncoding = common.String(value)
|
||||
case "content-language":
|
||||
req.HttpResponseContentLanguage = common.String(value)
|
||||
case "content-type":
|
||||
req.HttpResponseContentType = common.String(value)
|
||||
case "range":
|
||||
// do nothing
|
||||
default:
|
||||
fs.Errorf(o, "Don't know how to set key %q on upload", key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (o *Object) applyMultiPutOptions(req *transfer.UploadRequest, options ...fs.OpenOption) {
|
||||
// Apply upload options
|
||||
for _, option := range options {
|
||||
key, value := option.Header()
|
||||
lowerKey := strings.ToLower(key)
|
||||
switch lowerKey {
|
||||
case "":
|
||||
// ignore
|
||||
case "content-encoding":
|
||||
req.ContentEncoding = common.String(value)
|
||||
case "content-language":
|
||||
req.ContentLanguage = common.String(value)
|
||||
case "content-type":
|
||||
req.ContentType = common.String(value)
|
||||
default:
|
||||
if strings.HasPrefix(lowerKey, ociMetaPrefix) {
|
||||
req.Metadata[lowerKey] = value
|
||||
} else {
|
||||
fs.Errorf(o, "Don't know how to set key %q on upload", key)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func metadataWithOpcPrefix(src map[string]string) map[string]string {
|
||||
dst := make(map[string]string)
|
||||
for lowerKey, value := range src {
|
||||
if !strings.HasPrefix(lowerKey, ociMetaPrefix) {
|
||||
dst[ociMetaPrefix+lowerKey] = value
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
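For a concrete sense of what metadataWithOpcPrefix does to user metadata before an upload, here is a minimal, self-contained sketch. It assumes ociMetaPrefix is the usual OCI user-metadata prefix "opc-meta-"; the constant itself is defined elsewhere in this backend and is not part of this hunk.

```go
package main

import (
	"fmt"
	"strings"
)

// Assumption: the backend defines ociMetaPrefix = "opc-meta-", the
// header prefix OCI uses for user metadata.
const ociMetaPrefix = "opc-meta-"

// Mirrors metadataWithOpcPrefix above: unprefixed keys gain the prefix,
// keys that already carry it are skipped (and so dropped from dst).
func metadataWithOpcPrefix(src map[string]string) map[string]string {
	dst := make(map[string]string)
	for lowerKey, value := range src {
		if !strings.HasPrefix(lowerKey, ociMetaPrefix) {
			dst[ociMetaPrefix+lowerKey] = value
		}
	}
	return dst
}

func main() {
	src := map[string]string{"mtime": "1667246164", "opc-meta-owner": "alice"}
	fmt.Println(metadataWithOpcPrefix(src))
	// map[opc-meta-mtime:1667246164]
}
```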

backend/oracleobjectstorage/options.go (new file, 242 lines)
@@ -0,0 +1,242 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package oracleobjectstorage

import (
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/lib/encoder"
)

const (
	maxSizeForCopy             = 4768 * 1024 * 1024
	minChunkSize               = fs.SizeSuffix(1024 * 1024 * 5)
	defaultUploadCutoff        = fs.SizeSuffix(200 * 1024 * 1024)
	defaultUploadConcurrency   = 10
	maxUploadCutoff            = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
	minSleep                   = 100 * time.Millisecond
	maxSleep                   = 5 * time.Minute
	decayConstant              = 1 // bigger for slower decay, exponential
	defaultCopyTimeoutDuration = fs.Duration(time.Minute)
)

const (
	userPrincipal     = "user_principal_auth"
	instancePrincipal = "instance_principal_auth"
	resourcePrincipal = "resource_principal_auth"
	environmentAuth   = "env_auth"
	noAuth            = "no_auth"

	userPrincipalHelpText = `use an OCI user and an API key for authentication.
You'll need to put the tenancy OCID, user OCID, region, and the path and fingerprint of an API key into a config file.
https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm`

	instancePrincipalHelpText = `use instance principals to authorize an instance to make API calls.
Each instance has its own identity, and authenticates using the certificates that are read from instance metadata.
https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm`

	resourcePrincipalHelpText = `use resource principals to make API calls`

	environmentAuthHelpText = `automatically pick up the credentials from the runtime (env); the first one to provide auth wins`

	noAuthHelpText = `no credentials needed, this is typically for reading public buckets`
)

// Options defines the configuration for this backend
type Options struct {
	Provider          string               `config:"provider"`
	Compartment       string               `config:"compartment"`
	Namespace         string               `config:"namespace"`
	Region            string               `config:"region"`
	Endpoint          string               `config:"endpoint"`
	Enc               encoder.MultiEncoder `config:"encoding"`
	ConfigFile        string               `config:"config_file"`
	ConfigProfile     string               `config:"config_profile"`
	UploadCutoff      fs.SizeSuffix        `config:"upload_cutoff"`
	ChunkSize         fs.SizeSuffix        `config:"chunk_size"`
	UploadConcurrency int                  `config:"upload_concurrency"`
	DisableChecksum   bool                 `config:"disable_checksum"`
	CopyCutoff        fs.SizeSuffix        `config:"copy_cutoff"`
	CopyTimeout       fs.Duration          `config:"copy_timeout"`
	StorageTier       string               `config:"storage_tier"`
	LeavePartsOnError bool                 `config:"leave_parts_on_error"`
	NoCheckBucket     bool                 `config:"no_check_bucket"`
}

func newOptions() []fs.Option {
	return []fs.Option{{
		Name:     fs.ConfigProvider,
		Help:     "Choose your Auth Provider",
		Required: true,
		Default:  environmentAuth,
		Examples: []fs.OptionExample{{
			Value: environmentAuth,
			Help:  environmentAuthHelpText,
		}, {
			Value: userPrincipal,
			Help:  userPrincipalHelpText,
		}, {
			Value: instancePrincipal,
			Help:  instancePrincipalHelpText,
		}, {
			Value: resourcePrincipal,
			Help:  resourcePrincipalHelpText,
		}, {
			Value: noAuth,
			Help:  noAuthHelpText,
		}},
	}, {
		Name:     "namespace",
		Help:     "Object storage namespace",
		Required: true,
	}, {
		Name:     "compartment",
		Help:     "Object storage compartment OCID",
		Provider: "!no_auth",
		Required: true,
	}, {
		Name:     "region",
		Help:     "Object storage Region",
		Required: true,
	}, {
		Name:     "endpoint",
		Help:     "Endpoint for Object storage API.\n\nLeave blank to use the default endpoint for the region.",
		Required: false,
	}, {
		Name:     "config_file",
		Help:     "Path to OCI config file",
		Provider: userPrincipal,
		Default:  "~/.oci/config",
		Examples: []fs.OptionExample{{
			Value: "~/.oci/config",
			Help:  "oci configuration file location",
		}},
	}, {
		Name:     "config_profile",
		Help:     "Profile name inside the oci config file",
		Provider: userPrincipal,
		Default:  "Default",
		Examples: []fs.OptionExample{{
			Value: "Default",
			Help:  "Use the default profile",
		}},
	}, {
		Name: "upload_cutoff",
		Help: `Cutoff for switching to chunked upload.

Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5 GiB.`,
		Default:  defaultUploadCutoff,
		Advanced: true,
	}, {
		Name: "chunk_size",
		Help: `Chunk size to use for uploading.

When uploading files larger than upload_cutoff or files with unknown
size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google
photos or google docs) they will be uploaded as multipart uploads
using this chunk size.

Note that "upload_concurrency" chunks of this size are buffered
in memory per transfer.

If you are transferring large files over high-speed links and you have
enough memory, then increasing this will speed up the transfers.

Rclone will automatically increase the chunk size when uploading a
large file of known size to stay below the 10,000 chunks limit.

Files of unknown size are uploaded with the configured
chunk_size. Since the default chunk size is 5 MiB and there can be at
most 10,000 chunks, this means that by default the maximum size of
a file you can stream upload is 48 GiB. If you wish to stream upload
larger files then you will need to increase chunk_size.

Increasing the chunk size decreases the accuracy of the progress
statistics displayed with "-P" flag.
`,
		Default:  minChunkSize,
		Advanced: true,
	}, {
		Name: "upload_concurrency",
		Help: `Concurrency for multipart uploads.

This is the number of chunks of the same file that are uploaded
concurrently.

If you are uploading small numbers of large files over high-speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
		Default:  defaultUploadConcurrency,
		Advanced: true,
	}, {
		Name: "copy_cutoff",
		Help: `Cutoff for switching to multipart copy.

Any files larger than this that need to be server-side copied will be
copied in chunks of this size.

The minimum is 0 and the maximum is 5 GiB.`,
		Default:  fs.SizeSuffix(maxSizeForCopy),
		Advanced: true,
	}, {
		Name: "copy_timeout",
		Help: `Timeout for copy.

Copy is an asynchronous operation, specify timeout to wait for copy to succeed
`,
		Default:  defaultCopyTimeoutDuration,
		Advanced: true,
	}, {
		Name: "disable_checksum",
		Help: `Don't store MD5 checksum with object metadata.

Normally rclone will calculate the MD5 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.`,
		Default:  false,
		Advanced: true,
	}, {
		Name:     config.ConfigEncoding,
		Help:     config.ConfigEncodingHelp,
		Advanced: true,
		// Any UTF-8 character is valid in a key, however it can't handle
		// invalid UTF-8 and / has a special meaning.
		//
		// The SDK can't seem to handle uploading files called '.'
		// - initial / encoding
		// - doubled / encoding
		// - trailing / encoding
		// so that OSS keys are always valid file names
		Default: encoder.EncodeInvalidUtf8 |
			encoder.EncodeSlash |
			encoder.EncodeDot,
	}, {
		Name: "leave_parts_on_error",
		Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts in the bucket for manual recovery.

It should be set to true for resuming uploads across different sessions.

WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add
additional costs if not cleaned up.
`,
		Default:  false,
		Advanced: true,
	}, {
		Name: "no_check_bucket",
		Help: `If set, don't attempt to check the bucket exists or create it.

This can be useful when trying to minimise the number of transactions
rclone does if you know the bucket exists already.

It can also be needed if the user you are using does not have bucket
creation permissions.
`,
		Default:  false,
		Advanced: true,
	}}
}
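The 48 GiB figure in the chunk_size help above follows directly from the defaults. A quick check, assuming the 10,000-part multipart cap the help text refers to:

```go
package main

import "fmt"

func main() {
	// Assumptions: 5 MiB default chunk (minChunkSize above) and an
	// assumed 10,000-part cap per multipart upload.
	const chunkSize = 5 * 1024 * 1024
	const maxParts = 10000
	maxStream := int64(chunkSize) * maxParts
	fmt.Printf("%d bytes = %.1f GiB\n", maxStream, float64(maxStream)/(1<<30))
	// 52428800000 bytes = 48.8 GiB, the "48 GiB" quoted in the help.
}
```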

backend/oracleobjectstorage/oracleobjectstorage.go (new file, 695 lines)
@@ -0,0 +1,695 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

// Package oracleobjectstorage provides an interface to the OCI object storage system.
package oracleobjectstorage

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"path"
	"strings"
	"time"

	"github.com/oracle/oci-go-sdk/v65/common"
	"github.com/oracle/oci-go-sdk/v65/objectstorage"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/bucket"
	"github.com/rclone/rclone/lib/pacer"
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "oracleobjectstorage",
		Description: "Oracle Cloud Infrastructure Object Storage",
		Prefix:      "oos",
		NewFs:       NewFs,
		CommandHelp: commandHelp,
		Options:     newOptions(),
	})
}

// Fs represents a remote object storage server
type Fs struct {
	name          string                             // name of this remote
	root          string                             // the path we are working on if any
	opt           Options                            // parsed config options
	ci            *fs.ConfigInfo                     // global config
	features      *fs.Features                       // optional features
	srv           *objectstorage.ObjectStorageClient // the connection to the object storage
	rootBucket    string                             // bucket part of root (if any)
	rootDirectory string                             // directory part of root (if any)
	cache         *bucket.Cache                      // cache for bucket creation status
	pacer         *fs.Pacer                          // To pace the API calls
}

// NewFs initializes the backend
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	ci := fs.GetConfig(ctx)
	objectStorageClient, err := newObjectStorageClient(ctx, opt)
	if err != nil {
		return nil, err
	}
	p := pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))
	f := &Fs{
		name:  name,
		opt:   *opt,
		ci:    ci,
		srv:   objectStorageClient,
		cache: bucket.NewCache(),
		pacer: fs.NewPacer(ctx, p),
	}
	f.setRoot(root)
	f.features = (&fs.Features{
		ReadMimeType:      true,
		WriteMimeType:     true,
		BucketBased:       true,
		BucketBasedRootOK: true,
		SetTier:           true,
		GetTier:           true,
		SlowModTime:       true,
	}).Fill(ctx, f)
	if f.rootBucket != "" && f.rootDirectory != "" && !strings.HasSuffix(root, "/") {
		// Check to see if the (bucket,directory) is actually an existing file
		oldRoot := f.root
		newRoot, leaf := path.Split(oldRoot)
		f.setRoot(newRoot)
		_, err := f.NewObject(ctx, leaf)
		if err != nil {
			// File doesn't exist or is a directory so return old f
			f.setRoot(oldRoot)
			return f, nil
		}
		// return an error with fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, err
}

func checkUploadChunkSize(cs fs.SizeSuffix) error {
	if cs < minChunkSize {
		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
	}
	return nil
}

func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadChunkSize(cs)
	if err == nil {
		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
	}
	return
}

func checkUploadCutoff(cs fs.SizeSuffix) error {
	if cs > maxUploadCutoff {
		return fmt.Errorf("%s is greater than %s", cs, maxUploadCutoff)
	}
	return nil
}

func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadCutoff(cs)
	if err == nil {
		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
	}
	return
}

// ------------------------------------------------------------
// Implement the backend that represents a remote object storage server
// Fs is the interface a cloud storage system must provide
// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	if f.rootBucket == "" {
		return "oos:root"
	}
	if f.rootDirectory == "" {
		return fmt.Sprintf("oos:bucket %s", f.rootBucket)
	}
	return fmt.Sprintf("oos:bucket %s, path %s", f.rootBucket, f.rootDirectory)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// Precision of the remote
func (f *Fs) Precision() time.Duration {
	return time.Millisecond
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}

// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
	f.root = parsePath(root)
	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}

// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return
}

// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
	bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}
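To make the root/bucket bookkeeping concrete: setRoot and split lean on rclone's lib/bucket helpers, which treat the first path segment as the bucket. A small standalone sketch; the remote paths are made up for illustration and encoding is ignored:

```go
package main

import (
	"fmt"
	"path"

	"github.com/rclone/rclone/lib/bucket"
)

func main() {
	// Suppose the remote was configured as oos:my-bucket/photos,
	// so setRoot stored root = "my-bucket/photos".
	root := "my-bucket/photos"

	// split("2022/cat.png") joins the relative path onto the root and
	// peels the bucket off the front.
	b, p := bucket.Split(path.Join(root, "2022/cat.png"))
	fmt.Println(b, p) // my-bucket photos/2022/cat.png
}
```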
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	bucketName, directory := f.split(dir)
	fs.Debugf(f, "listing: bucket : %v, directory: %v", bucketName, dir)
	if bucketName == "" {
		if directory != "" {
			return nil, fs.ErrorListBucketRequired
		}
		return f.listBuckets(ctx)
	}
	return f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "")
}

// listFn is called from list to handle an object.
type listFn func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error

// list the objects into the function supplied from
// the bucket and root supplied
// (bucket, directory) is the starting directory
// If prefix is set then it is removed from all file names
// If addBucket is set then it adds the bucket to the start of the remotes generated
// If recurse is set the function will recursively list
// If limit is > 0 then it limits to that many files (must be less than 1000)
// If hidden is set then it will list the hidden (deleted) files too.
// if findFile is set it will look for files called (bucket, directory)
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, limit int,
	fn listFn) (err error) {
	if prefix != "" {
		prefix += "/"
	}
	if directory != "" {
		directory += "/"
	}

	delimiter := ""
	if !recurse {
		delimiter = "/"
	}
	chunkSize := 1000
	if limit > 0 {
		chunkSize = limit
	}
	var request = objectstorage.ListObjectsRequest{
		NamespaceName: common.String(f.opt.Namespace),
		BucketName:    common.String(bucket),
		Prefix:        common.String(directory),
		Limit:         common.Int(chunkSize),
		Fields:        common.String("name,size,etag,timeCreated,md5,timeModified,storageTier,archivalState"),
	}
	if delimiter != "" {
		request.Delimiter = common.String(delimiter)
	}

	for {
		var resp objectstorage.ListObjectsResponse
		err = f.pacer.Call(func() (bool, error) {
			var err error
			resp, err = f.srv.ListObjects(ctx, request)
			return shouldRetry(ctx, resp.HTTPResponse(), err)
		})
		if err != nil {
			if ociError, ok := err.(common.ServiceError); ok {
				// A 404 means the directory doesn't exist
				if ociError.GetHTTPStatusCode() == http.StatusNotFound {
					err = fs.ErrorDirNotFound
				}
			}
			if f.rootBucket == "" {
				// if listing from the root ignore wrong region requests returning
				// empty directory
				if reqErr, ok := err.(common.ServiceError); ok {
					// 301 if wrong region for bucket
					if reqErr.GetHTTPStatusCode() == http.StatusMovedPermanently {
						fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucket)
						return nil
					}
				}
			}
			return err
		}
		if !recurse {
			for _, commonPrefix := range resp.ListObjects.Prefixes {
				if commonPrefix == "" {
					fs.Logf(f, "Nil common prefix received")
					continue
				}
				remote := commonPrefix
				remote = f.opt.Enc.ToStandardPath(remote)
				if !strings.HasPrefix(remote, prefix) {
					fs.Logf(f, "Odd name received %q", remote)
					continue
				}
				remote = remote[len(prefix):]
				if addBucket {
					remote = path.Join(bucket, remote)
				}
				remote = strings.TrimSuffix(remote, "/")
				err = fn(remote, &objectstorage.ObjectSummary{Name: &remote}, true)
				if err != nil {
					return err
				}
			}
		}
		for i := range resp.Objects {
			object := &resp.Objects[i]
			// Finish if file name no longer has prefix
			//if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
			//	return nil
			//}
			remote := *object.Name
			remote = f.opt.Enc.ToStandardPath(remote)
			if !strings.HasPrefix(remote, prefix) {
				// fs.Debugf(f, "Odd name received %v", object.Name)
				continue
			}
			remote = remote[len(prefix):]
			// Check for directory
			isDirectory := remote == "" || strings.HasSuffix(remote, "/")
			if addBucket {
				remote = path.Join(bucket, remote)
			}
			// is this a directory marker?
			if isDirectory && object.Size != nil && *object.Size == 0 {
				continue // skip directory marker
			}
			if isDirectory && len(remote) > 1 {
				remote = remote[:len(remote)-1]
			}
			err = fn(remote, object, isDirectory)
			if err != nil {
				return err
			}
		}
		// end if no NextStartWith
		if resp.NextStartWith == nil {
			break
		}
		request.Start = resp.NextStartWith
	}
	return nil
}

// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *objectstorage.ObjectSummary, isDirectory bool) (fs.DirEntry, error) {
	if isDirectory {
		size := int64(0)
		if object.Size != nil {
			size = *object.Size
		}
		d := fs.NewDir(remote, time.Time{}).SetSize(size)
		return d, nil
	}
	o, err := f.newObjectWithInfo(ctx, remote, object)
	if err != nil {
		return nil, err
	}
	return o, nil
}

// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
	fn := func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
		if err != nil {
			return err
		}
		if entry != nil {
			entries = append(entries, entry)
		}
		return nil
	}
	err = f.list(ctx, bucket, directory, prefix, addBucket, false, 0, fn)
	if err != nil {
		return nil, err
	}
	// bucket must be present if listing succeeded
	f.cache.MarkOK(bucket)
	return entries, nil
}

// listBuckets returns all the buckets to out
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
	if f.opt.Provider == noAuth {
		return nil, fmt.Errorf("can't list buckets with %v provider, use a valid auth provider in config file", noAuth)
	}
	var request = objectstorage.ListBucketsRequest{
		NamespaceName: common.String(f.opt.Namespace),
		CompartmentId: common.String(f.opt.Compartment),
	}
	var resp objectstorage.ListBucketsResponse
	for {
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.ListBuckets(ctx, request)
			return shouldRetry(ctx, resp.HTTPResponse(), err)
		})
		if err != nil {
			return nil, err
		}
		for _, item := range resp.Items {
			bucketName := f.opt.Enc.ToStandardName(*item.Name)
			f.cache.MarkOK(bucketName)
			d := fs.NewDir(bucketName, item.TimeCreated.Time)
			entries = append(entries, d)
		}
		if resp.OpcNextPage == nil {
			break
		}
		request.Page = resp.OpcNextPage
	}
	return entries, nil
}

// Return an Object from a path
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *objectstorage.ObjectSummary) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		// Set info but not meta
		if info.TimeModified == nil {
			fs.Logf(o, "Failed to read last modified")
			o.lastModified = time.Now()
		} else {
			o.lastModified = info.TimeModified.Time
		}
		if info.Md5 != nil {
			md5, err := o.base64ToMd5(*info.Md5)
			if err == nil {
				o.md5 = md5
			}
		}
		o.bytes = *info.Size
		o.storageTier = storageTierMap[strings.ToLower(string(info.StorageTier))]
	} else {
		err := o.readMetaData(ctx) // reads info and headers, returning an error
		if err != nil {
			return nil, err
		}
	}
	return o, nil
}
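OCI reports object MD5s base64-encoded, while rclone's hash framework expects lowercase hex. base64ToMd5 is not part of this hunk, so the following is only a sketch of the conversion it presumably performs, using the well-known MD5 of empty input as a check:

```go
package main

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"log"
)

// Assumed behaviour of base64ToMd5: decode the base64 digest OCI
// returns and re-encode it as the lowercase hex rclone expects.
func base64ToMd5(b64 string) (string, error) {
	raw, err := base64.StdEncoding.DecodeString(b64)
	if err != nil {
		return "", err
	}
	return hex.EncodeToString(raw), nil
}

func main() {
	md5hex, err := base64ToMd5("1B2M2Y8AsgTpgAmY7PhCfg==") // MD5 of empty input
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(md5hex) // d41d8cd98f00b204e9800998ecf8427e
}
```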
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}

// Put the object into the bucket
// Copy the reader in to the new object which is returned
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return o, o.Update(ctx, in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	bucketName, _ := f.split(dir)
	return f.makeBucket(ctx, bucketName)
}

// makeBucket creates the bucket if it doesn't exist
func (f *Fs) makeBucket(ctx context.Context, bucketName string) error {
	if f.opt.NoCheckBucket {
		return nil
	}
	return f.cache.Create(bucketName, func() error {
		details := objectstorage.CreateBucketDetails{
			Name:             common.String(bucketName),
			CompartmentId:    common.String(f.opt.Compartment),
			PublicAccessType: objectstorage.CreateBucketDetailsPublicAccessTypeNopublicaccess,
		}
		req := objectstorage.CreateBucketRequest{
			NamespaceName:       common.String(f.opt.Namespace),
			CreateBucketDetails: details,
		}
		err := f.pacer.Call(func() (bool, error) {
			resp, err := f.srv.CreateBucket(ctx, req)
			return shouldRetry(ctx, resp.HTTPResponse(), err)
		})
		if err == nil {
			fs.Infof(f, "Bucket %q created with accessType %q", bucketName,
				objectstorage.CreateBucketDetailsPublicAccessTypeNopublicaccess)
		}
		if svcErr, ok := err.(common.ServiceError); ok {
			if code := svcErr.GetCode(); code == "BucketAlreadyOwnedByYou" || code == "BucketAlreadyExists" {
				err = nil
			}
		}
		return err
	}, func() (bool, error) {
		return f.bucketExists(ctx, bucketName)
	})
}

// Check if the bucket exists
//
// NB this can return incorrect results if called immediately after bucket deletion
func (f *Fs) bucketExists(ctx context.Context, bucketName string) (bool, error) {
	req := objectstorage.HeadBucketRequest{
		NamespaceName: common.String(f.opt.Namespace),
		BucketName:    common.String(bucketName),
	}
	err := f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.HeadBucket(ctx, req)
		return shouldRetry(ctx, resp.HTTPResponse(), err)
	})
	if err == nil {
		return true, nil
	}
	if err, ok := err.(common.ServiceError); ok {
		if err.GetHTTPStatusCode() == http.StatusNotFound {
			return false, nil
		}
	}
	return false, err
}

// Rmdir deletes an empty bucket. If the bucket is not empty this will fail with an appropriate error
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	bucketName, directory := f.split(dir)
	if bucketName == "" || directory != "" {
		return nil
	}
	return f.cache.Remove(bucketName, func() error {
		req := objectstorage.DeleteBucketRequest{
			NamespaceName: common.String(f.opt.Namespace),
			BucketName:    common.String(bucketName),
		}
		err := f.pacer.Call(func() (bool, error) {
			resp, err := f.srv.DeleteBucket(ctx, req)
			return shouldRetry(ctx, resp.HTTPResponse(), err)
		})
		if err == nil {
			fs.Infof(f, "Bucket %q deleted", bucketName)
		}
		return err
	})
}

func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID string) (err error) {
	if uploadID == "" {
		return nil
	}
	request := objectstorage.AbortMultipartUploadRequest{
		NamespaceName: common.String(f.opt.Namespace),
		BucketName:    common.String(bucketName),
		ObjectName:    common.String(bucketPath),
		UploadId:      common.String(uploadID),
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.AbortMultipartUpload(ctx, request)
		return shouldRetry(ctx, resp.HTTPResponse(), err)
	})
	return err
}

// cleanUpBucket removes all pending multipart uploads for a given bucket over the age of maxAge
func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Duration,
	uploads []*objectstorage.MultipartUpload) (err error) {
	fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than %v", bucket, maxAge)
	for _, upload := range uploads {
		if upload.TimeCreated != nil && upload.Object != nil && upload.UploadId != nil {
			age := time.Since(upload.TimeCreated.Time)
			what := fmt.Sprintf("pending multipart upload for bucket %q key %q dated %v (%v ago)", bucket, *upload.Object,
				upload.TimeCreated, age)
			if age > maxAge {
				fs.Infof(f, "removing %s", what)
				if operations.SkipDestructive(ctx, what, "remove pending upload") {
					continue
				}
				ignoreErr := f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
				if ignoreErr != nil {
					fs.Debugf(f, "ignoring error %s", ignoreErr)
				}
			} else {
				fs.Debugf(f, "ignoring %s", what)
			}
		} else {
			fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.")
		}
	}
	return err
}

// cleanUp removes all pending multipart uploads older than maxAge
func (f *Fs) cleanUp(ctx context.Context, maxAge time.Duration) (err error) {
	uploadsMap, err := f.listMultipartUploadsAll(ctx)
	if err != nil {
		return err
	}
	for bucketName, uploads := range uploadsMap {
		cleanErr := f.cleanUpBucket(ctx, bucketName, maxAge, uploads)
		if cleanErr != nil {
			fs.Errorf(f, "Failed to cleanup bucket %q: %v", bucketName, cleanErr)
			err = cleanErr
		}
	}
	return err
}

// CleanUp removes all pending multipart uploads older than 24 hours
func (f *Fs) CleanUp(ctx context.Context) (err error) {
	return f.cleanUp(ctx, 24*time.Hour)
}
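Because Fs satisfies fs.CleanUpper (see the interface checks further down), this CleanUp is what a generic cleanup call reaches. A minimal sketch of driving it through rclone's fs layer; the remote name is hypothetical and assumes an oracleobjectstorage remote called myoos is already configured:

```go
package main

import (
	"context"
	"log"

	"github.com/rclone/rclone/fs"

	// Blank import registers the backend, as rclone's own binaries do.
	_ "github.com/rclone/rclone/backend/oracleobjectstorage"
)

func main() {
	ctx := context.Background()
	f, err := fs.NewFs(ctx, "myoos:my-bucket") // hypothetical remote
	if err != nil {
		log.Fatal(err)
	}
	if do := f.Features().CleanUp; do != nil {
		// Aborts pending multipart uploads older than 24 hours.
		if err := do(ctx); err != nil {
			log.Fatal(err)
		}
	}
}
```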
// ------------------------------------------------------------
// Implement ListRer, an optional interface for Fs
// ------------------------------------------------------------

/*
ListR lists the objects and directories of the Fs starting
from dir recursively into out.

dir should be "" to start from the root, and should not
have trailing slashes.

This should return ErrDirNotFound if the directory isn't
found.

It should call callback for each tranche of entries read.
These need not be returned in any particular order. If
callback returns an error then the listing will stop
immediately.

Don't implement this unless you have a more efficient way
of listing recursively than doing a directory traversal.
*/
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	bucketName, directory := f.split(dir)
	list := walk.NewListRHelper(callback)
	listR := func(bucket, directory, prefix string, addBucket bool) error {
		return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
			if err != nil {
				return err
			}
			return list.Add(entry)
		})
	}
	if bucketName == "" {
		entries, err := f.listBuckets(ctx)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			err = list.Add(entry)
			if err != nil {
				return err
			}
			bucketName := entry.Remote()
			err = listR(bucketName, "", f.rootDirectory, true)
			if err != nil {
				return err
			}
			// bucket must be present if listing succeeded
			f.cache.MarkOK(bucketName)
		}
	} else {
		err = listR(bucketName, directory, f.rootDirectory, f.rootBucket == "")
		if err != nil {
			return err
		}
		// bucket must be present if listing succeeded
		f.cache.MarkOK(bucketName)
	}
	return list.Flush()
}

// Check the interfaces are satisfied
var (
	_ fs.Fs          = &Fs{}
	_ fs.Copier      = &Fs{}
	_ fs.PutStreamer = &Fs{}
	_ fs.ListRer     = &Fs{}
	_ fs.Commander   = &Fs{}
	_ fs.CleanUpper  = &Fs{}

	_ fs.Object    = &Object{}
	_ fs.MimeTyper = &Object{}
	_ fs.GetTierer = &Object{}
	_ fs.SetTierer = &Object{}
)

backend/oracleobjectstorage/oracleobjectstorage_test.go (new file, 33 lines)
@@ -0,0 +1,33 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package oracleobjectstorage

import (
	"testing"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:  "TestOracleObjectStorage:",
		TiersToTest: []string{"standard", "archive"},
		NilObject:   (*Object)(nil),
		ChunkedUpload: fstests.ChunkedUploadConfig{
			MinChunkSize: minChunkSize,
		},
	})
}

func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setUploadChunkSize(cs)
}

func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	return f.setUploadCutoff(cs)
}

var _ fstests.SetUploadChunkSizer = (*Fs)(nil)

backend/oracleobjectstorage/oracleobjectstorage_unsupported.go (new file, 7 lines)
@@ -0,0 +1,7 @@
// Build for oracleobjectstorage for unsupported platforms to stop go complaining
// about "no buildable Go source files"

//go:build plan9 || solaris || js
// +build plan9 solaris js

package oracleobjectstorage

backend/oracleobjectstorage/waiter.go (new file, 362 lines)
@@ -0,0 +1,362 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package oracleobjectstorage

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/rclone/rclone/fs"
)

var refreshGracePeriod = 30 * time.Second

// StateRefreshFunc is a function type used for StateChangeConf that is
// responsible for refreshing the item being watched for a state change.
//
// It returns three results. `result` is any object that will be returned
// as the final object after waiting for state change. This allows you to
// return the final updated object, for example an EC2 instance after refreshing
// it. A nil result represents not found.
//
// `state` is the latest state of that object. And `err` is any error that
// may have happened while refreshing the state.
type StateRefreshFunc func() (result interface{}, state string, err error)

// StateChangeConf is the configuration struct used for `WaitForState`.
type StateChangeConf struct {
	Delay          time.Duration    // Wait this time before starting checks
	Pending        []string         // States that are "allowed" and will continue trying
	Refresh        StateRefreshFunc // Refreshes the current state
	Target         []string         // Target state
	Timeout        time.Duration    // The amount of time to wait before timeout
	MinTimeout     time.Duration    // Smallest time to wait before refreshes
	PollInterval   time.Duration    // Override MinTimeout/backoff and only poll this often
	NotFoundChecks int              // Number of times to allow not found (nil result from Refresh)

	// This is to work around inconsistent APIs
	ContinuousTargetOccurrence int // Number of times the Target state has to occur continuously
}

// WaitForStateContext watches an object and waits for it to achieve the state
// specified in the configuration using the specified Refresh() func,
// waiting the number of seconds specified in the timeout configuration.
//
// If the Refresh function returns an error, exit immediately with that error.
//
// If the Refresh function returns a state other than the Target state or one
// listed in Pending, return immediately with an error.
//
// If the Timeout is exceeded before reaching the Target state, return an
// error.
//
// Otherwise, the result is the result of the first call to the Refresh function to
// reach the target state.
//
// Cancellation from the passed in context will cancel the refresh loop
func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (interface{}, error) {
	// fs.Debugf(entityType, "Waiting for state to become: %s", conf.Target)

	notfoundTick := 0
	targetOccurrence := 0

	// Set a default for times to check for not found
	if conf.NotFoundChecks == 0 {
		conf.NotFoundChecks = 20
	}

	if conf.ContinuousTargetOccurrence == 0 {
		conf.ContinuousTargetOccurrence = 1
	}

	type Result struct {
		Result interface{}
		State  string
		Error  error
		Done   bool
	}

	// Read every result from the refresh loop, waiting for a positive result.Done.
	resCh := make(chan Result, 1)
	// cancellation channel for the refresh loop
	cancelCh := make(chan struct{})

	result := Result{}

	go func() {
		defer close(resCh)

		select {
		case <-time.After(conf.Delay):
		case <-cancelCh:
			return
		}

		// start with 0 delay for the first loop
		var wait time.Duration

		for {
			// store the last result
			resCh <- result

			// wait and watch for cancellation
			select {
			case <-cancelCh:
				return
			case <-time.After(wait):
				// first round had no wait
				if wait == 0 {
					wait = 100 * time.Millisecond
				}
			}

			res, currentState, err := conf.Refresh()
			result = Result{
				Result: res,
				State:  currentState,
				Error:  err,
			}

			if err != nil {
				resCh <- result
				return
			}

			// If we're waiting for the absence of a thing, then return
			if res == nil && len(conf.Target) == 0 {
				targetOccurrence++
				if conf.ContinuousTargetOccurrence == targetOccurrence {
					result.Done = true
					resCh <- result
					return
				}
				continue
			}

			if res == nil {
				// If we didn't find the resource, check if we have been
				// not finding it for a while, and if so, report an error.
				notfoundTick++
				if notfoundTick > conf.NotFoundChecks {
					result.Error = &NotFoundError{
						LastError: err,
						Retries:   notfoundTick,
					}
					resCh <- result
					return
				}
			} else {
				// Reset the counter for when a resource isn't found
				notfoundTick = 0
				found := false

				for _, allowed := range conf.Target {
					if currentState == allowed {
						found = true
						targetOccurrence++
						if conf.ContinuousTargetOccurrence == targetOccurrence {
							result.Done = true
							resCh <- result
							return
						}
						continue
					}
				}

				for _, allowed := range conf.Pending {
					if currentState == allowed {
						found = true
						targetOccurrence = 0
						break
					}
				}

				if !found && len(conf.Pending) > 0 {
					result.Error = &UnexpectedStateError{
						LastError:     err,
						State:         result.State,
						ExpectedState: conf.Target,
					}
					resCh <- result
					return
				}
			}

			// Wait between refreshes using exponential backoff, except when
			// waiting for the target state to reoccur.
			if targetOccurrence == 0 {
				wait *= 2
			}

			// If a poll interval has been specified, choose that interval.
			// Otherwise, bound the default value.
			if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second {
				wait = conf.PollInterval
			} else {
				if wait < conf.MinTimeout {
					wait = conf.MinTimeout
				} else if wait > 10*time.Second {
					wait = 10 * time.Second
				}
			}

			// fs.Debugf(entityType, "[TRACE] Waiting %s before next try", wait)
		}
	}()

	// store the last value result from the refresh loop
	lastResult := Result{}

	timeout := time.After(conf.Timeout)
	for {
		select {
		case r, ok := <-resCh:
			// channel closed, so return the last result
			if !ok {
				return lastResult.Result, lastResult.Error
			}

			// we reached the intended state
			if r.Done {
				return r.Result, r.Error
			}

			// still waiting, store the last result
			lastResult = r
		case <-ctx.Done():
			close(cancelCh)
			return nil, ctx.Err()
		case <-timeout:
			// fs.Debugf(entityType, "[WARN] WaitForState timeout after %s", conf.Timeout)
			// fs.Debugf(entityType, "[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod)

			// cancel the goroutine and start our grace period timer
			close(cancelCh)
			timeout := time.After(refreshGracePeriod)

			// we need a for loop and a label to break on, because we may have
			// an extra response value to read, but still want to wait for the
			// channel to close.
		forSelect:
			for {
				select {
				case r, ok := <-resCh:
					if r.Done {
						// the last refresh loop reached the desired state
						return r.Result, r.Error
					}

					if !ok {
						// the goroutine returned
						break forSelect
					}

					// target state not reached, save the result for the
					// TimeoutError and wait for the channel to close
					lastResult = r
				case <-ctx.Done():
					fs.Errorf(entityType, "Context cancellation detected, abandoning grace period")
					break forSelect
				case <-timeout:
					fs.Errorf(entityType, "WaitForState exceeded refresh grace period")
					break forSelect
				}
			}

			return nil, &TimeoutError{
				LastError:     lastResult.Error,
				LastState:     lastResult.State,
				Timeout:       conf.Timeout,
				ExpectedState: conf.Target,
			}
		}
	}
}

// NotFoundError resource not found error
type NotFoundError struct {
	LastError    error
	LastRequest  interface{}
	LastResponse interface{}
	Message      string
	Retries      int
}

func (e *NotFoundError) Error() string {
	if e.Message != "" {
		return e.Message
	}

	if e.Retries > 0 {
		return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries)
	}

	return "couldn't find resource"
}

func (e *NotFoundError) Unwrap() error {
	return e.LastError
}

// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending
type UnexpectedStateError struct {
	LastError     error
	State         string
	ExpectedState []string
}

func (e *UnexpectedStateError) Error() string {
	return fmt.Sprintf(
		"unexpected state '%s', wanted target '%s'. last error: %s",
		e.State,
		strings.Join(e.ExpectedState, ", "),
		e.LastError,
	)
}

func (e *UnexpectedStateError) Unwrap() error {
	return e.LastError
}

// TimeoutError is returned when WaitForState times out
type TimeoutError struct {
	LastError     error
	LastState     string
	Timeout       time.Duration
	ExpectedState []string
}

func (e *TimeoutError) Error() string {
	expectedState := "resource to be gone"
	if len(e.ExpectedState) > 0 {
		expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", "))
	}

	extraInfo := make([]string, 0)
	if e.LastState != "" {
		extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState))
	}
	if e.Timeout > 0 {
		extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String()))
	}

	suffix := ""
	if len(extraInfo) > 0 {
		suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", "))
	}

	if e.LastError != nil {
		return fmt.Sprintf("timeout while waiting for %s%s: %s",
			expectedState, suffix, e.LastError)
	}

	return fmt.Sprintf("timeout while waiting for %s%s",
		expectedState, suffix)
}

func (e *TimeoutError) Unwrap() error {
	return e.LastError
}
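A minimal sketch of how the waiter above might be driven, e.g. while polling an asynchronous copy work request. It would sit alongside waiter.go in the same package (with context and time imported); the state names and the fake Refresh body are illustrative assumptions, not the backend's actual copy states:

```go
// inside package oracleobjectstorage (illustrative only)
func waitForCompleted(ctx context.Context) (interface{}, error) {
	polls := 0
	conf := &StateChangeConf{
		Pending:    []string{"ACCEPTED", "IN_PROGRESS"}, // assumed states
		Target:     []string{"COMPLETED"},
		Timeout:    time.Minute,
		MinTimeout: 100 * time.Millisecond,
		Refresh: func() (interface{}, string, error) {
			// A real Refresh would HEAD the work request here; this
			// fake flips to the target state after a few polls.
			polls++
			if polls < 3 {
				return struct{}{}, "IN_PROGRESS", nil
			}
			return struct{}{}, "COMPLETED", nil
		},
	}
	return conf.WaitForStateContext(ctx, "copy work request")
}
```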

backend/s3/s3.go (modified, 361 lines changed)
@@ -15,7 +15,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
@@ -26,6 +25,8 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/corehandlers"
|
||||
@@ -57,6 +58,7 @@ import (
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"github.com/rclone/rclone/lib/version"
|
||||
"golang.org/x/net/http/httpguts"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
@@ -64,7 +66,7 @@ import (
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "s3",
|
||||
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi",
|
||||
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi",
|
||||
NewFs: NewFs,
|
||||
CommandHelp: commandHelp,
|
||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
@@ -116,6 +118,9 @@ func init() {
|
||||
}, {
|
||||
Value: "IDrive",
|
||||
Help: "IDrive e2",
|
||||
}, {
|
||||
Value: "IONOS",
|
||||
Help: "IONOS Cloud",
|
||||
}, {
|
||||
Value: "LyveCloud",
|
||||
Help: "Seagate Lyve Cloud",
|
||||
@@ -146,6 +151,9 @@ func init() {
|
||||
}, {
|
||||
Value: "Wasabi",
|
||||
Help: "Wasabi Object Storage",
|
||||
}, {
|
||||
Value: "Qiniu",
|
||||
Help: "Qiniu Object Storage (Kodo)",
|
||||
}, {
|
||||
Value: "Other",
|
||||
Help: "Any other S3 compatible provider",
|
||||
@@ -384,10 +392,52 @@ func init() {
|
||||
Value: "auto",
|
||||
Help: "R2 buckets are automatically distributed across Cloudflare's data centers for low latency.",
|
||||
}},
|
||||
}, {
|
||||
// References:
|
||||
// https://developer.qiniu.com/kodo/4088/s3-access-domainname
|
||||
Name: "region",
|
||||
Help: "Region to connect to.",
|
||||
Provider: "Qiniu",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "cn-east-1",
|
||||
Help: "The default endpoint - a good choice if you are unsure.\nEast China Region 1.\nNeeds location constraint cn-east-1.",
|
||||
}, {
|
||||
Value: "cn-east-2",
|
||||
Help: "East China Region 2.\nNeeds location constraint cn-east-2.",
|
||||
}, {
|
||||
Value: "cn-north-1",
|
||||
Help: "North China Region 1.\nNeeds location constraint cn-north-1.",
|
||||
}, {
|
||||
Value: "cn-south-1",
|
||||
Help: "South China Region 1.\nNeeds location constraint cn-south-1.",
|
||||
}, {
|
||||
Value: "us-north-1",
|
||||
Help: "North America Region.\nNeeds location constraint us-north-1.",
|
||||
}, {
|
||||
Value: "ap-southeast-1",
|
||||
Help: "Southeast Asia Region 1.\nNeeds location constraint ap-southeast-1.",
|
||||
}, {
|
||||
Value: "ap-northeast-1",
|
||||
Help: "Northeast Asia Region 1.\nNeeds location constraint ap-northeast-1.",
|
||||
}},
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region where your bucket will be created and your data stored.\n",
|
||||
Provider: "IONOS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "de",
|
||||
Help: "Frankfurt, Germany",
|
||||
}, {
|
||||
Value: "eu-central-2",
|
||||
Help: "Berlin, Germany",
|
||||
}, {
|
||||
Value: "eu-south-2",
|
||||
Help: "Logrono, Spain",
|
||||
}},
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
|
||||
Provider: "!AWS,Alibaba,ChinaMobile,Cloudflare,ArvanCloud,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive",
|
||||
Provider: "!AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
|
||||
@@ -698,6 +748,20 @@ func init() {
|
||||
Value: "s3.private.sng01.cloud-object-storage.appdomain.cloud",
|
||||
Help: "Singapore Single Site Private Endpoint",
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for IONOS S3 Object Storage.\n\nSpecify the endpoint from the same region.",
|
||||
Provider: "IONOS",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "s3-eu-central-1.ionoscloud.com",
|
||||
Help: "Frankfurt, Germany",
|
||||
}, {
|
||||
Value: "s3-eu-central-2.ionoscloud.com",
|
||||
Help: "Berlin, Germany",
|
||||
}, {
|
||||
Value: "s3-eu-south-2.ionoscloud.com",
|
||||
Help: "Logrono, Spain",
|
||||
}},
|
||||
}, {
|
||||
// oss endpoints: https://help.aliyun.com/document_detail/31837.html
|
||||
Name: "endpoint",
|
||||
@@ -998,10 +1062,37 @@ func init() {
|
||||
Value: "nz.s3.rackcorp.com",
|
||||
Help: "Auckland (New Zealand) Endpoint",
|
||||
}},
|
||||
}, {
|
||||
// Qiniu endpoints: https://developer.qiniu.com/kodo/4088/s3-access-domainname
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for Qiniu Object Storage.",
|
||||
Provider: "Qiniu",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "s3-cn-east-1.qiniucs.com",
|
||||
Help: "East China Endpoint 1",
|
||||
}, {
|
||||
Value: "s3-cn-east-2.qiniucs.com",
|
||||
Help: "East China Endpoint 2",
|
||||
}, {
|
Value: "s3-cn-north-1.qiniucs.com",
Help: "North China Endpoint 1",
}, {
Value: "s3-cn-south-1.qiniucs.com",
Help: "South China Endpoint 1",
}, {
Value: "s3-us-north-1.qiniucs.com",
Help: "North America Endpoint 1",
}, {
Value: "s3-ap-southeast-1.qiniucs.com",
Help: "Southeast Asia Endpoint 1",
}, {
Value: "s3-ap-northeast-1.qiniucs.com",
Help: "Northeast Asia Endpoint 1",
}},
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
Provider: "!AWS,IBMCOS,IDrive,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,ArvanCloud,Scaleway,StackPath,Storj,RackCorp",
Provider: "!AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint",
@@ -1036,15 +1127,39 @@ func init() {
Provider: "LyveCloud",
}, {
Value: "s3.wasabisys.com",
Help: "Wasabi US East endpoint",
Help: "Wasabi US East 1 (N. Virginia)",
Provider: "Wasabi",
}, {
Value: "s3.us-east-2.wasabisys.com",
Help: "Wasabi US East 2 (N. Virginia)",
Provider: "Wasabi",
}, {
Value: "s3.us-central-1.wasabisys.com",
Help: "Wasabi US Central 1 (Texas)",
Provider: "Wasabi",
}, {
Value: "s3.us-west-1.wasabisys.com",
Help: "Wasabi US West endpoint",
Help: "Wasabi US West 1 (Oregon)",
Provider: "Wasabi",
}, {
Value: "s3.ca-central-1.wasabisys.com",
Help: "Wasabi CA Central 1 (Toronto)",
Provider: "Wasabi",
}, {
Value: "s3.eu-central-1.wasabisys.com",
Help: "Wasabi EU Central endpoint",
Help: "Wasabi EU Central 1 (Amsterdam)",
Provider: "Wasabi",
}, {
Value: "s3.eu-central-2.wasabisys.com",
Help: "Wasabi EU Central 2 (Frankfurt)",
Provider: "Wasabi",
}, {
Value: "s3.eu-west-1.wasabisys.com",
Help: "Wasabi EU West 1 (London)",
Provider: "Wasabi",
}, {
Value: "s3.eu-west-2.wasabisys.com",
Help: "Wasabi EU West 2 (Paris)",
Provider: "Wasabi",
}, {
Value: "s3.ap-northeast-1.wasabisys.com",
@@ -1054,6 +1169,14 @@ func init() {
Value: "s3.ap-northeast-2.wasabisys.com",
Help: "Wasabi AP Northeast 2 (Osaka) endpoint",
Provider: "Wasabi",
}, {
Value: "s3.ap-southeast-1.wasabisys.com",
Help: "Wasabi AP Southeast 1 (Singapore)",
Provider: "Wasabi",
}, {
Value: "s3.ap-southeast-2.wasabisys.com",
Help: "Wasabi AP Southeast 2 (Sydney)",
Provider: "Wasabi",
}, {
Value: "s3.ir-thr-at1.arvanstorage.com",
Help: "ArvanCloud Tehran Iran (Asiatech) endpoint",
@@ -1408,10 +1531,36 @@ func init() {
Value: "nz",
Help: "Auckland (New Zealand) Region",
}},
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\n\nUsed when creating buckets only.",
Provider: "Qiniu",
Examples: []fs.OptionExample{{
Value: "cn-east-1",
Help: "East China Region 1",
}, {
Value: "cn-east-2",
Help: "East China Region 2",
}, {
Value: "cn-north-1",
Help: "North China Region 1",
}, {
Value: "cn-south-1",
Help: "South China Region 1",
}, {
Value: "us-north-1",
Help: "North America Region 1",
}, {
Value: "ap-southeast-1",
Help: "Southeast Asia Region 1",
}, {
Value: "ap-northeast-1",
Help: "Northeast Asia Region 1",
}},
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,IBMCOS,IDrive,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,ArvanCloud,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
Provider: "!AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
}, {
Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -1421,7 +1570,11 @@ This ACL is used for creating objects and if bucket_acl isn't set, for creating
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl

Note that this ACL is applied when server-side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.`,
doesn't copy the ACL from the source but rather writes a fresh one.

If the acl is an empty string then no X-Amz-Acl: header is added and
the default (private) will be used.
`,
Provider: "!Storj,Cloudflare",
Examples: []fs.OptionExample{{
Value: "default",
@@ -1475,7 +1628,11 @@ doesn't copy the ACL from the source but rather writes a fresh one.`,
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl

Note that this ACL is applied only when creating buckets. If it
isn't set then "acl" is used instead.`,
isn't set then "acl" is used instead.

If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl:
header is added and the default (private) will be used.
`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "private",
@@ -1535,8 +1692,21 @@ isn't set then "acl" is used instead.`,
Help: "arn:aws:kms:*",
}},
}, {
Name: "sse_customer_key",
Help: "If using SSE-C you must provide the secret encryption key used to encrypt/decrypt your data.",
Name: "sse_customer_key",
Help: `To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.

Alternatively you can provide --sse-customer-key-base64.`,
Provider: "AWS,Ceph,ChinaMobile,Minio",
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
}, {
Name: "sse_customer_key_base64",
Help: `If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.

Alternatively you can provide --sse-customer-key.`,
Provider: "AWS,Ceph,ChinaMobile,Minio",
Advanced: true,
Examples: []fs.OptionExample{{
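The two SSE-C options carry the same secret in different encodings: sse_customer_key takes the raw bytes, sse_customer_key_base64 the base64 form, and setting both is rejected. A minimal sketch of producing the base64 form from a raw key (the key bytes here are a made-up example):

package main

import (
    "encoding/base64"
    "fmt"
)

func main() {
    // Example only: a real SSE-C key should be 32 bytes of strong randomness.
    rawKey := []byte("0123456789abcdef0123456789abcdef")
    // This is the value to put in sse_customer_key_base64; rclone decodes it
    // back with base64.StdEncoding.DecodeString before use, as shown in the
    // NewFs hunk further below.
    fmt.Println(base64.StdEncoding.EncodeToString(rawKey))
}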
@@ -1665,6 +1835,24 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
Value: "GLACIER",
Help: "Archived storage.\nPrices are lower, but it needs to be restored first to be accessed.",
}},
}, {
// Mapping from here: https://developer.qiniu.com/kodo/5906/storage-type
Name: "storage_class",
Help: "The storage class to use when storing new objects in Qiniu.",
Provider: "Qiniu",
Examples: []fs.OptionExample{{
Value: "STANDARD",
Help: "Standard storage class",
}, {
Value: "LINE",
Help: "Infrequent access storage mode",
}, {
Value: "GLACIER",
Help: "Archive storage mode",
}, {
Value: "DEEP_ARCHIVE",
Help: "Deep archive storage mode",
}},
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload.
@@ -2017,6 +2205,36 @@ can't check the size and hash but the file contents will be decompressed.
`,
Advanced: true,
Default: false,
}, {
Name: "might_gzip",
Help: strings.ReplaceAll(`Set this if the backend might gzip objects.

Normally providers will not alter objects when they are downloaded. If
an object was not uploaded with |Content-Encoding: gzip| then it won't
be set on download.

However some providers may gzip objects even if they weren't uploaded
with |Content-Encoding: gzip| (eg Cloudflare).

A symptom of this would be receiving errors like

ERROR corrupted on transfer: sizes differ NNN vs MMM

If you set this flag and rclone downloads an object with
Content-Encoding: gzip set and chunked transfer encoding, then rclone
will decompress the object on the fly.

If this is set to unset (the default) then rclone will choose
according to the provider setting what to apply, but you can override
rclone's choice here.
`, "|", "`"),
Default: fs.Tristate{},
Advanced: true,
}, {
Name: "no_system_metadata",
Help: `Suppress setting and reading of system metadata`,
Advanced: true,
Default: false,
},
}})
}
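might_gzip is an fs.Tristate rather than a bool so that "unset" can fall back to a per-provider default, as setQuirks does below. A runnable sketch of that resolution pattern (this local Tristate mirrors only the Valid/Value fields used here, not the full rclone type):

package main

import "fmt"

// Tristate distinguishes "unset" from an explicit true/false.
type Tristate struct {
    Value bool
    Valid bool // false means unset
}

// resolve applies the provider default only when the user left the flag unset.
func resolve(userSetting Tristate, providerDefault bool) bool {
    if userSetting.Valid {
        return userSetting.Value
    }
    return providerDefault
}

func main() {
    unset := Tristate{}
    fmt.Println(resolve(unset, true))                               // true: provider default wins
    fmt.Println(resolve(Tristate{Value: false, Valid: true}, true)) // false: explicit user choice wins
}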
@@ -2111,6 +2329,7 @@ type Options struct {
SSEKMSKeyID string `config:"sse_kms_key_id"`
SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
SSECustomerKey string `config:"sse_customer_key"`
SSECustomerKeyBase64 string `config:"sse_customer_key_base64"`
SSECustomerKeyMD5 string `config:"sse_customer_key_md5"`
StorageClass string `config:"storage_class"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
@@ -2142,6 +2361,8 @@ type Options struct {
Versions bool `config:"versions"`
VersionAt fs.Time `config:"version_at"`
Decompress bool `config:"decompress"`
MightGzip fs.Tristate `config:"might_gzip"`
NoSystemMetadata bool `config:"no_system_metadata"`
}

// Fs represents a remote s3 server
@@ -2501,10 +2722,12 @@ func setQuirks(opt *Options) {
virtualHostStyle = true
urlEncodeListings = true
useMultipartEtag = true
mightGzip = true // assume all providers might gzip until proven otherwise
)
switch opt.Provider {
case "AWS":
// No quirks
mightGzip = false // Never auto gzips objects
case "Alibaba":
useMultipartEtag = false // Alibaba seems to calculate multipart Etags differently from AWS
case "HuaweiOBS":
@@ -2537,6 +2760,10 @@ func setQuirks(opt *Options) {
useMultipartEtag = false // untested
case "IDrive":
virtualHostStyle = false
case "IONOS":
// listObjectsV2 supported - https://api.ionos.com/docs/s3/#Basic-Operations-get-Bucket-list-type-2
virtualHostStyle = false
urlEncodeListings = false
case "LyveCloud":
useMultipartEtag = false // LyveCloud seems to calculate multipart Etags differently from AWS
case "Minio":
@@ -2573,6 +2800,9 @@ func setQuirks(opt *Options) {
useMultipartEtag = false // untested
case "Wasabi":
// No quirks
case "Qiniu":
useMultipartEtag = false
urlEncodeListings = false
case "Other":
listObjectsV2 = false
virtualHostStyle = false
@@ -2611,6 +2841,12 @@ func setQuirks(opt *Options) {
opt.UseMultipartEtag.Valid = true
opt.UseMultipartEtag.Value = useMultipartEtag
}

// set MightGzip if not manually set
if !opt.MightGzip.Valid {
opt.MightGzip.Valid = true
opt.MightGzip.Value = mightGzip
}
}

// setRoot changes the root of the Fs
@@ -2619,6 +2855,14 @@ func (f *Fs) setRoot(root string) {
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}

// return a pointer to the string if non empty or nil if it is empty
func stringPointerOrNil(s string) *string {
if s == "" {
return nil
}
return &s
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
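stringPointerOrNil is what makes an empty acl mean "send no X-Amz-Acl header at all": the AWS SDK omits header fields whose pointer is nil. A small self-contained demonstration (the helper is copied from the hunk above):

package main

import "fmt"

// stringPointerOrNil is reproduced from the diff above for a runnable example.
func stringPointerOrNil(s string) *string {
    if s == "" {
        return nil
    }
    return &s
}

func main() {
    if stringPointerOrNil("") == nil {
        // The AWS SDK omits header fields that are nil pointers, so an empty
        // acl results in no X-Amz-Acl header and the bucket default applies.
        fmt.Println("X-Amz-Acl omitted")
    }
    fmt.Println(*stringPointerOrNil("private")) // "private" is sent as the canned ACL
}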
@@ -2638,12 +2882,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.Versions && opt.VersionAt.IsSet() {
return nil, errors.New("s3: cant use --s3-versions and --s3-version-at at the same time")
}
if opt.ACL == "" {
opt.ACL = "private"
}
if opt.BucketACL == "" {
opt.BucketACL = opt.ACL
}
if opt.SSECustomerKeyBase64 != "" && opt.SSECustomerKey != "" {
return nil, errors.New("s3: can't use sse_customer_key and sse_customer_key_base64 at the same time")
} else if opt.SSECustomerKeyBase64 != "" {
// Decode the base64-encoded key and store it in the SSECustomerKey field
decoded, err := base64.StdEncoding.DecodeString(opt.SSECustomerKeyBase64)
if err != nil {
return nil, fmt.Errorf("s3: Could not decode sse_customer_key_base64: %w", err)
}
opt.SSECustomerKey = string(decoded)
}
if opt.SSECustomerKey != "" && opt.SSECustomerKeyMD5 == "" {
// calculate CustomerKeyMD5 if not supplied
md5sumBinary := md5.Sum([]byte(opt.SSECustomerKey))
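When sse_customer_key_md5 is left blank the checksum is derived from the key, as the hunk above starts to show. A sketch of that derivation; base64-encoding the binary MD5 is the usual SSE-C convention and is an assumption here, since the rest of the hunk is not shown:

package main

import (
    "crypto/md5"
    "encoding/base64"
    "fmt"
)

func main() {
    key := "0123456789abcdef0123456789abcdef" // example key, not a real secret
    sum := md5.Sum([]byte(key))
    // Assumed continuation of the hunk above: the binary MD5 is base64-encoded
    // before being used as the customer key MD5.
    fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
}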
@@ -2718,7 +2969,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return f, fs.ErrorIsFile
}
if opt.Provider == "Storj" {
f.features.Copy = nil
f.features.SetTier = false
f.features.GetTier = false
}
@@ -2821,19 +3071,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {

// Gets the bucket location
func (f *Fs) getBucketLocation(ctx context.Context, bucket string) (string, error) {
req := s3.GetBucketLocationInput{
Bucket: &bucket,
}
var resp *s3.GetBucketLocationOutput
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.GetBucketLocation(&req)
return f.shouldRetry(ctx, err)
region, err := s3manager.GetBucketRegion(ctx, f.ses, bucket, "", func(r *request.Request) {
r.Config.S3ForcePathStyle = aws.Bool(f.opt.ForcePathStyle)
})
if err != nil {
return "", err
}
return s3.NormalizeBucketLocation(aws.StringValue(resp.LocationConstraint)), nil
return region, nil
}

// Updates the region for the bucket by reading the region from the
@@ -2947,8 +3191,11 @@ func (f *Fs) newV2List(req *s3.ListObjectsV2Input) bucketLister {
// Do a V2 listing
func (ls *v2List) List(ctx context.Context) (resp *s3.ListObjectsV2Output, versionIDs []*string, err error) {
resp, err = ls.f.c.ListObjectsV2WithContext(ctx, &ls.req)
if err != nil {
return nil, nil, err
}
ls.req.ContinuationToken = resp.NextContinuationToken
return resp, nil, err
return resp, nil, nil
}

// URL Encode the listings
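Writing NextContinuationToken back into the request is what pages the listing: each List call resumes where the previous response stopped. A self-contained sketch of the same token-feedback loop against a toy paginated source:

package main

import "fmt"

// A toy paginated source standing in for S3 ListObjectsV2: each call returns
// one page and a continuation token, nil meaning the listing is complete.
var pages = [][]string{{"a", "b"}, {"c"}, {"d", "e"}}

func list(token *int) (keys []string, next *int) {
    i := 0
    if token != nil {
        i = *token
    }
    keys = pages[i]
    if i+1 < len(pages) {
        n := i + 1
        next = &n
    }
    return keys, next
}

func main() {
    // Mirror of v2List.List: save the token from each response into the next
    // request until the service stops returning one.
    var all []string
    var token *int
    for {
        keys, next := list(token)
        all = append(all, keys...)
        if next == nil {
            break
        }
        token = next
    }
    fmt.Println(all) // [a b c d e]
}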
@@ -3505,7 +3752,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
return f.cache.Create(bucket, func() error {
req := s3.CreateBucketInput{
Bucket: &bucket,
ACL: &f.opt.BucketACL,
ACL: stringPointerOrNil(f.opt.BucketACL),
}
if f.opt.LocationConstraint != "" {
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
@@ -3570,7 +3817,7 @@ func pathEscape(s string) string {
// method
func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, src *Object) error {
req.Bucket = &dstBucket
req.ACL = &f.opt.ACL
req.ACL = stringPointerOrNil(f.opt.ACL)
req.Key = &dstPath
source := pathEscape(path.Join(srcBucket, srcPath))
if src.versionID != nil {
@@ -4448,7 +4695,15 @@ func (o *Object) setMetaData(resp *s3.HeadObjectOutput) {
o.lastModified = time.Now()
fs.Logf(o, "Failed to read last modified")
} else {
o.lastModified = *resp.LastModified
// Try to keep the maximum precision in lastModified. If we read
// it from listings then it may have millisecond precision, but
// if we read it from a HEAD/GET request then it will have
// second precision.
equalToWithinOneSecond := o.lastModified.Truncate(time.Second).Equal((*resp.LastModified).Truncate(time.Second))
newHasNs := (*resp.LastModified).Nanosecond() != 0
if !equalToWithinOneSecond || newHasNs {
o.lastModified = *resp.LastModified
}
}
o.mimeType = aws.StringValue(resp.ContentType)
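The effect of the new comparison: a fresh LastModified only replaces the cached one when it disagrees beyond whole-second truncation or carries sub-second digits of its own, so a millisecond timestamp from a listing survives a later second-precision HEAD. A runnable sketch of the rule:

package main

import (
    "fmt"
    "time"
)

// keep decides whether a freshly read LastModified should replace the cached
// one, following the setMetaData logic above.
func keep(cached, fresh time.Time) time.Time {
    sameSecond := cached.Truncate(time.Second).Equal(fresh.Truncate(time.Second))
    freshHasNs := fresh.Nanosecond() != 0
    if !sameSecond || freshHasNs {
        return fresh
    }
    return cached // fresh is just a coarser view of the same instant
}

func main() {
    listing := time.Date(2022, 10, 14, 12, 0, 5, 123e6, time.UTC) // millisecond precision
    head := time.Date(2022, 10, 14, 12, 0, 5, 0, time.UTC)        // second precision
    fmt.Println(keep(listing, head)) // keeps the millisecond value
}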
@@ -4540,23 +4795,12 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options
return nil, err
}

contentLength := &resp.ContentLength
if resp.Header.Get("Content-Range") != "" {
var contentRange = resp.Header.Get("Content-Range")
slash := strings.IndexRune(contentRange, '/')
if slash >= 0 {
i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
if err == nil {
contentLength = &i
} else {
fs.Debugf(o, "Failed to find parse integer from in %q: %v", contentRange, err)
}
} else {
fs.Debugf(o, "Failed to find length in %q", contentRange)
}
contentLength := rest.ParseSizeFromHeaders(resp.Header)
if contentLength < 0 {
fs.Debugf(o, "Failed to parse file size from headers")
}

lastModified, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
lastModified, err := http.ParseTime(resp.Header.Get("Last-Modified"))
if err != nil {
fs.Debugf(o, "Failed to parse last modified from string %s, %v", resp.Header.Get("Last-Modified"), err)
}
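rest.ParseSizeFromHeaders replaces the hand-rolled parsing above; presumably it extracts the total from a Content-Range header in much the same way. A standalone sketch of that header format (an illustration, not rclone's implementation):

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// totalFromContentRange extracts the total size from a header such as
// "bytes 0-99/12345", returning -1 when it cannot be determined.
func totalFromContentRange(v string) int64 {
    slash := strings.IndexRune(v, '/')
    if slash < 0 {
        return -1
    }
    n, err := strconv.ParseInt(v[slash+1:], 10, 64)
    if err != nil { // covers the unknown-length form "bytes 0-99/*"
        return -1
    }
    return n
}

func main() {
    fmt.Println(totalFromContentRange("bytes 0-99/12345")) // 12345
    fmt.Println(totalFromContentRange("bytes 0-99/*"))     // -1
}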
@@ -4580,7 +4824,7 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options

var head = s3.HeadObjectOutput{
ETag: header("Etag"),
ContentLength: contentLength,
ContentLength: &contentLength,
LastModified: &lastModified,
Metadata: metaData,
CacheControl: header("Cache-Control"),
@@ -4679,7 +4923,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

// Decompress body if necessary
if aws.StringValue(resp.ContentEncoding) == "gzip" {
if o.fs.opt.Decompress {
if o.fs.opt.Decompress || (resp.ContentLength == nil && o.fs.opt.MightGzip.Value) {
return readers.NewGzipReader(resp.Body)
}
o.fs.warnCompressed.Do(func() {
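On-the-fly decompression amounts to wrapping the response body in a gzip reader; readers.NewGzipReader presumably adds close-forwarding on top of this, which is assumed here. A generic runnable sketch of the pattern:

package main

import (
    "bytes"
    "compress/gzip"
    "fmt"
    "io"
)

func main() {
    // Build a gzipped payload standing in for a response body that arrived
    // with Content-Encoding: gzip.
    var buf bytes.Buffer
    zw := gzip.NewWriter(&buf)
    _, _ = zw.Write([]byte("hello world"))
    _ = zw.Close()

    // Decompress while streaming, as the Open path above does.
    zr, err := gzip.NewReader(&buf)
    if err != nil {
        panic(err)
    }
    out, _ := io.ReadAll(zr)
    fmt.Println(string(out)) // hello world
}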
@@ -4926,7 +5170,7 @@ func (o *Object) uploadSinglepartPutObject(ctx context.Context, req *s3.PutObjec
// Can't upload zero length files like this for some reason
r.Body = bytes.NewReader([]byte{})
} else {
r.SetStreamingBody(ioutil.NopCloser(in))
r.SetStreamingBody(io.NopCloser(in))
}
r.SetContext(ctx)
r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
@@ -5040,7 +5284,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

req := s3.PutObjectInput{
Bucket: &bucket,
ACL: &o.fs.opt.ACL,
ACL: stringPointerOrNil(o.fs.opt.ACL),
Key: &bucketPath,
}

@@ -5054,6 +5298,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
for k, v := range meta {
pv := aws.String(v)
k = strings.ToLower(k)
if o.fs.opt.NoSystemMetadata {
req.Metadata[k] = pv
continue
}
switch k {
case "cache-control":
req.CacheControl = pv
@@ -5174,6 +5422,20 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
}

// Check metadata keys and values are valid
for key, value := range req.Metadata {
if !httpguts.ValidHeaderFieldName(key) {
fs.Errorf(o, "Dropping invalid metadata key %q", key)
delete(req.Metadata, key)
} else if value == nil {
fs.Errorf(o, "Dropping nil metadata value for key %q", key)
delete(req.Metadata, key)
} else if !httpguts.ValidHeaderFieldValue(*value) {
fs.Errorf(o, "Dropping invalid metadata value %q for key %q", *value, key)
delete(req.Metadata, key)
}
}

var wantETag string // Multipart upload Etag to check
var gotEtag string // Etag we got from the upload
var lastModified time.Time // Time we got from the upload
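httpguts (golang.org/x/net/http/httpguts) provides token-level validation of header names and values, which is what keeps malformed user metadata from producing invalid signed requests. A small demonstration:

package main

import (
    "fmt"

    "golang.org/x/net/http/httpguts"
)

func main() {
    fmt.Println(httpguts.ValidHeaderFieldName("x-amz-meta-mtime")) // true
    fmt.Println(httpguts.ValidHeaderFieldName("bad key"))          // false: space not allowed in a name
    fmt.Println(httpguts.ValidHeaderFieldValue("spaces are fine")) // true
    fmt.Println(httpguts.ValidHeaderFieldValue("bad\x00value"))    // false: control byte
}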
@@ -5318,6 +5580,9 @@ func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error)

// Set system metadata
setMetadata := func(k string, v *string) {
if o.fs.opt.NoSystemMetadata {
return
}
if v == nil || *v == "" {
return
}
@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
@@ -633,7 +632,7 @@ func (f *Fs) download(ctx context.Context, url string, size int64, options ...fs
})
if start > 0 {
// We need to read and discard the beginning of the data...
_, err = io.CopyN(ioutil.Discard, resp.Body, start)
_, err = io.CopyN(io.Discard, resp.Body, start)
if err != nil {
return nil, err
}
@@ -10,7 +10,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"regexp"
@@ -123,7 +122,10 @@ This enables the use of the following insecure ciphers and key exchange methods:
- diffie-hellman-group-exchange-sha256
- diffie-hellman-group-exchange-sha1

Those algorithms are insecure and may allow plaintext data to be recovered by an attacker.`,
Those algorithms are insecure and may allow plaintext data to be recovered by an attacker.

This must be false if you use either ciphers or key_exchange advanced options.
`,
Default: false,
Examples: []fs.OptionExample{
{
@@ -325,6 +327,46 @@ and pass variables with spaces in in quotes, eg

"VAR3=value with space" "VAR4=value with space" VAR5=nospacehere

`,
Advanced: true,
}, {
Name: "ciphers",
Default: fs.SpaceSepList{},
Help: `Space separated list of ciphers to be used for session encryption, ordered by preference.

At least one must match with server configuration. This can be checked for example using ssh -Q cipher.

This must not be set if use_insecure_cipher is true.

Example:

aes128-ctr aes192-ctr aes256-ctr aes128-gcm@openssh.com aes256-gcm@openssh.com
`,
Advanced: true,
}, {
Name: "key_exchange",
Default: fs.SpaceSepList{},
Help: `Space separated list of key exchange algorithms, ordered by preference.

At least one must match with server configuration. This can be checked for example using ssh -Q kex.

This must not be set if use_insecure_cipher is true.

Example:

sntrup761x25519-sha512@openssh.com curve25519-sha256 curve25519-sha256@libssh.org ecdh-sha2-nistp256
`,
Advanced: true,
}, {
Name: "macs",
Default: fs.SpaceSepList{},
Help: `Space separated list of MACs (message authentication code) algorithms, ordered by preference.

At least one must match with server configuration. This can be checked for example using ssh -Q mac.

Example:

umac-64-etm@openssh.com umac-128-etm@openssh.com hmac-sha2-256-etm@openssh.com
`,
Advanced: true,
}},
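These options map directly onto the algorithm lists in golang.org/x/crypto/ssh's Config, as the NewFs hunk further below shows. A standalone sketch of the same wiring (the algorithm names are examples taken from the help texts above):

package main

import (
    "fmt"

    "golang.org/x/crypto/ssh"
)

func main() {
    var cfg ssh.ClientConfig
    // Populate the algorithm lists with defaults first, then narrow them,
    // as the sftp backend does with its ciphers/key_exchange/macs options.
    cfg.Config.SetDefaults()
    cfg.Config.Ciphers = []string{"aes128-ctr", "aes256-gcm@openssh.com"}
    cfg.Config.KeyExchanges = []string{"curve25519-sha256"}
    cfg.Config.MACs = []string{"hmac-sha2-256-etm@openssh.com"}
    fmt.Println(cfg.Config.Ciphers)
}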
@@ -362,6 +404,9 @@ type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Concurrency int `config:"concurrency"`
SetEnv fs.SpaceSepList `config:"set_env"`
Ciphers fs.SpaceSepList `config:"ciphers"`
KeyExchange fs.SpaceSepList `config:"key_exchange"`
MACs fs.SpaceSepList `config:"macs"`
}

// Fs stores the interface to the remote SFTP files
@@ -702,10 +747,25 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
sshConfig.HostKeyCallback = hostcallback
}

if opt.UseInsecureCipher && (opt.Ciphers != nil || opt.KeyExchange != nil) {
return nil, fmt.Errorf("use_insecure_cipher must be false if ciphers or key_exchange are set in advanced configuration")
}

sshConfig.Config.SetDefaults()
if opt.UseInsecureCipher {
sshConfig.Config.SetDefaults()
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc", "aes192-cbc", "aes256-cbc", "3des-cbc")
sshConfig.Config.KeyExchanges = append(sshConfig.Config.KeyExchanges, "diffie-hellman-group-exchange-sha1", "diffie-hellman-group-exchange-sha256")
} else {
if opt.Ciphers != nil {
sshConfig.Config.Ciphers = opt.Ciphers
}
if opt.KeyExchange != nil {
sshConfig.Config.KeyExchanges = opt.KeyExchange
}
}

if opt.MACs != nil {
sshConfig.Config.MACs = opt.MACs
}

keyFile := env.ShellExpand(opt.KeyFile)
@@ -722,7 +782,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, fmt.Errorf("couldn't read ssh agent signers: %w", err)
}
if keyFile != "" {
pubBytes, err := ioutil.ReadFile(keyFile + ".pub")
pubBytes, err := os.ReadFile(keyFile + ".pub")
if err != nil {
return nil, fmt.Errorf("failed to read public key file: %w", err)
}
@@ -751,7 +811,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if keyFile != "" || opt.KeyPem != "" {
var key []byte
if opt.KeyPem == "" {
key, err = ioutil.ReadFile(keyFile)
key, err = os.ReadFile(keyFile)
if err != nil {
return nil, fmt.Errorf("failed to read private key file: %w", err)
}
@@ -782,7 +842,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

// If a public key has been specified then use that
if pubkeyFile != "" {
certfile, err := ioutil.ReadFile(pubkeyFile)
certfile, err := os.ReadFile(pubkeyFile)
if err != nil {
return nil, fmt.Errorf("unable to read cert file: %w", err)
}
@@ -915,20 +975,24 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
fs.Debugf(f, "Running shell type detection remote command: %s", shellCmd)
err = session.Run(shellCmd)
_ = session.Close()
f.shellType = defaultShellType
if err != nil {
f.shellType = defaultShellType
fs.Debugf(f, "Remote command failed: %v (stdout=%v) (stderr=%v)", err, bytes.TrimSpace(stdout.Bytes()), bytes.TrimSpace(stderr.Bytes()))
} else {
outBytes := stdout.Bytes()
fs.Debugf(f, "Remote command result: %s", outBytes)
outString := string(bytes.TrimSpace(stdout.Bytes()))
if strings.HasPrefix(outString, "Microsoft.PowerShell") { // If PowerShell: "Microsoft.PowerShell%ComSpec%"
f.shellType = "powershell"
} else if !strings.HasSuffix(outString, "%ComSpec%") { // If Command Prompt: "${ShellId}C:\WINDOWS\system32\cmd.exe"
f.shellType = "cmd"
} else { // If Unix: "%ComSpec%"
f.shellType = "unix"
}
if outString != "" {
if strings.HasPrefix(outString, "Microsoft.PowerShell") { // PowerShell: "Microsoft.PowerShell%ComSpec%"
f.shellType = "powershell"
} else if !strings.HasSuffix(outString, "%ComSpec%") { // Command Prompt: "${ShellId}C:\WINDOWS\system32\cmd.exe"
// Additional positive test, to avoid misdetection on unpredicted Unix shell variants
s := strings.ToLower(outString)
if strings.Contains(s, ".exe") || strings.Contains(s, ".com") {
f.shellType = "cmd"
}
} // POSIX-based Unix shell: "%ComSpec%"
} // fish Unix shell: ""
}
}
// Save permanently in config to avoid the extra work next time
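The detection command echoes shell-variable markers whose expansion differs per shell, so classifying the trimmed output is enough. A pure-function sketch of the decision table, using the sample outputs quoted in the comments above (that the fallback type is "unix" is an assumption, since defaultShellType is defined outside this hunk):

package main

import (
    "fmt"
    "strings"
)

// classifyShell reproduces the decision logic above for a detected output string.
func classifyShell(out string) string {
    if out == "" { // e.g. fish expands both variables to nothing
        return "unix"
    }
    if strings.HasPrefix(out, "Microsoft.PowerShell") {
        return "powershell"
    }
    if !strings.HasSuffix(out, "%ComSpec%") {
        // Positive confirmation guards against unusual Unix shells.
        s := strings.ToLower(out)
        if strings.Contains(s, ".exe") || strings.Contains(s, ".com") {
            return "cmd"
        }
    }
    return "unix"
}

func main() {
    fmt.Println(classifyShell("Microsoft.PowerShell%ComSpec%"))         // powershell
    fmt.Println(classifyShell(`${ShellId}C:\WINDOWS\system32\cmd.exe`)) // cmd
    fmt.Println(classifyShell("%ComSpec%"))                             // unix
    fmt.Println(classifyShell(""))                                      // unix
}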
@@ -1171,6 +1235,10 @@ func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
err = c.sftpClient.Mkdir(dirPath)
f.putSftpConnection(&c, err)
if err != nil {
if os.IsExist(err) {
fs.Debugf(f, "directory %q exists after Mkdir is attempted", dirPath)
return nil
}
return fmt.Errorf("mkdir %q failed: %w", dirPath, err)
}
return nil
@@ -77,7 +77,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
@@ -479,7 +478,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("failed to open timezone db: %w", err)
}
tzdata, err := ioutil.ReadAll(timezone)
tzdata, err := io.ReadAll(timezone)
if err != nil {
return nil, fmt.Errorf("failed to read timezone: %w", err)
}
@@ -10,7 +10,6 @@ import (
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
pathpkg "path"
@@ -119,7 +118,7 @@ func (f *vfsgen۰CompressedFile) Read(p []byte) (n int, err error) {
}
if f.grPos < f.seekPos {
// Fast-forward.
_, err = io.CopyN(ioutil.Discard, f.gr, f.seekPos-f.grPos)
_, err = io.CopyN(io.Discard, f.gr, f.seekPos-f.grPos)
if err != nil {
return 0, err
}
229 backend/smb/connpool.go Normal file
@@ -0,0 +1,229 @@
package smb

import (
"context"
"fmt"
"net"
"sync/atomic"
"time"

smb2 "github.com/hirochachacha/go-smb2"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp"
)

// dial starts a client connection to the given SMB server. It is a
// convenience function that connects to the given network address,
// initiates the SMB handshake, and then sets up a Client.
func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {
dialer := fshttp.NewDialer(ctx)
tconn, err := dialer.Dial(network, addr)
if err != nil {
return nil, err
}

pass := ""
if f.opt.Pass != "" {
pass, err = obscure.Reveal(f.opt.Pass)
if err != nil {
return nil, err
}
}

d := &smb2.Dialer{
Initiator: &smb2.NTLMInitiator{
User: f.opt.User,
Password: pass,
Domain: f.opt.Domain,
},
}

session, err := d.DialContext(ctx, tconn)
if err != nil {
return nil, err
}

return &conn{
smbSession: session,
conn: &tconn,
}, nil
}

// conn encapsulates a SMB client and corresponding SMB session
type conn struct {
conn *net.Conn
smbSession *smb2.Session
smbShare *smb2.Share
shareName string
}

// Closes the connection
func (c *conn) close() (err error) {
if c.smbShare != nil {
err = c.smbShare.Umount()
}
sessionLogoffErr := c.smbSession.Logoff()
if err != nil {
return err
}
return sessionLogoffErr
}

// True if it's closed
func (c *conn) closed() bool {
var nopErr error
if c.smbShare != nil {
// stat the current directory
_, nopErr = c.smbShare.Stat(".")
} else {
// list the shares
_, nopErr = c.smbSession.ListSharenames()
}
return nopErr == nil
}

// Show that we are using a SMB session
//
// Call removeSession() when done
func (f *Fs) addSession() {
atomic.AddInt32(&f.sessions, 1)
}

// Show the SMB session is no longer in use
func (f *Fs) removeSession() {
atomic.AddInt32(&f.sessions, -1)
}

// getSessions shows whether there are any sessions in use
func (f *Fs) getSessions() int32 {
return atomic.LoadInt32(&f.sessions)
}

// Open a new connection to the SMB server.
func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err error) {
c, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port)
if err != nil {
return nil, fmt.Errorf("couldn't connect SMB: %w", err)
}
if share != "" {
// mount the specified share as well if user requested
c.smbShare, err = c.smbSession.Mount(share)
if err != nil {
_ = c.smbSession.Logoff()
return nil, fmt.Errorf("couldn't initialize SMB: %w", err)
}
c.smbShare = c.smbShare.WithContext(ctx)
}
return c, nil
}

// Ensure the specified share is mounted or the session is unmounted
func (c *conn) mountShare(share string) (err error) {
if c.shareName == share {
return nil
}
if c.smbShare != nil {
err = c.smbShare.Umount()
c.smbShare = nil
}
if err != nil {
return
}
if share != "" {
c.smbShare, err = c.smbSession.Mount(share)
if err != nil {
return
}
}
c.shareName = share
return nil
}

// Get a SMB connection from the pool, or open a new one
func (f *Fs) getConnection(ctx context.Context, share string) (c *conn, err error) {
accounting.LimitTPS(ctx)
f.poolMu.Lock()
for len(f.pool) > 0 {
c = f.pool[0]
f.pool = f.pool[1:]
err = c.mountShare(share)
if err == nil {
break
}
fs.Debugf(f, "Discarding unusable SMB connection: %v", err)
c = nil
}
f.poolMu.Unlock()
if c != nil {
return c, nil
}
err = f.pacer.Call(func() (bool, error) {
c, err = f.newConnection(ctx, share)
if err != nil {
return true, err
}
return false, nil
})
return c, err
}

// Return a SMB connection to the pool
//
// It nils the pointed to connection out so it can't be reused
func (f *Fs) putConnection(pc **conn) {
c := *pc
*pc = nil

var nopErr error
if c.smbShare != nil {
// stat the current directory
_, nopErr = c.smbShare.Stat(".")
} else {
// list the shares
_, nopErr = c.smbSession.ListSharenames()
}
if nopErr != nil {
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
_ = c.close()
return
}

f.poolMu.Lock()
f.pool = append(f.pool, c)
if f.opt.IdleTimeout > 0 {
f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
}
f.poolMu.Unlock()
}

// Drain the pool of any connections
func (f *Fs) drainPool(ctx context.Context) (err error) {
f.poolMu.Lock()
defer f.poolMu.Unlock()
if sessions := f.getSessions(); sessions != 0 {
fs.Debugf(f, "Not closing %d unused connections as %d sessions active", len(f.pool), sessions)
if f.opt.IdleTimeout > 0 {
f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
}
return nil
}
if f.opt.IdleTimeout > 0 {
f.drain.Stop()
}
if len(f.pool) != 0 {
fs.Debugf(f, "Closing %d unused connections", len(f.pool))
}
for i, c := range f.pool {
if !c.closed() {
cErr := c.close()
if cErr != nil {
err = cErr
}
}
f.pool[i] = nil
}
f.pool = nil
return err
}
backend/smb/smb.go
Normal file
789
backend/smb/smb.go
Normal file
@@ -0,0 +1,789 @@
|
// Package smb provides an interface to SMB servers
package smb

import (
"context"
"fmt"
"io"
"os"
"path"
"strings"
"sync"
"time"

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
)

const (
minSleep = 100 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)

var (
currentUser = env.CurrentUser()
)

// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "smb",
Description: "SMB / CIFS",
NewFs: NewFs,

Options: []fs.Option{{
Name: "host",
Help: "SMB server hostname to connect to.\n\nE.g. \"example.com\".",
Required: true,
}, {
Name: "user",
Help: "SMB username.",
Default: currentUser,
}, {
Name: "port",
Help: "SMB port number.",
Default: 445,
}, {
Name: "pass",
Help: "SMB password.",
IsPassword: true,
}, {
Name: "domain",
Help: "Domain name for NTLM authentication.",
Default: "WORKGROUP",
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),
Help: `Max time before closing idle connections.

If no connections have been returned to the connection pool in the time
given, rclone will empty the connection pool.

Set to 0 to keep connections indefinitely.
`,
Advanced: true,
}, {
Name: "hide_special_share",
Help: "Hide special shares (e.g. print$) which users aren't supposed to access.",
Default: true,
Advanced: true,
}, {
Name: "case_insensitive",
Help: "Whether the server is configured to be case-insensitive.\n\nAlways true on Windows shares.",
Default: true,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: encoder.EncodeZero |
// path separator
encoder.EncodeSlash |
encoder.EncodeBackSlash |
// windows
encoder.EncodeWin |
encoder.EncodeCtl |
encoder.EncodeDot |
// the file turns into 8.3 names (and cannot be converted back)
encoder.EncodeRightSpace |
encoder.EncodeRightPeriod |
//
encoder.EncodeInvalidUtf8,
},
}})
}

// Options defines the configuration for this backend
type Options struct {
Host string `config:"host"`
Port string `config:"port"`
User string `config:"user"`
Pass string `config:"pass"`
Domain string `config:"domain"`
HideSpecial bool `config:"hide_special_share"`
CaseInsensitive bool `config:"case_insensitive"`
IdleTimeout fs.Duration `config:"idle_timeout"`

Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a SMB remote
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
features *fs.Features // optional features
pacer *fs.Pacer // pacer for operations

sessions int32
poolMu sync.Mutex
pool []*conn
drain *time.Timer // used to drain the pool when we stop using the connections

ctx context.Context
}

// Object describes a file at the server
type Object struct {
fs *Fs // reference to Fs
remote string // the remote path
statResult os.FileInfo
}

// NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}

root = strings.Trim(root, "/")

f := &Fs{
name: name,
opt: *opt,
ctx: ctx,
root: root,
}
f.features = (&fs.Features{
CaseInsensitive: opt.CaseInsensitive,
CanHaveEmptyDirectories: true,
BucketBased: true,
}).Fill(ctx, f)

f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
// set the pool drainer timer going
if opt.IdleTimeout > 0 {
f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
}

// test if the root exists as a file
share, dir := f.split("")
if share == "" || dir == "" {
return f, nil
}
cn, err := f.getConnection(ctx, share)
if err != nil {
return nil, err
}
stat, err := cn.smbShare.Stat(f.toSambaPath(dir))
f.putConnection(&cn)
if err != nil {
// ignore stat error here
return f, nil
}
if !stat.IsDir() {
f.root, err = path.Dir(root), fs.ErrorIsFile
}
fs.Debugf(f, "Using root directory %q", f.root)
return f, err
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
bucket, file := f.split("")
if bucket == "" {
return fmt.Sprintf("smb://%s@%s:%s/", f.opt.User, f.opt.Host, f.opt.Port)
}
return fmt.Sprintf("smb://%s@%s:%s/%s/%s", f.opt.User, f.opt.Host, f.opt.Port, bucket, file)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}

// Hashes returns nothing as SMB itself doesn't have a way to tell checksums
func (f *Fs) Hashes() hash.Set {
return hash.NewHashSet()
}

// Precision returns the precision of mtime
func (f *Fs) Precision() time.Duration {
return time.Millisecond
}

// NewObject creates a new file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
share, path := f.split(remote)
return f.findObjectSeparate(ctx, share, path)
}

func (f *Fs) findObjectSeparate(ctx context.Context, share, path string) (fs.Object, error) {
if share == "" || path == "" {
return nil, fs.ErrorIsDir
}
cn, err := f.getConnection(ctx, share)
if err != nil {
return nil, err
}
stat, err := cn.smbShare.Stat(f.toSambaPath(path))
f.putConnection(&cn)
if err != nil {
return nil, translateError(err, false)
}
if stat.IsDir() {
return nil, fs.ErrorIsDir
}

return f.makeEntry(share, path, stat), nil
}

// Mkdir creates a directory on the server
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
share, path := f.split(dir)
if share == "" || path == "" {
return nil
}
cn, err := f.getConnection(ctx, share)
if err != nil {
return err
}
err = cn.smbShare.MkdirAll(f.toSambaPath(path), 0o755)
f.putConnection(&cn)
return err
}

// Rmdir removes an empty directory on the server
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
share, path := f.split(dir)
if share == "" || path == "" {
return nil
}
cn, err := f.getConnection(ctx, share)
if err != nil {
return err
}
err = cn.smbShare.Remove(f.toSambaPath(path))
f.putConnection(&cn)
return err
}

// Put uploads a file
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o := &Object{
fs: f,
remote: src.Remote(),
}

err := o.Update(ctx, in, src, options...)
if err == nil {
return o, nil
}

return nil, err
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o := &Object{
fs: f,
remote: src.Remote(),
}

err := o.Update(ctx, in, src, options...)
if err == nil {
return o, nil
}

return nil, err
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (_ fs.Object, err error) {
dstShare, dstPath := f.split(remote)
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
srcShare, srcPath := srcObj.split()
if dstShare != srcShare {
fs.Debugf(src, "Can't move - must be on the same share")
return nil, fs.ErrorCantMove
}

err = f.ensureDirectory(ctx, dstShare, dstPath)
if err != nil {
return nil, fmt.Errorf("failed to make parent directories: %w", err)
}

cn, err := f.getConnection(ctx, dstShare)
if err != nil {
return nil, err
}
err = cn.smbShare.Rename(f.toSambaPath(srcPath), f.toSambaPath(dstPath))
f.putConnection(&cn)
if err != nil {
return nil, translateError(err, false)
}
return f.findObjectSeparate(ctx, dstShare, dstPath)
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
dstShare, dstPath := f.split(dstRemote)
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return fs.ErrorCantDirMove
}
srcShare, srcPath := srcFs.split(srcRemote)
if dstShare != srcShare {
fs.Debugf(src, "Can't move - must be on the same share")
return fs.ErrorCantDirMove
}

err = f.ensureDirectory(ctx, dstShare, dstPath)
if err != nil {
return fmt.Errorf("failed to make parent directories: %w", err)
}

cn, err := f.getConnection(ctx, dstShare)
if err != nil {
return err
}
defer f.putConnection(&cn)

_, err = cn.smbShare.Stat(dstPath)
if os.IsNotExist(err) {
err = cn.smbShare.Rename(f.toSambaPath(srcPath), f.toSambaPath(dstPath))
return translateError(err, true)
}
return fs.ErrorDirExists
}

// List files and directories in a directory
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
share, _path := f.split(dir)

cn, err := f.getConnection(ctx, share)
if err != nil {
return nil, err
}
defer f.putConnection(&cn)

if share == "" {
shares, err := cn.smbSession.ListSharenames()
for _, shh := range shares {
shh = f.toNativePath(shh)
if strings.HasSuffix(shh, "$") && f.opt.HideSpecial {
continue
}
entries = append(entries, fs.NewDir(shh, time.Time{}))
}
return entries, err
}

dirents, err := cn.smbShare.ReadDir(f.toSambaPath(_path))
if err != nil {
return entries, translateError(err, true)
}
for _, file := range dirents {
nfn := f.toNativePath(file.Name())
if file.IsDir() {
entries = append(entries, fs.NewDir(path.Join(dir, nfn), file.ModTime()))
} else {
entries = append(entries, f.makeEntryRelative(share, _path, nfn, file))
}
}

return entries, nil
}

// About returns things about remaining and used spaces
func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err error) {
share, dir := f.split("/")
if share == "" {
return nil, fs.ErrorListBucketRequired
}
dir = f.toSambaPath(dir)

cn, err := f.getConnection(ctx, share)
if err != nil {
return nil, err
}
stat, err := cn.smbShare.Statfs(dir)
f.putConnection(&cn)
if err != nil {
return nil, err
}

bs := int64(stat.BlockSize())
usage := &fs.Usage{
Total: fs.NewUsageValue(bs * int64(stat.TotalBlockCount())),
Used: fs.NewUsageValue(bs * int64(stat.TotalBlockCount()-stat.FreeBlockCount())),
Free: fs.NewUsageValue(bs * int64(stat.AvailableBlockCount())),
}
return usage, nil
}

// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
return f.drainPool(ctx)
}

func (f *Fs) makeEntry(share, _path string, stat os.FileInfo) *Object {
remote := path.Join(share, _path)
return &Object{
fs: f,
remote: trimPathPrefix(remote, f.root),
statResult: stat,
}
}

func (f *Fs) makeEntryRelative(share, _path, relative string, stat os.FileInfo) *Object {
return f.makeEntry(share, path.Join(_path, relative), stat)
}

func (f *Fs) ensureDirectory(ctx context.Context, share, _path string) error {
cn, err := f.getConnection(ctx, share)
if err != nil {
return err
}
err = cn.smbShare.MkdirAll(f.toSambaPath(path.Dir(_path)), 0o755)
f.putConnection(&cn)
return err
}

/// Object

// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}

// ModTime is the last modified time (read-only)
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.statResult.ModTime()
}

// Size is the file length
func (o *Object) Size() int64 {
return o.statResult.Size()
}

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}

// Hash always returns empty value
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
return "", hash.ErrUnsupported
}

// Storable returns if this object is storable
func (o *Object) Storable() bool {
return true
}

// SetModTime sets modTime on a particular file
func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
share, reqDir := o.split()
if share == "" || reqDir == "" {
return fs.ErrorCantSetModTime
}
reqDir = o.fs.toSambaPath(reqDir)

cn, err := o.fs.getConnection(ctx, share)
if err != nil {
return err
}
defer o.fs.putConnection(&cn)

err = cn.smbShare.Chtimes(reqDir, t, t)
if err != nil {
return err
}

fi, err := cn.smbShare.Stat(reqDir)
if err == nil {
o.statResult = fi
}
return err
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
share, filename := o.split()
if share == "" || filename == "" {
return nil, fs.ErrorIsDir
}
filename = o.fs.toSambaPath(filename)

var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}

o.fs.addSession() // Show session in use
defer o.fs.removeSession()

cn, err := o.fs.getConnection(ctx, share)
if err != nil {
return nil, err
}
fl, err := cn.smbShare.OpenFile(filename, os.O_RDONLY, 0)
if err != nil {
o.fs.putConnection(&cn)
return nil, fmt.Errorf("failed to open: %w", err)
}
pos, err := fl.Seek(offset, io.SeekStart)
if err != nil {
o.fs.putConnection(&cn)
return nil, fmt.Errorf("failed to seek: %w", err)
}
if pos != offset {
o.fs.putConnection(&cn)
return nil, fmt.Errorf("failed to seek: wrong position (expected=%d, reported=%d)", offset, pos)
}

in = readers.NewLimitedReadCloser(fl, limit)
in = &boundReadCloser{
rc: in,
close: func() error {
o.fs.putConnection(&cn)
return nil
},
}

return in, nil
}

// Update the Object from in with modTime and size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
share, filename := o.split()
if share == "" || filename == "" {
return fs.ErrorIsDir
}

err = o.fs.ensureDirectory(ctx, share, filename)
if err != nil {
return fmt.Errorf("failed to make parent directories: %w", err)
}

filename = o.fs.toSambaPath(filename)

o.fs.addSession() // Show session in use
defer o.fs.removeSession()

cn, err := o.fs.getConnection(ctx, share)
if err != nil {
return err
}
defer func() {
o.statResult, _ = cn.smbShare.Stat(filename)
o.fs.putConnection(&cn)
}()

fl, err := cn.smbShare.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
if err != nil {
return fmt.Errorf("failed to open: %w", err)
}

// remove the file if upload failed
remove := func() {
// Windows doesn't allow removal of files without closing file
removeErr := fl.Close()
if removeErr != nil {
fs.Debugf(src, "failed to close the file for delete: %v", removeErr)
// try to remove the file anyway; the file may be already closed
}

removeErr = cn.smbShare.Remove(filename)
if removeErr != nil {
fs.Debugf(src, "failed to remove: %v", removeErr)
} else {
fs.Debugf(src, "removed after failed upload: %v", err)
}
}

_, err = fl.ReadFrom(in)
if err != nil {
remove()
return fmt.Errorf("Update ReadFrom failed: %w", err)
}

err = fl.Close()
if err != nil {
remove()
return fmt.Errorf("Update Close failed: %w", err)
}

// Set the modified time
err = o.SetModTime(ctx, src.ModTime(ctx))
if err != nil {
return fmt.Errorf("Update SetModTime failed: %w", err)
}

return nil
}

// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
share, filename := o.split()
if share == "" || filename == "" {
return fs.ErrorIsDir
}
filename = o.fs.toSambaPath(filename)

cn, err := o.fs.getConnection(ctx, share)
if err != nil {
return err
}

err = cn.smbShare.Remove(filename)
o.fs.putConnection(&cn)

return err
}

// String converts this Object to a string
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}

/// Misc

// split returns share name and path in the share from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (shareName, filepath string) {
return bucket.Split(path.Join(f.root, rootRelativePath))
}

// split returns share name and path in the share from the object
func (o *Object) split() (shareName, filepath string) {
return o.fs.split(o.remote)
}

func (f *Fs) toSambaPath(path string) string {
// 1. encode via Rclone's escaping system
// 2. convert to backslash-separated path
return strings.ReplaceAll(f.opt.Enc.FromStandardPath(path), "/", "\\")
}

func (f *Fs) toNativePath(path string) string {
// 1. convert *back* to slash-separated path
// 2. encode via Rclone's escaping system
return f.opt.Enc.ToStandardPath(strings.ReplaceAll(path, "\\", "/"))
}

func ensureSuffix(s, suffix string) string {
if strings.HasSuffix(s, suffix) {
return s
}
return s + suffix
}

func trimPathPrefix(s, prefix string) string {
// we need to clean the paths to make tests pass!
s = betterPathClean(s)
prefix = betterPathClean(prefix)
if s == prefix || s == prefix+"/" {
return ""
}
prefix = ensureSuffix(prefix, "/")
return strings.TrimPrefix(s, prefix)
}

func betterPathClean(p string) string {
d := path.Clean(p)
if d == "." {
return ""
}
return d
}

type boundReadCloser struct {
rc io.ReadCloser
close func() error
}

func (r *boundReadCloser) Read(p []byte) (n int, err error) {
return r.rc.Read(p)
}

func (r *boundReadCloser) Close() error {
err1 := r.rc.Close()
err2 := r.close()
if err1 != nil {
return err1
}
return err2
}

func translateError(e error, dir bool) error {
if os.IsNotExist(e) {
if dir {
return fs.ErrorDirNotFound
}
return fs.ErrorObjectNotFound
}

return e
}

var (
_ fs.Fs = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.Abouter = &Fs{}
_ fs.Shutdowner = &Fs{}
_ fs.Object = &Object{}
_ io.ReadCloser = &boundReadCloser{}
)
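trimPathPrefix and betterPathClean normalize both arguments before trimming, so trailing slashes and doubled separators do not leak into remote names. A few worked cases (the helpers are copied from smb.go above for a runnable example):

package main

import (
    "fmt"
    "path"
    "strings"
)

// Copies of the helpers from smb.go above, for a self-contained demo.
func betterPathClean(p string) string {
    if d := path.Clean(p); d != "." {
        return d
    }
    return ""
}

func ensureSuffix(s, suffix string) string {
    if strings.HasSuffix(s, suffix) {
        return s
    }
    return s + suffix
}

func trimPathPrefix(s, prefix string) string {
    s, prefix = betterPathClean(s), betterPathClean(prefix)
    if s == prefix || s == prefix+"/" {
        return ""
    }
    return strings.TrimPrefix(s, ensureSuffix(prefix, "/"))
}

func main() {
    fmt.Println(trimPathPrefix("share/dir/file", "share")) // dir/file
    fmt.Println(trimPathPrefix("share", "share"))          // (empty)
    fmt.Println(trimPathPrefix("share//dir/", "share"))    // dir
}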
17 backend/smb/smb_test.go Normal file
@@ -0,0 +1,17 @@
// Test smb filesystem interface
package smb_test

import (
"testing"

"github.com/rclone/rclone/backend/smb"
"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestSMB:rclone",
NilObject: (*smb.Object)(nil),
})
}
@@ -2,7 +2,7 @@ package sugarsync

 import (
 	"bytes"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"testing"

@@ -48,7 +48,7 @@ func TestErrorHandler(t *testing.T) {
 	} {
 		t.Run(test.name, func(t *testing.T) {
 			resp := http.Response{
-				Body:       ioutil.NopCloser(bytes.NewBufferString(test.body)),
+				Body:       io.NopCloser(bytes.NewBufferString(test.body)),
 				StatusCode: test.code,
 				Status:     test.status,
 			}
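Most of the remaining hunks in this compare apply the same mechanical Go 1.16+ migration off the deprecated io/ioutil package. A compact sketch of the replacements (standard-library facts, not taken from the diff itself):

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	// ioutil.NopCloser -> io.NopCloser; ioutil.ReadAll -> io.ReadAll
	body := io.NopCloser(strings.NewReader("payload"))
	data, _ := io.ReadAll(body)

	// ioutil.TempFile -> os.CreateTemp; ioutil.WriteFile/ReadFile -> os.WriteFile/ReadFile
	f, _ := os.CreateTemp("", "demo")
	defer os.Remove(f.Name())
	_ = os.WriteFile(f.Name(), data, 0600)
	back, _ := os.ReadFile(f.Name())

	// ioutil.Discard -> io.Discard
	_, _ = io.Copy(io.Discard, strings.NewReader(string(back)))

	// ioutil.ReadDir -> os.ReadDir, which returns []os.DirEntry rather than
	// []os.FileInfo -- hence the entry.Mode() -> entry.Type() change in bilib below.
	entries, _ := os.ReadDir(".")
	fmt.Println(string(back), len(entries) >= 0)
}
```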
@@ -40,7 +40,7 @@ const (
 	minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
 )

-// SharedOptions are shared between swift and hubic
+// SharedOptions are shared between swift and backends which depend on swift
 var SharedOptions = []fs.Option{{
 	Name: "chunk_size",
 	Help: `Above this size files will be chunked into a _segments container.
@@ -63,6 +63,32 @@ Rclone will still chunk files bigger than chunk_size when doing normal
 copy operations.`,
 	Default:  false,
 	Advanced: true,
 }, {
+	Name: "no_large_objects",
+	Help: strings.ReplaceAll(`Disable support for static and dynamic large objects
+
+Swift cannot transparently store files bigger than 5 GiB. There are
+two schemes for doing that, static or dynamic large objects, and the
+API does not allow rclone to determine whether a file is a static or
+dynamic large object without doing a HEAD on the object. Since these
+need to be treated differently, this means rclone has to issue HEAD
+requests for objects for example when reading checksums.
+
+When |no_large_objects| is set, rclone will assume that there are no
+static or dynamic large objects stored. This means it can stop doing
+the extra HEAD calls which in turn increases performance greatly
+especially when doing a swift to swift transfer with |--checksum| set.
+
+Setting this option implies |no_chunk| and also that no files will be
+uploaded in chunks, so files bigger than 5 GiB will just fail on
+upload.
+
+If you set this option and there *are* static or dynamic large objects,
+then this will give incorrect hashes for them. Downloads will succeed,
+but other operations such as Remove and Copy will fail.
+`, "|", "`"),
+	Default:  false,
+	Advanced: true,
+}, {
 	Name: config.ConfigEncoding,
 	Help: config.ConfigEncodingHelp,
@@ -222,6 +248,7 @@ type Options struct {
 	EndpointType   string               `config:"endpoint_type"`
 	ChunkSize      fs.SizeSuffix        `config:"chunk_size"`
 	NoChunk        bool                 `config:"no_chunk"`
+	NoLargeObjects bool                 `config:"no_large_objects"`
 	Enc            encoder.MultiEncoder `config:"encoding"`
 }

@@ -1100,15 +1127,24 @@ func (o *Object) hasHeader(ctx context.Context, header string) (bool, error) {

 // isDynamicLargeObject checks for X-Object-Manifest header
 func (o *Object) isDynamicLargeObject(ctx context.Context) (bool, error) {
+	if o.fs.opt.NoLargeObjects {
+		return false, nil
+	}
 	return o.hasHeader(ctx, "X-Object-Manifest")
 }

 // isStaticLargeObjectFile checks for the X-Static-Large-Object header
 func (o *Object) isStaticLargeObject(ctx context.Context) (bool, error) {
+	if o.fs.opt.NoLargeObjects {
+		return false, nil
+	}
 	return o.hasHeader(ctx, "X-Static-Large-Object")
 }

 func (o *Object) isLargeObject(ctx context.Context) (result bool, err error) {
+	if o.fs.opt.NoLargeObjects {
+		return false, nil
+	}
 	result, err = o.hasHeader(ctx, "X-Static-Large-Object")
 	if result {
 		return
@@ -1464,7 +1500,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	headers := m.ObjectHeaders()
 	fs.OpenOptionAddHeaders(options, headers)

-	if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
+	if (size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk)) && !o.fs.opt.NoLargeObjects {
 		_, err = o.updateChunks(ctx, in, headers, size, contentType)
 		if err != nil {
 			return err
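Taken together, the swift changes short-circuit every large-object probe and disable chunked uploads. A hedged standalone sketch of the updated Update condition (hypothetical type names, mirroring the hunk above):

```go
package main

import "fmt"

// Hypothetical mirror of the swift Options fields used by the Update hunk above.
type options struct {
	NoLargeObjects bool
	NoChunk        bool
	ChunkSize      int64
}

// shouldChunk restates the updated condition in Object.Update: chunk only when
// the size calls for it *and* large objects have not been disabled.
func shouldChunk(opt options, size int64) bool {
	return (size > opt.ChunkSize || (size == -1 && !opt.NoChunk)) && !opt.NoLargeObjects
}

func main() {
	opt := options{ChunkSize: 5 << 30} // 5 GiB
	fmt.Println(shouldChunk(opt, 6<<30)) // true: chunked upload
	opt.NoLargeObjects = true
	fmt.Println(shouldChunk(opt, 6<<30)) // false: no chunking; >5 GiB uploads will simply fail
}
```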
@@ -6,7 +6,6 @@ import (
 	"context"
 	"errors"
 	"io"
-	"io/ioutil"
 	"testing"

 	"github.com/ncw/swift/v2"
@@ -136,7 +135,7 @@ func (f *Fs) testWithChunkFail(t *testing.T) {
 	buf := bytes.NewBufferString(contents[:errPosition])
 	errMessage := "potato"
 	er := &readers.ErrorReader{Err: errors.New(errMessage)}
-	in := ioutil.NopCloser(io.MultiReader(buf, er))
+	in := io.NopCloser(io.MultiReader(buf, er))

 	file.Size = contentSize
 	obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)
@@ -5,7 +5,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"sync"
 	"time"

@@ -87,7 +86,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			errs[i] = fmt.Errorf("%s: %w", o.UpstreamFs().Name(), err)
 			if len(entries) > 1 {
 				// Drain the input buffer to allow other uploads to continue
-				_, _ = io.Copy(ioutil.Discard, readers[i])
+				_, _ = io.Copy(io.Discard, readers[i])
 			}
 		}
 	} else {
@@ -7,7 +7,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"path"
 	"path/filepath"
 	"strings"
@@ -501,7 +500,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
 			errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
 			if len(upstreams) > 1 {
 				// Drain the input buffer to allow other uploads to continue
-				_, _ = io.Copy(ioutil.Discard, readers[i])
+				_, _ = io.Copy(io.Discard, readers[i])
 			}
 			return
 		}
@@ -894,18 +893,22 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		WriteMetadata: true,
 		UserMetadata:  true,
 	}).Fill(ctx, f)
-	canMove := true
+	canMove, slowHash := true, false
 	for _, f := range upstreams {
 		features = features.Mask(ctx, f) // Mask all upstream fs
 		if !operations.CanServerSideMove(f) {
 			canMove = false
 		}
+		slowHash = slowHash || f.Features().SlowHash
 	}
 	// We can move if all remotes support Move or Copy
 	if canMove {
 		features.Move = f.Move
 	}

+	// If any of upstreams are SlowHash, propagate it
+	features.SlowHash = slowHash
+
 	// Enable ListR when upstreams either support ListR or is local
 	// But not when all upstreams are local
 	if features.ListR == nil {
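The slowHash loop in the union NewFs hunk above is the usual "any upstream sets the bit" propagation. As a toy sketch (hypothetical types, not rclone's fs.Features API):

```go
package main

import "fmt"

// Toy stand-ins for a feature set and upstream filesystems.
type features struct{ SlowHash bool }
type upstream struct{ feat features }

func (u upstream) Features() features { return u.feat }

func main() {
	upstreams := []upstream{{features{false}}, {features{true}}, {features{false}}}
	slowHash := false
	for _, u := range upstreams {
		slowHash = slowHash || u.Features().SlowHash
	}
	fmt.Println(slowHash) // true: one slow upstream makes the whole union SlowHash
}
```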
@@ -7,7 +7,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"path"
@@ -239,7 +238,7 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe
 func (f *Fs) decodeError(resp *http.Response, response interface{}) (err error) {
 	defer fs.CheckClose(resp.Body, &err)

-	body, err := ioutil.ReadAll(resp.Body)
+	body, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return err
 	}
@@ -7,7 +7,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"path"
@@ -1219,7 +1218,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	if partialContent && resp.StatusCode == 200 {
 		if start > 0 {
 			// We need to read and discard the beginning of the data...
-			_, err = io.CopyN(ioutil.Discard, resp.Body, start)
+			_, err = io.CopyN(io.Discard, resp.Body, start)
 			if err != nil {
 				if resp != nil {
 					_ = resp.Body.Close()
@@ -9,7 +9,6 @@ import (
 	"encoding/json"
 	"flag"
 	"fmt"
-	"io/ioutil"
 	"log"
 	"os"
 	"os/exec"
@@ -240,7 +239,7 @@ func buildWindowsResourceSyso(goarch string, versionTag string) string {
 		log.Printf("Failed to resolve path: %v", err)
 		return ""
 	}
-	err = ioutil.WriteFile(jsonPath, bs, 0644)
+	err = os.WriteFile(jsonPath, bs, 0644)
 	if err != nil {
 		log.Printf("Failed to write %s: %v", jsonPath, err)
 		return ""
@@ -476,7 +475,7 @@ func main() {
 		run("mkdir", "build")
 	}
 	chdir("build")
-	err := ioutil.WriteFile("version.txt", []byte(fmt.Sprintf("rclone %s\n", version)), 0666)
+	err := os.WriteFile("version.txt", []byte(fmt.Sprintf("rclone %s\n", version)), 0666)
 	if err != nil {
 		log.Fatalf("Couldn't write version.txt: %v", err)
 	}
@@ -16,7 +16,6 @@ import (
 	"flag"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"log"
 	"net/http"
 	"net/url"
@@ -168,7 +167,7 @@ func defaultBinDir() string {

 // read the body or an error message
 func readBody(in io.Reader) string {
-	data, err := ioutil.ReadAll(in)
+	data, err := io.ReadAll(in)
 	if err != nil {
 		return fmt.Sprintf("Error reading body: %v", err.Error())
 	}
@@ -49,7 +49,6 @@ docs = [
     "hdfs.md",
     "hidrive.md",
     "http.md",
-    "hubic.md",
     "internetarchive.md",
     "jottacloud.md",
     "koofr.md",
@@ -60,6 +59,7 @@ docs = [
     "azureblob.md",
     "onedrive.md",
     "opendrive.md",
+    "oracleobjectstorage.md",
     "qingstor.md",
     "sia.md",
     "swift.md",
@@ -68,6 +68,7 @@ docs = [
     "putio.md",
     "seafile.md",
     "sftp.md",
+    "smb.md",
     "storj.md",
     "sugarsync.md",
     "tardigrade.md", # stub only to redirect to storj.md
@@ -5,7 +5,6 @@ import (
 	"bytes"
 	"flag"
 	"fmt"
-	"io/ioutil"
 	"log"
 	"os"
 	"os/exec"
@@ -56,7 +55,7 @@ func main() {
 		log.Fatalf("Syntax: %s", os.Args[0])
 	}
 	// v1.54.0
-	versionBytes, err := ioutil.ReadFile("VERSION")
+	versionBytes, err := os.ReadFile("VERSION")
 	if err != nil {
 		log.Fatalf("Failed to read version: %v", err)
 	}
@@ -15,6 +15,7 @@ else
 fi

 rclone ${dry_run} -vv -P --checkers 16 --transfers 16 delete \
     --fast-list \
     --include "/${version}**" \
+    --include "/branch/${version}**" \
     --include "/branch/*/${version}**" \
     memstore:beta-rclone-org
@@ -5,7 +5,6 @@ package bilib
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"regexp"
@@ -106,7 +105,7 @@ func CopyDir(src string, dst string) (err error) {
 		return
 	}

-	entries, err := ioutil.ReadDir(src)
+	entries, err := os.ReadDir(src)
 	if err != nil {
 		return
 	}
@@ -122,7 +121,7 @@ func CopyDir(src string, dst string) (err error) {
 		}
 	} else {
 		// Skip symlinks.
-		if entry.Mode()&os.ModeSymlink != 0 {
+		if entry.Type()&os.ModeSymlink != 0 {
 			continue
 		}
@@ -2,7 +2,7 @@ package bilib

 import (
 	"bytes"
-	"io/ioutil"
+	"os"
 	"sort"
 	"strconv"
 )
@@ -57,5 +57,5 @@ func SaveList(list []string, path string) error {
 		_, _ = buf.WriteString(strconv.Quote(s))
 		_ = buf.WriteByte('\n')
 	}
-	return ioutil.WriteFile(path, buf.Bytes(), PermSecure)
+	return os.WriteFile(path, buf.Bytes(), PermSecure)
 }
@@ -10,7 +10,6 @@ import (
 	"errors"
 	"flag"
 	"fmt"
-	"io/ioutil"
 	"log"
 	"os"
 	"path"
@@ -303,7 +302,7 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str

 	// Execute test scenario
 	scenFile := filepath.Join(b.testDir, "scenario.txt")
-	scenBuf, err := ioutil.ReadFile(scenFile)
+	scenBuf, err := os.ReadFile(scenFile)
 	scenReplacer := b.newReplacer(false)
 	require.NoError(b.t, err)
 	b.step = 0
@@ -903,8 +902,8 @@ func (b *bisyncTest) compareResults() int {
 		// save mangled logs so difference is easier on eyes
 		goldenFile := filepath.Join(b.logDir, "mangled.golden.log")
 		resultFile := filepath.Join(b.logDir, "mangled.result.log")
-		require.NoError(b.t, ioutil.WriteFile(goldenFile, []byte(goldenText), bilib.PermSecure))
-		require.NoError(b.t, ioutil.WriteFile(resultFile, []byte(resultText), bilib.PermSecure))
+		require.NoError(b.t, os.WriteFile(goldenFile, []byte(goldenText), bilib.PermSecure))
+		require.NoError(b.t, os.WriteFile(resultFile, []byte(resultText), bilib.PermSecure))
 	}

 	if goldenText == resultText {
@@ -974,7 +973,7 @@ func (b *bisyncTest) storeGolden() {

 		goldName := b.toGolden(fileName)
 		goldPath := filepath.Join(b.goldenDir, goldName)
-		err := ioutil.WriteFile(goldPath, []byte(text), bilib.PermSecure)
+		err := os.WriteFile(goldPath, []byte(text), bilib.PermSecure)
 		assert.NoError(b.t, err, "writing golden file %s", goldName)

 		if goldName != fileName {
@@ -986,7 +985,7 @@ func (b *bisyncTest) storeGolden() {

 // mangleResult prepares test logs or listings for comparison
 func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
-	buf, err := ioutil.ReadFile(filepath.Join(dir, file))
+	buf, err := os.ReadFile(filepath.Join(dir, file))
 	require.NoError(b.t, err)
 	text := string(buf)

@@ -1205,7 +1204,7 @@ func (b *bisyncTest) ensureDir(parent, dir string, optional bool) string {
 }

 func (b *bisyncTest) listDir(dir string) (names []string) {
-	files, err := ioutil.ReadDir(dir)
+	files, err := os.ReadDir(dir)
 	require.NoError(b.t, err)
 	for _, file := range files {
 		names = append(names, filepath.Base(file.Name()))
@@ -9,7 +9,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strings"
@@ -198,7 +197,7 @@ func (opt *Options) applyFilters(ctx context.Context) (context.Context, error) {
 	_ = f.Close()

 	hashFile := filtersFile + ".md5"
-	wantHash, err := ioutil.ReadFile(hashFile)
+	wantHash, err := os.ReadFile(hashFile)
 	if err != nil && !opt.Resync {
 		return ctx, fmt.Errorf("filters file md5 hash not found (must run --resync): %s", filtersFile)
 	}
@@ -209,7 +208,7 @@ func (opt *Options) applyFilters(ctx context.Context) (context.Context, error) {

 	if opt.Resync {
 		fs.Infof(nil, "Storing filters file hash to %s", hashFile)
-		if err := ioutil.WriteFile(hashFile, []byte(gotHash), bilib.PermSecure); err != nil {
+		if err := os.WriteFile(hashFile, []byte(gotHash), bilib.PermSecure); err != nil {
 			return ctx, err
 		}
 	}
@@ -7,7 +7,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strconv"
@@ -81,7 +80,7 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
 	}

 	pidStr := []byte(strconv.Itoa(os.Getpid()))
-	if err = ioutil.WriteFile(lockFile, pidStr, bilib.PermSecure); err != nil {
+	if err = os.WriteFile(lockFile, pidStr, bilib.PermSecure); err != nil {
 		return fmt.Errorf("cannot create lock file: %s: %w", lockFile, err)
 	}
 	fs.Debugf(nil, "Lock file created: %s", lockFile)
@@ -4,7 +4,6 @@ package cat
 import (
 	"context"
 	"io"
-	"io/ioutil"
 	"log"
 	"os"
 	"strings"
@@ -77,7 +76,7 @@ Note that if offset is negative it will count from the end, so
 		fsrc := cmd.NewFsSrc(args)
 		var w io.Writer = os.Stdout
 		if discard {
-			w = ioutil.Discard
+			w = io.Discard
 		}
 		cmd.Run(false, false, command, func() error {
 			return operations.Cat(context.Background(), fsrc, w, offset, count)
@@ -1,7 +1,6 @@
 package genautocomplete

 import (
-	"io/ioutil"
 	"os"
 	"testing"

@@ -9,7 +8,7 @@ import (
 )

 func TestCompletionBash(t *testing.T) {
-	tempFile, err := ioutil.TempFile("", "completion_bash")
+	tempFile, err := os.CreateTemp("", "completion_bash")
 	assert.NoError(t, err)
 	defer func() {
 		_ = tempFile.Close()
@@ -18,14 +17,14 @@ func TestCompletionBash(t *testing.T) {

 	bashCommandDefinition.Run(bashCommandDefinition, []string{tempFile.Name()})

-	bs, err := ioutil.ReadFile(tempFile.Name())
+	bs, err := os.ReadFile(tempFile.Name())
 	assert.NoError(t, err)
 	assert.NotEmpty(t, string(bs))
 }

 func TestCompletionBashStdout(t *testing.T) {
 	originalStdout := os.Stdout
-	tempFile, err := ioutil.TempFile("", "completion_zsh")
+	tempFile, err := os.CreateTemp("", "completion_zsh")
 	assert.NoError(t, err)
 	defer func() {
 		_ = tempFile.Close()
@@ -37,13 +36,13 @@ func TestCompletionBashStdout(t *testing.T) {

 	bashCommandDefinition.Run(bashCommandDefinition, []string{"-"})

-	output, err := ioutil.ReadFile(tempFile.Name())
+	output, err := os.ReadFile(tempFile.Name())
 	assert.NoError(t, err)
 	assert.NotEmpty(t, string(output))
 }

 func TestCompletionZsh(t *testing.T) {
-	tempFile, err := ioutil.TempFile("", "completion_zsh")
+	tempFile, err := os.CreateTemp("", "completion_zsh")
 	assert.NoError(t, err)
 	defer func() {
 		_ = tempFile.Close()
@@ -52,14 +51,14 @@ func TestCompletionZsh(t *testing.T) {

 	zshCommandDefinition.Run(zshCommandDefinition, []string{tempFile.Name()})

-	bs, err := ioutil.ReadFile(tempFile.Name())
+	bs, err := os.ReadFile(tempFile.Name())
 	assert.NoError(t, err)
 	assert.NotEmpty(t, string(bs))
 }

 func TestCompletionZshStdout(t *testing.T) {
 	originalStdout := os.Stdout
-	tempFile, err := ioutil.TempFile("", "completion_zsh")
+	tempFile, err := os.CreateTemp("", "completion_zsh")
 	assert.NoError(t, err)
 	defer func() {
 		_ = tempFile.Close()
@@ -70,13 +69,13 @@ func TestCompletionZshStdout(t *testing.T) {
 	defer func() { os.Stdout = originalStdout }()

 	zshCommandDefinition.Run(zshCommandDefinition, []string{"-"})
-	output, err := ioutil.ReadFile(tempFile.Name())
+	output, err := os.ReadFile(tempFile.Name())
 	assert.NoError(t, err)
 	assert.NotEmpty(t, string(output))
 }

 func TestCompletionFish(t *testing.T) {
-	tempFile, err := ioutil.TempFile("", "completion_fish")
+	tempFile, err := os.CreateTemp("", "completion_fish")
 	assert.NoError(t, err)
 	defer func() {
 		_ = tempFile.Close()
@@ -85,14 +84,14 @@ func TestCompletionFish(t *testing.T) {

 	fishCommandDefinition.Run(fishCommandDefinition, []string{tempFile.Name()})

-	bs, err := ioutil.ReadFile(tempFile.Name())
+	bs, err := os.ReadFile(tempFile.Name())
 	assert.NoError(t, err)
 	assert.NotEmpty(t, string(bs))
 }

 func TestCompletionFishStdout(t *testing.T) {
 	originalStdout := os.Stdout
-	tempFile, err := ioutil.TempFile("", "completion_zsh")
+	tempFile, err := os.CreateTemp("", "completion_zsh")
 	assert.NoError(t, err)
 	defer func() {
 		_ = tempFile.Close()
@@ -104,7 +103,7 @@ func TestCompletionFishStdout(t *testing.T) {

 	fishCommandDefinition.Run(fishCommandDefinition, []string{"-"})

-	output, err := ioutil.ReadFile(tempFile.Name())
+	output, err := os.ReadFile(tempFile.Name())
 	assert.NoError(t, err)
 	assert.NotEmpty(t, string(output))
 }
@@ -3,7 +3,6 @@ package gendocs

 import (
 	"bytes"
-	"io/ioutil"
 	"log"
 	"os"
 	"path"
@@ -71,7 +70,7 @@ rclone.org website.`,
 		if err != nil {
 			return err
 		}
-		err = ioutil.WriteFile(filepath.Join(root, "flags.md"), buf.Bytes(), 0777)
+		err = os.WriteFile(filepath.Join(root, "flags.md"), buf.Bytes(), 0777)
 		if err != nil {
 			return err
 		}
@@ -129,7 +128,7 @@ rclone.org website.`,
 				return err
 			}
 			if !info.IsDir() {
-				b, err := ioutil.ReadFile(path)
+				b, err := os.ReadFile(path)
 				if err != nil {
 					return err
 				}
@@ -140,7 +139,7 @@ See the [global flags page](/flags/) for global options not listed here.
 ### SEE ALSO`, 1)
 				// outdent all the titles by one
 				doc = outdentTitle.ReplaceAllString(doc, `$1`)
-				err = ioutil.WriteFile(path, []byte(doc), 0777)
+				err = os.WriteFile(path, []byte(doc), 0777)
 				if err != nil {
 					return err
 				}
@@ -8,7 +8,6 @@ import (
 	"bytes"
 	"flag"
 	"io"
-	"io/ioutil"
 	"log"
 	"math/rand"
 	"os"
@@ -60,11 +59,11 @@ func randomSeekTest(size int64, in1, in2 *os.File, file1, file2 string) {

 	if !bytes.Equal(buf1, buf2) {
 		log.Printf("Dumping different blocks")
-		err = ioutil.WriteFile("/tmp/z1", buf1, 0777)
+		err = os.WriteFile("/tmp/z1", buf1, 0777)
 		if err != nil {
 			log.Fatalf("Failed to write /tmp/z1: %v", err)
 		}
-		err = ioutil.WriteFile("/tmp/z2", buf2, 0777)
+		err = os.WriteFile("/tmp/z2", buf2, 0777)
 		if err != nil {
 			log.Fatalf("Failed to write /tmp/z2: %v", err)
 		}
@@ -235,8 +235,8 @@ applications won't work with their files on an rclone mount without
 |--vfs-cache-mode writes| or |--vfs-cache-mode full|.
 See the [VFS File Caching](#vfs-file-caching) section for more info.

-The bucket-based remotes (e.g. Swift, S3, Google Compute Storage, B2,
-Hubic) do not support the concept of empty directories, so empty
+The bucket-based remotes (e.g. Swift, S3, Google Compute Storage, B2)
+do not support the concept of empty directories, so empty
 directories will have a tendency to disappear once they fall out of
 the directory cache.
@@ -97,6 +97,10 @@ func mountRc(ctx context.Context, in rc.Params) (out rc.Params, err error) {
 		return nil, err
 	}

+	if mountOpt.Daemon {
+		return nil, errors.New("Daemon Option not supported over the API")
+	}
+
 	mountType, err := in.GetString("mountType")

 	mountMu.Lock()
@@ -122,7 +126,15 @@ func mountRc(ctx context.Context, in rc.Params) (out rc.Params, err error) {
 		log.Printf("mount FAILED: %v", err)
 		return nil, err
 	}
+
+	go func() {
+		if err = mnt.Wait(); err != nil {
+			log.Printf("unmount FAILED: %v", err)
+			return
+		}
+		mountMu.Lock()
+		defer mountMu.Unlock()
+		delete(liveMounts, mountPoint)
+	}()
 	// Add mount to list if mount point was successfully created
 	liveMounts[mountPoint] = mnt
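The goroutine added in mountRc above reaps finished mounts from the registry. The pattern in isolation — wait in a goroutine, then delete the entry under the lock — sketched with hypothetical names (not the mountlib API):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// mount is a toy stand-in for mountlib's mount handle.
type mount struct{ done chan struct{} }

// Wait blocks until the mount goes away, like mnt.Wait() above.
func (m *mount) Wait() error { <-m.done; return nil }

var (
	mu   sync.Mutex
	live = map[string]*mount{}
)

func addMount(point string) *mount {
	m := &mount{done: make(chan struct{})}
	mu.Lock()
	live[point] = m
	mu.Unlock()
	// Reap the registry entry once the mount ends, as mountRc now does.
	go func() {
		_ = m.Wait()
		mu.Lock()
		defer mu.Unlock()
		delete(live, point)
	}()
	return m
}

func main() {
	m := addMount("/mnt/demo")
	close(m.done) // simulate the unmount
	time.Sleep(10 * time.Millisecond)
	mu.Lock()
	fmt.Println(len(live)) // 0
	mu.Unlock()
}
```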
@@ -246,7 +258,7 @@ func listMountsRc(_ context.Context, in rc.Params) (out rc.Params, err error) {
 	for _, k := range keys {
 		m := liveMounts[k]
 		info := MountInfo{
-			Fs:         m.Fs.Name(),
+			Fs:         fs.ConfigString(m.Fs),
 			MountPoint: m.MountPoint,
 			MountedOn:  m.MountedOn,
 		}
@@ -262,8 +274,11 @@ func init() {
 		Path:         "mount/unmountall",
 		AuthRequired: true,
 		Fn:           unmountAll,
-		Title:        "Show current mount points",
-		Help: `This shows currently mounted points, which can be used for performing an unmount.
+		Title:        "Unmount all active mounts",
+		Help: `
+rclone allows Linux, FreeBSD, macOS and Windows to
+mount any of Rclone's cloud storage systems as a file system with
+FUSE.

 This takes no parameters and returns error if unmount does not succeed.
@@ -2,7 +2,6 @@ package mountlib_test

 import (
 	"context"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -36,7 +35,7 @@ func TestRc(t *testing.T) {
 	assert.NotNil(t, getMountTypes)

 	localDir := t.TempDir()
-	err := ioutil.WriteFile(filepath.Join(localDir, "file.txt"), []byte("hello"), 0666)
+	err := os.WriteFile(filepath.Join(localDir, "file.txt"), []byte("hello"), 0666)
 	require.NoError(t, err)

 	mountPoint := t.TempDir()
@@ -89,11 +89,12 @@ func helpText() (tr []string) {
 		" ↑,↓ or k,j to Move",
 		" →,l to enter",
 		" ←,h to return",
-		" c toggle counts",
 		" g toggle graph",
+		" c toggle counts",
 		" a toggle average size in directory",
+		" m toggle modified time",
 		" u toggle human-readable format",
-		" n,s,C,A sort by name,size,count,average size",
+		" n,s,C,A,M sort by name,size,count,asize,mtime",
 		" d delete file/directory",
 		" v select file/directory",
 		" V enter visual select mode",
@@ -131,12 +132,14 @@ type UI struct {
 	showGraph          bool // toggle showing graph
 	showCounts         bool // toggle showing counts
 	showDirAverageSize bool // toggle average size
+	showModTime        bool // toggle showing timestamps
 	humanReadable      bool // toggle human-readable format
 	visualSelectMode   bool // toggle visual selection mode
-	sortByName         int8 // +1 for normal, 0 for off, -1 for reverse
-	sortBySize         int8
+	sortByName         int8 // +1 for normal (lexical), 0 for off, -1 for reverse
+	sortBySize         int8 // +1 for normal (largest first), 0 for off, -1 for reverse (smallest first)
 	sortByCount        int8
 	sortByAverageSize  int8
+	sortByModTime      int8 // +1 for normal (newest first), 0 for off, -1 for reverse (oldest first)
 	dirPosMap          map[string]dirPos // store for directory positions
 	selectedEntries    map[string]dirPos // selected entries of current directory
 }
@@ -332,6 +335,7 @@ func (u *UI) hasEmptyDir() bool {

 // Draw the current screen
 func (u *UI) Draw() error {
+	ctx := context.Background()
 	w, h := termbox.Size()
 	u.dirListHeight = h - 3

@@ -365,7 +369,13 @@ func (u *UI) Draw() error {
 			if y >= h-1 {
 				break
 			}
-			attrs, err := u.d.AttrI(u.sortPerm[n])
+			var attrs scan.Attrs
+			var err error
+			if u.showModTime {
+				attrs, err = u.d.AttrWithModTimeI(ctx, u.sortPerm[n])
+			} else {
+				attrs, err = u.d.AttrI(u.sortPerm[n])
+			}
 			_, isSelected := u.selectedEntries[entry.String()]
 			fg := termbox.ColorWhite
 			if attrs.EntriesHaveErrors {
@@ -421,6 +431,9 @@ func (u *UI) Draw() error {
 					extras += strings.Repeat(" ", len(ss))
 				}
 			}
+			if u.showModTime {
+				extras += attrs.ModTime.Local().Format("2006-01-02 15:04:05") + " "
+			}
 			if showEmptyDir {
 				if attrs.IsDir && attrs.Count == 0 && fileFlag == ' ' {
 					fileFlag = 'e'
@@ -656,8 +669,15 @@ type ncduSort struct {
 // Less is part of sort.Interface.
 func (ds *ncduSort) Less(i, j int) bool {
 	var iAvgSize, jAvgSize float64
-	iattrs, _ := ds.d.AttrI(ds.sortPerm[i])
-	jattrs, _ := ds.d.AttrI(ds.sortPerm[j])
+	var iattrs, jattrs scan.Attrs
+	if ds.u.sortByModTime != 0 {
+		ctx := context.Background()
+		iattrs, _ = ds.d.AttrWithModTimeI(ctx, ds.sortPerm[i])
+		jattrs, _ = ds.d.AttrWithModTimeI(ctx, ds.sortPerm[j])
+	} else {
+		iattrs, _ = ds.d.AttrI(ds.sortPerm[i])
+		jattrs, _ = ds.d.AttrI(ds.sortPerm[j])
+	}
 	iname, jname := ds.entries[ds.sortPerm[i]].Remote(), ds.entries[ds.sortPerm[j]].Remote()
 	if iattrs.Count > 0 {
 		iAvgSize = iattrs.AverageSize()
@@ -679,6 +699,14 @@ func (ds *ncduSort) Less(i, j int) bool {
 		if iattrs.Size != jattrs.Size {
 			return iattrs.Size > jattrs.Size
 		}
+	case ds.u.sortByModTime < 0:
+		if iattrs.ModTime != jattrs.ModTime {
+			return iattrs.ModTime.Before(jattrs.ModTime)
+		}
+	case ds.u.sortByModTime > 0:
+		if iattrs.ModTime != jattrs.ModTime {
+			return iattrs.ModTime.After(jattrs.ModTime)
+		}
 	case ds.u.sortByCount < 0:
 		if iattrs.Count != jattrs.Count {
 			return iattrs.Count < jattrs.Count
@@ -692,13 +720,17 @@ func (ds *ncduSort) Less(i, j int) bool {
 			return iAvgSize < jAvgSize
 		}
 		// if avgSize is equal, sort by size
-		return iattrs.Size < jattrs.Size
+		if iattrs.Size != jattrs.Size {
+			return iattrs.Size < jattrs.Size
+		}
 	case ds.u.sortByAverageSize > 0:
 		if iAvgSize != jAvgSize {
 			return iAvgSize > jAvgSize
 		}
 		// if avgSize is equal, sort by size
-		return iattrs.Size > jattrs.Size
+		if iattrs.Size != jattrs.Size {
+			return iattrs.Size > jattrs.Size
+		}
 	}
 	// if everything equal, sort by name
 	return iname < jname
@@ -843,8 +875,9 @@ func NewUI(f fs.Fs) *UI {
 		showCounts:         false,
 		showDirAverageSize: false,
 		humanReadable:      true,
-		sortByName:         0, // +1 for normal, 0 for off, -1 for reverse
-		sortBySize:         1,
+		sortByName:         0,
+		sortBySize:         1, // Sort by largest first
+		sortByModTime:      0,
 		sortByCount:        0,
 		dirPosMap:          make(map[string]dirPos),
 		selectedEntries:    make(map[string]dirPos),
@@ -933,6 +966,8 @@ outer:
 			u.enter()
 		case 'c':
 			u.showCounts = !u.showCounts
+		case 'm':
+			u.showModTime = !u.showModTime
 		case 'g':
 			u.showGraph = !u.showGraph
 		case 'a':
@@ -941,6 +976,8 @@ outer:
 			u.toggleSort(&u.sortByName)
 		case 's':
 			u.toggleSort(&u.sortBySize)
+		case 'M':
+			u.toggleSort(&u.sortByModTime)
 		case 'v':
 			u.toggleSelectForCursor()
 		case 'V':
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"path"
 	"sync"
+	"time"

 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/walk"
@@ -31,6 +32,7 @@ type Dir struct {
 // in the total count. They are not included in the size, i.e. treated
 // as empty files, which means the size may be underestimated.
 type Attrs struct {
+	ModTime          time.Time
 	Size             int64
 	Count            int64
 	CountUnknownSize int64
@@ -193,20 +195,33 @@ func (d *Dir) Attr() (size int64, count int64) {
 	return d.size, d.count
 }

+// attrI returns the size, count and flags for the i-th directory entry
+func (d *Dir) attrI(i int) (attrs Attrs, err error) {
+	subDir, isDir := d.getDir(i)
+	if !isDir {
+		return Attrs{time.Time{}, d.entries[i].Size(), 0, 0, false, true, d.entriesHaveErrors}, d.readError
+	}
+	if subDir == nil {
+		return Attrs{time.Time{}, 0, 0, 0, true, false, false}, nil
+	}
+	size, count := subDir.Attr()
+	return Attrs{time.Time{}, size, count, subDir.countUnknownSize, true, true, subDir.entriesHaveErrors}, subDir.readError
+}
+
 // AttrI returns the size, count and flags for the i-th directory entry
 func (d *Dir) AttrI(i int) (attrs Attrs, err error) {
 	d.mu.Lock()
 	defer d.mu.Unlock()
-	subDir, isDir := d.getDir(i)
-	if !isDir {
-		return Attrs{d.entries[i].Size(), 0, 0, false, true, d.entriesHaveErrors}, d.readError
-	}
-	if subDir == nil {
-		return Attrs{0, 0, 0, true, false, false}, nil
-	}
-	size, count := subDir.Attr()
-	return Attrs{size, count, subDir.countUnknownSize, true, true, subDir.entriesHaveErrors}, subDir.readError
+	return d.attrI(i)
 }
+
+// AttrWithModTimeI returns the modtime, size, count and flags for the i-th directory entry
+func (d *Dir) AttrWithModTimeI(ctx context.Context, i int) (attrs Attrs, err error) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	attrs, err = d.attrI(i)
+	attrs.ModTime = d.entries[i].ModTime(ctx)
+	return
+}

 // Scan the Fs passed in, returning a root directory channel and an
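The attrI/AttrI split above is the standard Go idiom of locking in the exported wrapper and doing the work in an unexported helper, so the new AttrWithModTimeI can reuse it without double-locking. A minimal sketch of the idiom (illustrative only, not the ncdu code):

```go
package main

import (
	"fmt"
	"sync"
)

type counter struct {
	mu sync.Mutex
	n  int
}

// add does the real work and assumes the caller holds mu.
func (c *counter) add(delta int) int { c.n += delta; return c.n }

// Add is the exported, locking entry point.
func (c *counter) Add(delta int) int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.add(delta)
}

// AddTwice reuses add without double-locking, like AttrWithModTimeI reuses attrI.
func (c *counter) AddTwice(delta int) int {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.add(delta)
	return c.add(delta)
}

func main() {
	c := &counter{}
	fmt.Println(c.Add(1), c.AddTwice(2)) // 1 5
}
```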
@@ -7,7 +7,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"os"
 	"strings"
@@ -204,7 +204,7 @@ func doCall(ctx context.Context, path string, in rc.Params) (out rc.Params, err

 	if resp.StatusCode != http.StatusOK {
 		var body []byte
-		body, err = ioutil.ReadAll(resp.Body)
+		body, err = io.ReadAll(resp.Body)
 		var bodyString string
 		if err == nil {
 			bodyString = string(body)
@@ -66,7 +66,7 @@ a lot of data, you're better off caching locally and then

 		fdst, dstFileName := cmd.NewFsDstFile(args)
 		cmd.Run(false, false, command, func() error {
-			_, err := operations.RcatSize(context.Background(), fdst, dstFileName, os.Stdin, size, time.Now())
+			_, err := operations.RcatSize(context.Background(), fdst, dstFileName, os.Stdin, size, time.Now(), nil)
 			return err
 		})
 	},
@@ -14,7 +14,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"log"
 	"net/http"
 	"os"
@@ -227,7 +226,7 @@ func InstallUpdate(ctx context.Context, opt *Options) error {
 }

 func installPackage(ctx context.Context, beta bool, version, siteURL, packageFormat string) error {
-	tempFile, err := ioutil.TempFile("", "rclone.*."+packageFormat)
+	tempFile, err := os.CreateTemp("", "rclone.*."+packageFormat)
 	if err != nil {
 		return fmt.Errorf("unable to write temporary package: %w", err)
 	}
@@ -357,7 +356,7 @@ func downloadUpdate(ctx context.Context, beta bool, version, siteURL, newFile, p
 	}

 	if packageFormat == "deb" || packageFormat == "rpm" {
-		if err := ioutil.WriteFile(newFile, archiveBuf, 0644); err != nil {
+		if err := os.WriteFile(newFile, archiveBuf, 0644); err != nil {
 			return fmt.Errorf("cannot write temporary .%s: %w", packageFormat, err)
 		}
 		return nil
@@ -471,5 +470,5 @@ func downloadFile(ctx context.Context, url string) ([]byte, error) {
 	if resp.StatusCode != http.StatusOK {
 		return nil, fmt.Errorf("failed with %s downloading %s", resp.Status, url)
 	}
-	return ioutil.ReadAll(resp.Body)
+	return io.ReadAll(resp.Body)
 }
@@ -5,7 +5,6 @@ package selfupdate

 import (
 	"context"
-	"io/ioutil"
 	"os"
 	"os/exec"
 	"path/filepath"
@@ -86,7 +85,7 @@ func TestInstallOnLinux(t *testing.T) {
 	assert.NoError(t, InstallUpdate(ctx, &Options{Beta: true, Output: path, Version: fs.Version}))

 	// Must fail on non-writable file
-	assert.NoError(t, ioutil.WriteFile(path, []byte("test"), 0644))
+	assert.NoError(t, os.WriteFile(path, []byte("test"), 0644))
 	assert.NoError(t, os.Chmod(path, 0000))
 	err = (InstallUpdate(ctx, &Options{Beta: true, Output: path}))
 	assert.Error(t, err)
@@ -101,7 +100,7 @@ func TestInstallOnLinux(t *testing.T) {
 	assert.Equal(t, os.FileMode(0644), info.Mode().Perm())

 	// Must remove temporary files
-	files, err := ioutil.ReadDir(testDir)
+	files, err := os.ReadDir(testDir)
 	assert.NoError(t, err)
 	assert.Equal(t, 1, len(files))

@@ -141,7 +140,7 @@ func TestRenameOnWindows(t *testing.T) {
 	// Must not create temporary files when target doesn't exist
 	assert.NoError(t, InstallUpdate(ctx, &Options{Beta: true, Output: path}))

-	files, err := ioutil.ReadDir(testDir)
+	files, err := os.ReadDir(testDir)
 	assert.NoError(t, err)
 	assert.Equal(t, 1, len(files))

@@ -152,7 +151,7 @@ func TestRenameOnWindows(t *testing.T) {
 	assert.NoError(t, cmdWait.Start())

 	assert.NoError(t, InstallUpdate(ctx, &Options{Beta: false, Output: path}))
-	files, err = ioutil.ReadDir(testDir)
+	files, err = os.ReadDir(testDir)
 	assert.NoError(t, err)
 	assert.Equal(t, 2, len(files))

@@ -189,7 +188,7 @@ func TestRenameOnWindows(t *testing.T) {

 	// Updating when the "old" executable is running must produce a random "old" file
 	assert.NoError(t, InstallUpdate(ctx, &Options{Beta: true, Output: path}))
-	files, err = ioutil.ReadDir(testDir)
+	files, err = os.ReadDir(testDir)
 	assert.NoError(t, err)
 	assert.Equal(t, 3, len(files))
@@ -158,7 +158,9 @@ func mediaWithResources(nodes vfs.Nodes) (vfs.Nodes, map[vfs.Node]vfs.Nodes) {
 	for _, node := range nodes {
 		baseName, ext := splitExt(strings.ToLower(node.Name()))
 		switch ext {
-		case ".srt":
+		case ".srt", ".ass", ".ssa", ".sub", ".idx", ".sup", ".jss", ".txt", ".usf", ".cue", ".vtt", ".css":
+			// .idx should be with .sub, .css should be with vtt otherwise they should be culled,
+			// and their mimeTypes are not consistent, but anyway these negatives don't throw errors.
 			subtitlesByName[baseName] = node
 		default:
 			mediaByName[baseName] = append(mediaByName[baseName], node)
File diff suppressed because one or more lines are too long
@@ -6,7 +6,7 @@ package data

 import (
 	"fmt"
-	"io/ioutil"
+	"io"
 	"text/template"

 	"github.com/rclone/rclone/fs"
@@ -21,7 +21,7 @@ func GetTemplate() (tpl *template.Template, err error) {

 	defer fs.CheckClose(templateFile, &err)

-	templateBytes, err := ioutil.ReadAll(templateFile)
+	templateBytes, err := io.ReadAll(templateFile)
 	if err != nil {
 		return nil, fmt.Errorf("get template read: %w", err)
 	}
@@ -118,7 +118,7 @@ func newServer(f fs.Fs, opt *dlnaflags.Options) (*server, error) {
 	}

 	s := &server{
-		AnnounceInterval: 10 * time.Second,
+		AnnounceInterval: opt.AnnounceInterval,
 		FriendlyName:     friendlyName,
 		RootDeviceUUID:   makeDeviceUUID(friendlyName),
 		Interfaces:       interfaces,
@@ -279,7 +279,14 @@ func (s *server) resourceHandler(w http.ResponseWriter, r *http.Request) {
 // use s.Wait() to block on the listener indefinitely.
 func (s *server) Serve() (err error) {
 	if s.HTTPConn == nil {
-		s.HTTPConn, err = net.Listen("tcp", s.httpListenAddr)
+		// Currently, the SSDP server only listens on an IPv4 multicast address.
+		// Differentiate between two INADDR_ANY addresses,
+		// so that 0.0.0.0 can only listen on IPv4 addresses.
+		network := "tcp4"
+		if strings.Count(s.httpListenAddr, ":") > 1 {
+			network = "tcp"
+		}
+		s.HTTPConn, err = net.Listen(network, s.httpListenAddr)
 		if err != nil {
 			return
 		}
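The strings.Count test above is a quick heuristic: a plain host:port listen address contains exactly one colon, while any IPv6 form (e.g. [::]:7879) contains more. A standalone sketch (pickNetwork is an illustrative name, not rclone API):

```go
package main

import (
	"fmt"
	"strings"
)

// pickNetwork mirrors the heuristic added in Serve(): default to IPv4-only
// listening and fall back to dual-stack "tcp" only for IPv6-looking addresses.
func pickNetwork(listenAddr string) string {
	if strings.Count(listenAddr, ":") > 1 {
		return "tcp"
	}
	return "tcp4"
}

func main() {
	for _, addr := range []string{":7879", "0.0.0.0:7879", "[::]:7879", "[2001:db8::1]:7879"} {
		fmt.Println(addr, "=>", pickNetwork(addr))
	}
	// :7879 => tcp4, 0.0.0.0:7879 => tcp4, [::]:7879 => tcp, [2001:db8::1]:7879 => tcp
}
```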
@@ -336,6 +343,30 @@ func (s *server) startSSDP() {

 // Run SSDP server on an interface.
 func (s *server) ssdpInterface(intf net.Interface) {
+	// Figure out whether should an ip be announced
+	ipfilterFn := func(ip net.IP) bool {
+		listenaddr := s.HTTPConn.Addr().String()
+		listenip := listenaddr[:strings.LastIndex(listenaddr, ":")]
+		switch listenip {
+		case "0.0.0.0":
+			if strings.Contains(ip.String(), ":") {
+				// Any IPv6 address should not be announced
+				// because SSDP only listen on IPv4 multicast address
+				return false
+			}
+			return true
+		case "[::]":
+			// In the @Serve() section, the default settings have been made to not listen on IPv6 addresses.
+			// If actually still listening on [::], then allow to announce any address.
+			return true
+		default:
+			if listenip == ip.String() {
+				return true
+			}
+			return false
+		}
+	}
+
 	// Figure out which HTTP location to advertise based on the interface IP.
 	advertiseLocationFn := func(ip net.IP) string {
 		url := url.URL{
@@ -349,6 +380,12 @@ func (s *server) ssdpInterface(intf net.Interface) {
 		return url.String()
 	}

+	_, err := intf.Addrs()
+	if err != nil {
+		panic(err)
+	}
+
 	fs.Logf(s, "Started SSDP on %v", intf.Name)

 	// Note that the devices and services advertised here via SSDP should be
 	// in agreement with the rootDesc XML descriptor that is defined above.
 	ssdpServer := ssdp.Server{
@@ -359,6 +396,7 @@ func (s *server) ssdpInterface(intf net.Interface) {
 			"urn:schemas-upnp-org:service:ContentDirectory:1",
 			"urn:schemas-upnp-org:service:ConnectionManager:1",
 			"urn:microsoft.com:service:X_MS_MediaReceiverRegistrar:1"},
+		IPFilter: ipfilterFn,
 		Location: advertiseLocationFn,
 		Server:   serverField,
 		UUID:     s.RootDeviceUUID,
@@ -5,7 +5,7 @@ import (
 	"context"
 	"fmt"
 	"html"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"os"
 	"strings"
@@ -60,7 +60,7 @@ func TestRootSCPD(t *testing.T) {
 	resp, err := http.DefaultClient.Do(req)
 	require.NoError(t, err)
 	assert.Equal(t, http.StatusOK, resp.StatusCode)
-	body, err := ioutil.ReadAll(resp.Body)
+	body, err := io.ReadAll(resp.Body)
 	require.NoError(t, err)
 	// Make sure that the SCPD contains a CDS service.
 	require.Contains(t, string(body),
@@ -80,7 +80,7 @@ func TestServeContent(t *testing.T) {
 	require.NoError(t, err)
 	defer fs.CheckClose(resp.Body, &err)
 	assert.Equal(t, http.StatusOK, resp.StatusCode)
-	actualContents, err := ioutil.ReadAll(resp.Body)
+	actualContents, err := io.ReadAll(resp.Body)
 	assert.NoError(t, err)

 	// Now compare the contents with the golden file.
@@ -90,7 +90,7 @@ func TestServeContent(t *testing.T) {
 	goldenReader, err := goldenFile.Open(os.O_RDONLY)
 	assert.NoError(t, err)
 	defer fs.CheckClose(goldenReader, &err)
-	goldenContents, err := ioutil.ReadAll(goldenReader)
+	goldenContents, err := io.ReadAll(goldenReader)
 	assert.NoError(t, err)

 	require.Equal(t, goldenContents, actualContents)
@@ -119,7 +119,7 @@ func TestContentDirectoryBrowseMetadata(t *testing.T) {
 	resp, err := http.DefaultClient.Do(req)
 	require.NoError(t, err)
 	assert.Equal(t, http.StatusOK, resp.StatusCode)
-	body, err := ioutil.ReadAll(resp.Body)
+	body, err := io.ReadAll(resp.Body)
 	require.NoError(t, err)
 	// should contain an appropriate URN
 	require.Contains(t, string(body), "urn:schemas-upnp-org:service:ContentDirectory:1")
@@ -145,7 +145,7 @@ func TestMediaReceiverRegistrarService(t *testing.T) {
 	resp, err := http.DefaultClient.Do(req)
 	require.NoError(t, err)
 	assert.Equal(t, http.StatusOK, resp.StatusCode)
-	body, err := ioutil.ReadAll(resp.Body)
+	body, err := io.ReadAll(resp.Body)
 	require.NoError(t, err)
 	require.Contains(t, string(body), "<RegistrationRespMsg>")
 }
@@ -173,7 +173,7 @@ func TestContentDirectoryBrowseDirectChildren(t *testing.T) {
 	resp, err := http.DefaultClient.Do(req)
 	require.NoError(t, err)
 	assert.Equal(t, http.StatusOK, resp.StatusCode)
-	body, err := ioutil.ReadAll(resp.Body)
+	body, err := io.ReadAll(resp.Body)
 	require.NoError(t, err)
 	// expect video.mp4, video.srt, video.en.srt URLs to be in the DIDL
 	require.Contains(t, string(body), "/r/video.mp4")
@@ -201,7 +201,7 @@ func TestContentDirectoryBrowseDirectChildren(t *testing.T) {
 	resp, err = http.DefaultClient.Do(req)
 	require.NoError(t, err)
 	assert.Equal(t, http.StatusOK, resp.StatusCode)
-	body, err = ioutil.ReadAll(resp.Body)
+	body, err = io.ReadAll(resp.Body)
 	require.NoError(t, err)
 	// expect video.mp4, video.srt, URLs to be in the DIDL
 	require.Contains(t, string(body), "/r/subdir/video.mp4")
@@ -2,6 +2,8 @@
 package dlnaflags

 import (
+	"time"
+
 	"github.com/rclone/rclone/fs/config/flags"
 	"github.com/rclone/rclone/fs/rc"
 	"github.com/spf13/pflag"
@@ -24,18 +26,20 @@ logging of all UPNP traffic.

 // Options is the type for DLNA serving options.
 type Options struct {
-	ListenAddr     string
-	FriendlyName   string
-	LogTrace       bool
-	InterfaceNames []string
+	ListenAddr       string
+	FriendlyName     string
+	LogTrace         bool
+	InterfaceNames   []string
+	AnnounceInterval time.Duration
 }

 // DefaultOpt contains the defaults options for DLNA serving.
 var DefaultOpt = Options{
-	ListenAddr:     ":7879",
-	FriendlyName:   "",
-	LogTrace:       false,
-	InterfaceNames: []string{},
+	ListenAddr:       ":7879",
+	FriendlyName:     "",
+	LogTrace:         false,
+	InterfaceNames:   []string{},
+	AnnounceInterval: 12 * time.Minute,
 }

 // Opt contains the options for DLNA serving.
@@ -49,6 +53,7 @@ func addFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *Options) {
 	flags.StringVarP(flagSet, &Opt.FriendlyName, prefix+"name", "", Opt.FriendlyName, "Name of DLNA server")
 	flags.BoolVarP(flagSet, &Opt.LogTrace, prefix+"log-trace", "", Opt.LogTrace, "Enable trace logging of SOAP traffic")
 	flags.StringArrayVarP(flagSet, &Opt.InterfaceNames, prefix+"interface", "", Opt.InterfaceNames, "The interface to use for SSDP (repeat as necessary)")
+	flags.DurationVarP(flagSet, &Opt.AnnounceInterval, prefix+"announce-interval", "", Opt.AnnounceInterval, "The interval between SSDP announcements")
 }

 // AddFlags add the command line flags for DLNA serving.
@@ -8,7 +8,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net"
 	"net/http"
 	"os"
@@ -280,7 +280,7 @@ func (a *APIClient) request(path string, in, out interface{}, wantErr bool) {
 	}
 	assert.Equal(t, wantStatus, res.StatusCode)

-	dataOut, err = ioutil.ReadAll(res.Body)
+	dataOut, err = io.ReadAll(res.Body)
 	require.NoError(t, err)
 	err = res.Body.Close()
 	require.NoError(t, err)
@@ -389,11 +389,11 @@ func testMountAPI(t *testing.T, sockAddr string) {
 	assert.Contains(t, res, "volume is in use")

 	text := []byte("banana")
-	err = ioutil.WriteFile(filepath.Join(mount1, "txt"), text, 0644)
+	err = os.WriteFile(filepath.Join(mount1, "txt"), text, 0644)
 	assert.NoError(t, err)
 	time.Sleep(tempDelay)

-	text2, err := ioutil.ReadFile(filepath.Join(path1, "txt"))
+	text2, err := os.ReadFile(filepath.Join(path1, "txt"))
 	assert.NoError(t, err)
 	if runtime.GOOS != "windows" {
 		// this check sometimes fails on windows - ignore
@@ -4,7 +4,6 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"math/rand"
 	"os"
 	"path/filepath"
@@ -329,7 +328,7 @@ func (drv *Driver) saveState() error {
 	ctx := context.Background()
 	retries := fs.GetConfig(ctx).LowLevelRetries
 	for i := 0; i <= retries; i++ {
-		err = ioutil.WriteFile(drv.statePath, data, 0600)
+		err = os.WriteFile(drv.statePath, data, 0600)
 		if err == nil {
 			return nil
 		}
@@ -342,7 +341,7 @@ func (drv *Driver) saveState() error {
 func (drv *Driver) restoreState(ctx context.Context) error {
 	fs.Debugf(nil, "Restore state from %s", drv.statePath)

-	data, err := ioutil.ReadFile(drv.statePath)
+	data, err := os.ReadFile(drv.statePath)
 	if os.IsNotExist(err) {
 		return nil
 	}
@@ -4,7 +4,6 @@ import (
 	"context"
 	"crypto/tls"
 	"fmt"
-	"io/ioutil"
 	"net"
 	"net/http"
 	"os"
@@ -93,7 +92,7 @@ func writeSpecFile(addr, proto, specDir string) (string, error) {
 	}
 	specFile := filepath.Join(specDir, "rclone.spec")
 	url := fmt.Sprintf("%s://%s", proto, addr)
-	if err := ioutil.WriteFile(specFile, []byte(url), 0644); err != nil {
+	if err := os.WriteFile(specFile, []byte(url), 0644); err != nil {
 		return "", err
 	}
 	fs.Debugf(nil, "Plugin spec has been written to %s", specFile)
Some files were not shown because too many files have changed in this diff.