mirror of https://github.com/rclone/rclone.git synced 2025-12-16 00:04:40 +00:00

Compare commits

2 Commits

Nick Craig-Wood
4488bc0cd7 union: make server side copies possible from outside the union
Before this change, only server side copies within the union would be
permitted.

This change allows server side copies from outside the union to
happen, delegating the checking to the individual upstream backend.

The same change should be made to Move and DirMove

Fixes #6287
2022-09-12 13:27:22 +01:00
Nick Craig-Wood
404059fd95 operations: allow --server-side-across-configs to work between any backends
Before this change --server-side-across-configs would only work
between two backends of the same type.

This meant that server side copies from a backend to a union of that
backend didn't work where they really should have done.

Fixes #6287
2022-09-12 13:27:22 +01:00
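
Both commits address the same issue: the operations change lets --server-side-across-configs apply between backends of different types, and the union change then delegates the copy permission check to the matching upstream, so a server side copy can start outside the union. A minimal usage sketch, assuming a configured S3 remote called s3remote and a union remote called myunion that has it as an upstream (both names are illustrative):

    # Server side copy from a plain backend into a union wrapping that backend.
    # Without these commits rclone falls back to downloading and re-uploading.
    rclone copy --server-side-across-configs s3remote:bucket/path myunion:backup/path

Whether the copy actually happens server side is still decided by the upstream backend, since the union only delegates the check.
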
460 changed files with 11977 additions and 33742 deletions

View File

@@ -30,7 +30,7 @@ jobs:
include:
- job_name: linux
os: ubuntu-latest
go: '1.19'
go: '1.19.x'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -41,14 +41,14 @@ jobs:
- job_name: linux_386
os: ubuntu-latest
go: '1.19'
go: '1.19.x'
goarch: 386
gotags: cmount
quicktest: true
- job_name: mac_amd64
os: macos-11
go: '1.19'
go: '1.19.x'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -57,14 +57,14 @@ jobs:
- job_name: mac_arm64
os: macos-11
go: '1.19'
go: '1.19.x'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows
os: windows-latest
go: '1.19'
go: '1.19.x'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'
@@ -74,20 +74,20 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '1.19'
go: '1.19.x'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.17
os: ubuntu-latest
go: '1.17'
go: '1.17.x'
quicktest: true
racequicktest: true
- job_name: go1.18
os: ubuntu-latest
go: '1.18'
go: '1.18.x'
quicktest: true
racequicktest: true
@@ -97,13 +97,14 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Install Go
uses: actions/setup-go@v3
uses: actions/setup-go@v2
with:
stable: 'false'
go-version: ${{ matrix.go }}
check-latest: true
@@ -161,7 +162,7 @@ jobs:
env
- name: Go module cache
uses: actions/cache@v3
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -225,7 +226,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v2
- name: Code quality test
uses: golangci/golangci-lint-action@v3
@@ -233,19 +234,6 @@ jobs:
# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
version: latest
# Run govulncheck on the latest go version, the one we build binaries with
- name: Install Go
uses: actions/setup-go@v3
with:
go-version: 1.19
check-latest: true
- name: Install govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
- name: Scan for vulnerabilities
run: govulncheck ./...
android:
if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
timeout-minutes: 30
@@ -254,18 +242,18 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v2
with:
fetch-depth: 0
# Upgrade together with NDK version
- name: Set up Go
uses: actions/setup-go@v3
uses: actions/setup-go@v1
with:
go-version: 1.19
go-version: 1.19.x
- name: Go module cache
uses: actions/cache@v3
uses: actions/cache@v2
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}

View File

@@ -12,7 +12,7 @@ jobs:
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v3
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Build and publish image

View File

@@ -11,7 +11,7 @@ jobs:
name: Build image job
steps:
- name: Checkout master
uses: actions/checkout@v3
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Get actual patch version
@@ -40,7 +40,7 @@ jobs:
name: Build docker plugin job
steps:
- name: Checkout master
uses: actions/checkout@v3
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Build and publish docker plugin

.gitignore (vendored, 1 changed line)
View File

@@ -15,4 +15,3 @@ fuzz-build.zip
*.rej
Thumbs.db
__pycache__
*.kate-swp

View File

@@ -20,7 +20,7 @@ issues:
exclude-use-default: false
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-issues-per-linter: 0
max-per-linter: 0
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0

MANUAL.html (generated, 4799 changed lines)

File diff suppressed because it is too large

MANUAL.md (generated, 5212 changed lines)

File diff suppressed because it is too large

MANUAL.txt (generated, 5451 changed lines)

File diff suppressed because it is too large

View File

@@ -81,9 +81,6 @@ quicktest:
racequicktest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...
compiletest:
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...
# Do source code quality checks
check: rclone
@echo "-- START CODE QUALITY REPORT -------------------------------"

View File

@@ -45,12 +45,11 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
* Hubic [:page_facing_up:](https://rclone.org/hubic/)
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)
@@ -63,20 +62,17 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* OpenDrive [:page_facing_up:](https://rclone.org/opendrive/)
* OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
* Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
* Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
* ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
* pCloud [:page_facing_up:](https://rclone.org/pcloud/)
* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
* put.io [:page_facing_up:](https://rclone.org/putio/)
* QingStor [:page_facing_up:](https://rclone.org/qingstor/)
* Qiniu Cloud Object Storage (Kodo) [:page_facing_up:](https://rclone.org/s3/#qiniu)
* Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
* RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
* Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
* Seafile [:page_facing_up:](https://rclone.org/seafile/)
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* Storj [:page_facing_up:](https://rclone.org/storj/)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)

View File

@@ -53,14 +53,6 @@ doing that so it may be necessary to roll back dependencies to the
version specified by `make updatedirect` in order to get rclone to
build.
## Tidy beta
At some point after the release run
bin/tidy-beta v1.55
where the version number is that of a couple ago to remove old beta binaries.
## Making a point release
If rclone needs a point release due to some horrendous bug:
@@ -74,7 +66,8 @@ Set vars
First make the release branch. If this is a second point release then
this will be done already.
* git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
* git branch ${BASE_TAG} ${BASE_TAG}-stable
* git co ${BASE_TAG}-stable
* make startstable
Now

View File

@@ -1 +1 @@
v1.62.0
v1.60.0

View File

@@ -24,6 +24,7 @@ import (
_ "github.com/rclone/rclone/backend/hdfs"
_ "github.com/rclone/rclone/backend/hidrive"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/hubic"
_ "github.com/rclone/rclone/backend/internetarchive"
_ "github.com/rclone/rclone/backend/jottacloud"
_ "github.com/rclone/rclone/backend/koofr"
@@ -34,7 +35,6 @@ import (
_ "github.com/rclone/rclone/backend/netstorage"
_ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/oracleobjectstorage"
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/putio"
@@ -44,7 +44,6 @@ import (
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sia"
_ "github.com/rclone/rclone/backend/smb"
_ "github.com/rclone/rclone/backend/storj"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"

File diff suppressed because it is too large

View File

@@ -1,5 +1,5 @@
//go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js,go1.18
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob

View File

@@ -1,11 +1,12 @@
// Test AzureBlob filesystem interface
//go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js,go1.18
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
import (
"context"
"testing"
"github.com/rclone/rclone/fs"
@@ -16,12 +17,10 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{},
})
}
@@ -33,6 +32,36 @@ var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
)
// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
func TestServicePrincipalFileSuccess(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"password": "my secret",
"tenant": "my active directory tenant ID"
}
`
tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
if assert.NoError(t, err) {
assert.NotNil(t, tokenRefresher)
}
}
// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
func TestServicePrincipalFileFailure(t *testing.T) {
ctx := context.TODO()
credentials := `
{
"appId": "my application (client) ID",
"tenant": "my active directory tenant ID"
}
`
_, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
assert.Error(t, err)
assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
}
func TestValidateAccessTier(t *testing.T) {
tests := map[string]struct {
accessTier string

View File

@@ -1,7 +1,7 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || solaris || js || !go1.18
// +build plan9 solaris js !go1.18
//go:build plan9 || solaris || js
// +build plan9 solaris js
package azureblob

backend/azureblob/imds.go (new file, 137 lines)
View File

@@ -0,0 +1,137 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fshttp"
)
const (
azureResource = "https://storage.azure.com"
imdsAPIVersion = "2018-02-01"
msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
)
// This custom type is used to add the port the test server has bound to
// to the request context.
type testPortKey string
type msiIdentifierType int
const (
msiClientID msiIdentifierType = iota
msiObjectID
msiResourceID
)
type userMSI struct {
Type msiIdentifierType
Value string
}
type httpError struct {
Response *http.Response
}
func (e httpError) Error() string {
return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
}
// GetMSIToken attempts to obtain an MSI token from the Azure Instance
// Metadata Service.
func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
// Attempt to get an MSI token; silently continue if unsuccessful.
// This code has been lovingly stolen from azcopy's OAuthTokenManager.
result := adal.Token{}
req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
if err != nil {
fs.Debugf(nil, "Failed to create request: %v", err)
return result, err
}
params := req.URL.Query()
params.Set("resource", azureResource)
params.Set("api-version", imdsAPIVersion)
// Specify user-assigned identity if requested.
if identity != nil {
switch identity.Type {
case msiClientID:
params.Set("client_id", identity.Value)
case msiObjectID:
params.Set("object_id", identity.Value)
case msiResourceID:
params.Set("mi_res_id", identity.Value)
default:
// If this happens, the calling function and this one don't agree on
// what valid ID types exist.
return result, fmt.Errorf("unknown MSI identity type specified")
}
}
req.URL.RawQuery = params.Encode()
// The Metadata header is required by all calls to IMDS.
req.Header.Set("Metadata", "true")
// If this function is run in a test, query the test server instead of IMDS.
testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
if isTest {
req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
req.Host = req.URL.Host
}
// Send request
httpClient := fshttp.NewClient(ctx)
resp, err := httpClient.Do(req)
if err != nil {
return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
}
defer func() { // resp and Body should not be nil
_, err = io.Copy(ioutil.Discard, resp.Body)
if err != nil {
fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
}
err = resp.Body.Close()
if err != nil {
fs.Debugf(nil, "Unable to close IMDS response: %v", err)
}
}()
// Check if the status code indicates success
// The request returns 200 currently, add 201 and 202 as well for possible extension.
switch resp.StatusCode {
case 200, 201, 202:
break
default:
body, _ := ioutil.ReadAll(resp.Body)
fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
return result, httpError{Response: resp}
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return result, fmt.Errorf("couldn't read IMDS response: %w", err)
}
// Remove BOM, if any. azcopy does this so I'm following along.
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
// This would be a good place to persist the token if a large number of rclone
// invocations are being made in a short amount of time. If the token is
// persisted, the azureblob code will need to check for expiry before every
// storage API call.
err = json.Unmarshal(b, &result)
if err != nil {
return result, fmt.Errorf("couldn't unmarshal IMDS response: %w", err)
}
return result, nil
}

View File

@@ -0,0 +1,118 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package azureblob
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
err := r.ParseForm()
require.NoError(t, err)
parameters := r.URL.Query()
(*actual)["path"] = r.URL.Path
(*actual)["Metadata"] = r.Header.Get("Metadata")
(*actual)["method"] = r.Method
for paramName := range parameters {
(*actual)[paramName] = parameters.Get(paramName)
}
// Make response.
response := adal.Token{}
responseBytes, err := json.Marshal(response)
require.NoError(t, err)
_, err = w.Write(responseBytes)
require.NoError(t, err)
}
}
func TestManagedIdentity(t *testing.T) {
// test user-assigned identity specifiers to use
testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
tests := []struct {
identity *userMSI
identityParameterName string
expectedAbsent []string
}{
{&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
{&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
{&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
{nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
}
alwaysExpected := map[string]string{
"path": "/metadata/identity/oauth2/token",
"resource": "https://storage.azure.com",
"Metadata": "true",
"api-version": "2018-02-01",
"method": "GET",
}
for _, test := range tests {
actual := make(map[string]string, 10)
testServer := httptest.NewServer(handler(t, &actual))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, test.identity)
require.NoError(t, err)
// Validate expected query parameters present
expected := make(map[string]string)
for k, v := range alwaysExpected {
expected[k] = v
}
if test.identity != nil {
expected[test.identityParameterName] = test.identity.Value
}
for key := range expected {
value, exists := actual[key]
if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
test.identityParameterName, key) {
assert.Equalf(t, expected[key], value,
"test of %s: parameter %s has incorrect value", test.identityParameterName, key)
}
}
// Validate unexpected query parameters absent
for _, key := range test.expectedAbsent {
_, exists := actual[key]
assert.Falsef(t, exists, "query parameter %s was unexpectedly passed")
}
}
}
func errorHandler(resultCode int) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "Test error generated", resultCode)
}
}
func TestIMDSErrors(t *testing.T) {
errorCodes := []int{404, 429, 500}
for _, code := range errorCodes {
testServer := httptest.NewServer(errorHandler(code))
defer testServer.Close()
testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
require.NoError(t, err)
ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
_, err = GetMSIToken(ctx, nil)
require.Error(t, err)
httpErr, ok := err.(httpError)
require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
}
}

View File

@@ -17,9 +17,9 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
@@ -183,7 +183,7 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
}
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := os.ReadFile(configFile)
file, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, fmt.Errorf("box: failed to read Box config: %w", err)
}

View File

@@ -11,6 +11,7 @@ import (
goflag "flag"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
@@ -101,12 +102,14 @@ func TestMain(m *testing.M) {
func TestInternalListRootAndInnerRemotes(t *testing.T) {
id := fmt.Sprintf("tilrair%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
// Instantiate inner fs
innerFolder := "inner"
runInstance.mkdir(t, rootFs, innerFolder)
rootFs2, _ := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil)
rootFs2, boltDb2 := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs2, boltDb2)
runInstance.writeObjectString(t, rootFs2, "one", "content")
listRoot, err := runInstance.list(t, rootFs, "")
@@ -164,7 +167,7 @@ func TestInternalVfsCache(t *testing.T) {
li2 := [2]string{path.Join("test", "one"), path.Join("test", "second")}
for _, r := range li2 {
var err error
ci, err := os.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
ci, err := ioutil.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
if err != nil || len(ci) == 0 {
log.Printf("========== '%v' not in cache", r)
} else {
@@ -223,7 +226,8 @@ func TestInternalVfsCache(t *testing.T) {
func TestInternalObjWrapFsFound(t *testing.T) {
id := fmt.Sprintf("tiowff%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -255,7 +259,8 @@ func TestInternalObjWrapFsFound(t *testing.T) {
func TestInternalObjNotFound(t *testing.T) {
id := fmt.Sprintf("tionf%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
obj, err := rootFs.NewObject(context.Background(), "404")
require.Error(t, err)
@@ -265,7 +270,8 @@ func TestInternalObjNotFound(t *testing.T) {
func TestInternalCachedWrittenContentMatches(t *testing.T) {
testy.SkipUnreliable(t)
id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -292,7 +298,8 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
t.Skip("Skip test on windows/386")
}
id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
// write the object
runInstance.writeRemoteString(t, rootFs, "one", "one content")
@@ -310,7 +317,8 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
func TestInternalCachedUpdatedContentMatches(t *testing.T) {
testy.SkipUnreliable(t)
id := fmt.Sprintf("ticucm%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
var err error
// create some rand test data
@@ -339,7 +347,8 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
func TestInternalWrappedWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote")
}
@@ -369,7 +378,8 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
func TestInternalLargeWrittenContentMatches(t *testing.T) {
id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if runInstance.rootIsCrypt {
t.Skip("test skipped with crypt remote")
}
@@ -395,7 +405,8 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {
func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
id := fmt.Sprintf("tiwfcns%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -449,7 +460,8 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
func TestInternalMoveWithNotify(t *testing.T) {
id := fmt.Sprintf("timwn%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if !runInstance.wrappedIsExternal {
t.Skipf("Not external")
}
@@ -535,7 +547,8 @@ func TestInternalMoveWithNotify(t *testing.T) {
func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
id := fmt.Sprintf("tincep%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
if !runInstance.wrappedIsExternal {
t.Skipf("Not external")
}
@@ -621,7 +634,8 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
id := fmt.Sprintf("ticsadcf%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -653,7 +667,8 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
func TestInternalCacheWrites(t *testing.T) {
id := "ticw"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"writes": "true"})
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -674,7 +689,8 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
t.Skip("Skip test on windows/386")
}
id := fmt.Sprintf("timcsr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"workers": "1"})
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -709,7 +725,8 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
func TestInternalExpiredEntriesRemoved(t *testing.T) {
id := fmt.Sprintf("tieer%v", time.Now().Unix())
vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"info_age": "5s"}, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)
@@ -746,7 +763,9 @@ func TestInternalBug2117(t *testing.T) {
vfsflags.Opt.DirCacheTime = time.Second * 10
id := fmt.Sprintf("tib2117%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
if runInstance.rootIsCrypt {
t.Skipf("skipping crypt")
@@ -822,7 +841,7 @@ func newRun() *run {
}
if uploadDir == "" {
r.tmpUploadDir, err = os.MkdirTemp("", "rclonecache-tmp")
r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
if err != nil {
panic(fmt.Sprintf("Failed to create temp dir: %v", err))
}
@@ -847,7 +866,7 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
return enc
}
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, cfg map[string]string, flags map[string]string) (fs.Fs, *cache.Persistent) {
fstest.Initialise()
remoteExists := false
for _, s := range config.FileSections() {
@@ -940,15 +959,10 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
err = f.Mkdir(context.Background(), "")
require.NoError(t, err)
t.Cleanup(func() {
runInstance.cleanupFs(t, f)
})
return f, boltDb
}
func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
err := f.Features().Purge(context.Background(), "")
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
@@ -970,7 +984,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
chunk := int64(1024)
cnt := size / chunk
left := size % chunk
f, err := os.CreateTemp("", "rclonecache-tempfile")
f, err := ioutil.TempFile("", "rclonecache-tempfile")
require.NoError(t, err)
for i := 0; i < int(cnt); i++ {

View File

@@ -21,8 +21,10 @@ import (
func TestInternalUploadTempDirCreated(t *testing.T) {
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
runInstance.newCacheFs(t, remoteName, id, false, true,
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
defer runInstance.cleanupFs(t, rootFs, boltDb)
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
require.NoError(t, err)
@@ -61,7 +63,9 @@ func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltD
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
@@ -69,15 +73,19 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}
func TestInternalUploadMoveExistingFile(t *testing.T) {
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one")
require.NoError(t, err)
@@ -111,8 +119,10 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
func TestInternalUploadTempPathCleaned(t *testing.T) {
id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "one")
require.NoError(t, err)
@@ -152,8 +162,10 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
func TestInternalUploadQueueMoreFiles(t *testing.T) {
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
err := rootFs.Mkdir(context.Background(), "test")
require.NoError(t, err)
@@ -201,7 +213,9 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
func TestInternalUploadTempFileOperations(t *testing.T) {
id := "tiutfo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()
@@ -329,7 +343,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
func TestInternalUploadUploadingFileOperations(t *testing.T) {
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)
boltDb.PurgeTempUploads()

View File

@@ -8,7 +8,7 @@ import (
"crypto/tls"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
@@ -167,7 +167,7 @@ func (p *plexConnector) listenWebsocket() {
continue
}
var data []byte
data, err = io.ReadAll(resp.Body)
data, err = ioutil.ReadAll(resp.Body)
if err != nil {
continue
}

View File

@@ -9,6 +9,7 @@ import (
"encoding/binary"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
@@ -472,7 +473,7 @@ func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error
var data []byte
fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
data, err := os.ReadFile(fp)
data, err := ioutil.ReadFile(fp)
if err != nil {
return nil, err
}
@@ -485,7 +486,7 @@ func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error {
_ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm)
filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10))
err := os.WriteFile(filePath, data, os.ModePerm)
err := ioutil.WriteFile(filePath, data, os.ModePerm)
if err != nil {
return err
}

View File

@@ -12,6 +12,7 @@ import (
"fmt"
gohash "hash"
"io"
"io/ioutil"
"math/rand"
"path"
"regexp"
@@ -1037,7 +1038,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
if err != nil {
return err
}
metadata, err := io.ReadAll(reader)
metadata, err := ioutil.ReadAll(reader)
_ = reader.Close() // ensure file handle is freed on windows
if err != nil {
return err
@@ -1096,7 +1097,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
if err != nil {
return "", err
}
data, err := io.ReadAll(reader)
data, err := ioutil.ReadAll(reader)
_ = reader.Close() // ensure file handle is freed on windows
if err != nil {
return "", err

View File

@@ -5,7 +5,7 @@ import (
"context"
"flag"
"fmt"
"io"
"io/ioutil"
"path"
"regexp"
"strings"
@@ -413,7 +413,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
if r == nil {
return
}
data, err := io.ReadAll(r)
data, err := ioutil.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()
@@ -538,7 +538,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
assert.NoError(t, err)
var chunkContents []byte
assert.NotPanics(t, func() {
chunkContents, err = io.ReadAll(r)
chunkContents, err = ioutil.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
@@ -573,7 +573,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
r, err = willyChunk.Open(ctx)
assert.NoError(t, err)
assert.NotPanics(t, func() {
_, err = io.ReadAll(r)
_, err = ioutil.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
@@ -672,7 +672,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
assert.NoError(t, err, "open "+description)
assert.NotNil(t, r, "open stream of "+description)
if err == nil && r != nil {
data, err := io.ReadAll(r)
data, err := ioutil.ReadAll(r)
assert.NoError(t, err, "read all of "+description)
assert.Equal(t, contents, string(data), description+" contents is ok")
_ = r.Close()
@@ -758,8 +758,8 @@ func testFutureProof(t *testing.T, f *Fs) {
assert.Error(t, err)
// Rcat must fail
in := io.NopCloser(bytes.NewBufferString("abc"))
robj, err := operations.Rcat(ctx, f, file, in, modTime, nil)
in := ioutil.NopCloser(bytes.NewBufferString("abc"))
robj, err := operations.Rcat(ctx, f, file, in, modTime)
assert.Nil(t, robj)
assert.NotNil(t, err)
if err != nil {
@@ -854,7 +854,7 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
r, err := dstFile.Open(ctx)
assert.NoError(t, err)
assert.NotNil(t, r)
data, err := io.ReadAll(r)
data, err := ioutil.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()

View File

@@ -631,7 +631,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
if err != nil {
return nil, err
}
uSrc := fs.NewOverrideRemote(src, uRemote)
uSrc := operations.NewOverrideRemote(src, uRemote)
var o fs.Object
if stream {
o, err = u.f.Features().PutStream(ctx, in, uSrc, options...)

View File

@@ -13,6 +13,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"regexp"
"strings"
@@ -28,7 +29,6 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
)
@@ -367,16 +367,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if err != nil {
return nil, err
}
meta, err := readMetadata(ctx, mo)
if err != nil {
return nil, fmt.Errorf("error decoding metadata: %w", err)
meta := readMetadata(ctx, mo)
if meta == nil {
return nil, errors.New("error decoding metadata")
}
// Create our Object
o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode))
if err != nil {
return nil, err
}
return f.newObject(o, mo, meta), nil
return f.newObject(o, mo, meta), err
}
// checkCompressAndType checks if an object is compressible and determines it's mime type
@@ -467,7 +464,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
}
fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file")
tempFile, err := os.CreateTemp("", "rclone-press-")
tempFile, err := ioutil.TempFile("", "rclone-press-")
defer func() {
// these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish
// to ignore them
@@ -545,8 +542,8 @@ func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, o
}
// Transfer the data
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx))
o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx), options)
//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx))
if err != nil {
if o != nil {
removeErr := o.Remove(ctx)
@@ -680,7 +677,7 @@ func (f *Fs) putWithCustomFunctions(ctx context.Context, in io.Reader, src fs.Ob
}
return nil, err
}
return f.newObject(dataObject, mo, meta), nil
return f.newObject(dataObject, mo, meta), err
}
// Put in to the remote path with the modTime given of the given size
@@ -1043,19 +1040,24 @@ func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mim
}
// This function will read the metadata from a metadata object.
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) {
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata) {
// Open our meradata object
rc, err := mo.Open(ctx)
if err != nil {
return nil, err
return nil
}
defer fs.CheckClose(rc, &err)
defer func() {
err := rc.Close()
if err != nil {
fs.Errorf(mo, "Error closing object: %v", err)
}
}()
jr := json.NewDecoder(rc)
meta = new(ObjectMetadata)
if err = jr.Decode(meta); err != nil {
return nil, err
return nil
}
return meta, nil
return meta
}
// Remove removes this object
@@ -1100,9 +1102,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
origName := o.Remote()
if o.meta.Mode != Uncompressed || compressible {
newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType)
if err != nil {
return err
}
if newObject.Object.Remote() != o.Object.Remote() {
if removeErr := o.Object.Remove(ctx); removeErr != nil {
return removeErr
@@ -1116,9 +1115,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
// If we are, just update the object and metadata
newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType)
if err != nil {
return err
}
}
if err != nil {
return err
}
// Update object metadata and return
o.Object = newObject.Object
@@ -1129,9 +1128,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified.
func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object {
if o == nil {
log.Trace(nil, "newObject(%#v, %#v, %#v) called with nil o", o, mo, meta)
}
return &Object{
Object: o,
f: f,
@@ -1144,9 +1140,6 @@ func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object
// This initializes the variables of a press Object with only the size. The metadata will be loaded later on demand.
func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *Object {
if o == nil {
log.Trace(nil, "newObjectSizeAndNameOnly(%#v, %#v, %#v) called with nil o", o, moName, size)
}
return &Object{
Object: o,
f: f,
@@ -1174,7 +1167,7 @@ func (o *Object) loadMetadataIfNotLoaded(ctx context.Context) (err error) {
return err
}
if o.meta == nil {
o.meta, err = readMetadata(ctx, o.mo)
o.meta = readMetadata(ctx, o.mo)
}
return err
}

View File

@@ -8,6 +8,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"strings"
"testing"
@@ -1072,7 +1073,7 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
source := newRandomSource(copySize)
encrypted, err := c.newEncrypter(source, nil)
assert.NoError(t, err)
decrypted, err := c.newDecrypter(io.NopCloser(encrypted))
decrypted, err := c.newDecrypter(ioutil.NopCloser(encrypted))
assert.NoError(t, err)
sink := newRandomSource(copySize)
n, err := io.CopyBuffer(sink, decrypted, buf)
@@ -1143,15 +1144,15 @@ func TestEncryptData(t *testing.T) {
buf := bytes.NewBuffer(test.in)
encrypted, err := c.EncryptData(buf)
assert.NoError(t, err)
out, err := io.ReadAll(encrypted)
out, err := ioutil.ReadAll(encrypted)
assert.NoError(t, err)
assert.Equal(t, test.expected, out)
// Check we can decode the data properly too...
buf = bytes.NewBuffer(out)
decrypted, err := c.DecryptData(io.NopCloser(buf))
decrypted, err := c.DecryptData(ioutil.NopCloser(buf))
assert.NoError(t, err)
out, err = io.ReadAll(decrypted)
out, err = ioutil.ReadAll(decrypted)
assert.NoError(t, err)
assert.Equal(t, test.in, out)
}
@@ -1186,7 +1187,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
fh, err := c.newEncrypter(in, nil)
assert.NoError(t, err)
n, err := io.CopyN(io.Discard, fh, 1e6)
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(32), n)
}
@@ -1256,12 +1257,12 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
in1 := bytes.NewBuffer(file16)
in := io.NopCloser(io.MultiReader(in1, in2))
in := ioutil.NopCloser(io.MultiReader(in1, in2))
fh, err := c.newDecrypter(in)
assert.NoError(t, err)
n, err := io.CopyN(io.Discard, fh, 1e6)
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
assert.Equal(t, io.ErrUnexpectedEOF, err)
assert.Equal(t, int64(16), n)
}
@@ -1273,14 +1274,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
// Make random data
const dataSize = 150000
plaintext, err := io.ReadAll(newRandomSource(dataSize))
plaintext, err := ioutil.ReadAll(newRandomSource(dataSize))
assert.NoError(t, err)
// Encrypt the data
buf := bytes.NewBuffer(plaintext)
encrypted, err := c.EncryptData(buf)
assert.NoError(t, err)
ciphertext, err := io.ReadAll(encrypted)
ciphertext, err := ioutil.ReadAll(encrypted)
assert.NoError(t, err)
trials := []int{0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65,
@@ -1299,7 +1300,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
end = len(ciphertext)
}
}
reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
reader = ioutil.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
return reader, nil
}
@@ -1489,7 +1490,7 @@ func TestDecrypterRead(t *testing.T) {
assert.NoError(t, err, what)
continue
}
_, err = io.ReadAll(fh)
_, err = ioutil.ReadAll(fh)
var expectedErr error
switch {
case i == fileHeaderSize:
@@ -1513,7 +1514,7 @@ func TestDecrypterRead(t *testing.T) {
cd := newCloseDetector(in)
fh, err := c.newDecrypter(cd)
assert.NoError(t, err)
_, err = io.ReadAll(fh)
_, err = ioutil.ReadAll(fh)
assert.Error(t, err, "potato")
assert.Equal(t, 0, cd.closed)
@@ -1523,13 +1524,13 @@ func TestDecrypterRead(t *testing.T) {
copy(file16copy, file16)
for i := range file16copy {
file16copy[i] ^= 0xFF
fh, err := c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
fh, err := c.newDecrypter(ioutil.NopCloser(bytes.NewBuffer(file16copy)))
if i < fileMagicSize {
assert.Error(t, err, ErrorEncryptedBadMagic.Error())
assert.Nil(t, fh)
} else {
assert.NoError(t, err)
_, err = io.ReadAll(fh)
_, err = ioutil.ReadAll(fh)
assert.Error(t, err, ErrorEncryptedFileBadHeader.Error())
}
file16copy[i] ^= 0xFF
@@ -1564,7 +1565,7 @@ func TestDecrypterClose(t *testing.T) {
assert.Equal(t, 0, cd.closed)
// close after reading
out, err := io.ReadAll(fh)
out, err := ioutil.ReadAll(fh)
assert.NoError(t, err)
assert.Equal(t, []byte{1}, out)
assert.Equal(t, io.EOF, fh.err)

View File

@@ -396,8 +396,6 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..
// put implements Put or PutStream
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
ci := fs.GetConfig(ctx)
if f.opt.NoDataEncryption {
o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
if err == nil && o != nil {
@@ -415,9 +413,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
// Find a hash the destination supports to compute a hash of
// the encrypted data
ht := f.Fs.Hashes().GetOne()
if ci.IgnoreChecksum {
ht = hash.None
}
var hasher *hash.MultiHasher
if ht != hash.None {
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
@@ -1052,11 +1047,10 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
// Get the underlying object if there is one
if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
// Prefer direct interface assertion
} else if do, ok := o.ObjectInfo.(*fs.OverrideRemote); ok {
// Unwrap if it is an operations.OverrideRemote
} else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
// Otherwise likely is an operations.OverrideRemote
srcObj = do.UnWrap()
} else {
// Otherwise don't unwrap any further
return "", nil
}
// if this is wrapping a local object then we work out the hash

View File

@@ -17,28 +17,41 @@ import (
"github.com/stretchr/testify/require"
)
type testWrapper struct {
fs.ObjectInfo
}
// UnWrap returns the Object that this Object is wrapping or nil if it
// isn't wrapping anything
func (o testWrapper) UnWrap() fs.Object {
if o, ok := o.ObjectInfo.(fs.Object); ok {
return o
}
return nil
}
// Create a temporary local fs to upload things from
func makeTempLocalFs(t *testing.T) (localFs fs.Fs) {
func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
localFs, err := fs.TemporaryLocalFs(context.Background())
require.NoError(t, err)
t.Cleanup(func() {
cleanup = func() {
require.NoError(t, localFs.Rmdir(context.Background(), ""))
})
return localFs
}
return localFs, cleanup
}
// Upload a file to a remote
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object) {
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
inBuf := bytes.NewBufferString(contents)
t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
obj, err := f.Put(context.Background(), inBuf, upSrc)
require.NoError(t, err)
t.Cleanup(func() {
cleanup = func() {
require.NoError(t, obj.Remove(context.Background()))
})
return obj
}
return obj, cleanup
}
// Test the ObjectInfo
@@ -52,9 +65,11 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
path = "_wrap"
}
localFs := makeTempLocalFs(t)
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
obj := uploadFile(t, localFs, path, contents)
obj, cleanupObj := uploadFile(t, localFs, path, contents)
defer cleanupObj()
// encrypt the data
inBuf := bytes.NewBufferString(contents)
@@ -68,7 +83,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
var oi fs.ObjectInfo = obj
if wrap {
// wrap the object in an fs.ObjectUnwrapper if required
oi = fs.NewOverrideRemote(oi, "new_remote")
oi = testWrapper{oi}
}
// wrap the object in a crypt for upload using the nonce we
@@ -101,13 +116,16 @@ func testComputeHash(t *testing.T, f *Fs) {
t.Skipf("%v: does not support hashes", f.Fs)
}
localFs := makeTempLocalFs(t)
localFs, cleanupLocalFs := makeTempLocalFs(t)
defer cleanupLocalFs()
// Upload a file to localFs as a test object
localObj := uploadFile(t, localFs, path, contents)
localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
defer cleanupLocalObj()
// Upload the same data to the remote Fs also
remoteObj := uploadFile(t, f, path, contents)
remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
defer cleanupRemoteObj()
// Calculate the expected Hash of the remote object
computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)

View File

@@ -14,10 +14,11 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"net/http"
"os"
"path"
"regexp"
"sort"
"strconv"
"strings"
@@ -1107,7 +1108,7 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
// try loading service account credentials from env variable, then from a file
if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil {
return nil, fmt.Errorf("error opening service account credentials file: %w", err)
}
@@ -3430,12 +3431,13 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
if err != nil {
return nil, err
}
re := regexp.MustCompile(`[^\w_. -]+`)
if _, ok := opt["config"]; ok {
lines := []string{}
upstreams := []string{}
names := make(map[string]struct{}, len(drives))
for i, drive := range drives {
name := fspath.MakeConfigName(drive.Name)
name := re.ReplaceAllString(drive.Name, "_")
for {
if _, found := names[name]; !found {
break
@@ -3798,7 +3800,7 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
data = data[:limit]
}
return io.NopCloser(bytes.NewReader(data)), nil
return ioutil.NopCloser(bytes.NewReader(data)), nil
}
func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,

View File

@@ -7,6 +7,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"os"
"path"
@@ -77,7 +78,7 @@ var additionalMimeTypes = map[string]string{
// Load the example export formats into exportFormats for testing
func TestInternalLoadExampleFormats(t *testing.T) {
fetchFormatsOnce.Do(func() {})
buf, err := os.ReadFile(filepath.FromSlash("test/about.json"))
buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
var about struct {
ExportFormats map[string][]string `json:"exportFormats,omitempty"`
ImportFormats map[string][]string `json:"importFormats,omitempty"`

View File

@@ -20,6 +20,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
@@ -1185,7 +1186,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return nil, errors.New("can't download - no id")
}
if o.contentType == emptyMimeType {
return io.NopCloser(bytes.NewReader([]byte{})), nil
return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
}
fs.FixRangeOption(options, o.size)
resp, err := o.fs.rpc(ctx, "getFile", params{

View File

@@ -15,7 +15,7 @@ import (
"sync"
"time"
"github.com/rclone/ftp"
"github.com/jlaffaye/ftp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
@@ -70,7 +70,7 @@ func init() {
When using implicit FTP over TLS the client connects using TLS
right from the start which breaks compatibility with
non-TLS-aware servers. This is usually served over port 990 rather
than port 21. Cannot be used in combination with explicit FTPS.`,
than port 21. Cannot be used in combination with explicit FTP.`,
Default: false,
}, {
Name: "explicit_tls",
@@ -78,7 +78,7 @@ than port 21. Cannot be used in combination with explicit FTPS.`,
When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection
to an encrypted one. Cannot be used in combination with implicit FTPS.`,
to an encrypted one. Cannot be used in combination with implicit FTP.`,
Default: false,
}, {
Name: "concurrency",
@@ -124,11 +124,6 @@ So for |concurrency 3| you'd use |--checkers 2 --transfers 2
Help: "Use MDTM to set modification time (VsFtpd quirk)",
Default: false,
Advanced: true,
}, {
Name: "force_list_hidden",
Help: "Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.",
Default: false,
Advanced: true,
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),
@@ -210,7 +205,6 @@ type Options struct {
DisableMLSD bool `config:"disable_mlsd"`
DisableUTF8 bool `config:"disable_utf8"`
WritingMDTM bool `config:"writing_mdtm"`
ForceListHidden bool `config:"force_list_hidden"`
IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"`
@@ -336,44 +330,14 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
fs.Debugf(f, "Connecting to FTP server")
// Make ftp library dial with fshttp dialer optionally using TLS
initialConnection := true
dial := func(network, address string) (conn net.Conn, err error) {
fs.Debugf(f, "dial(%q,%q)", network, address)
defer func() {
fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
}()
conn, err = fshttp.NewDialer(ctx).Dial(network, address)
if err != nil {
return nil, err
if f.tlsConf != nil && err == nil {
conn = tls.Client(conn, f.tlsConf)
}
// Connect using cleartext only for non TLS
if f.tlsConf == nil {
return conn, nil
}
// Initial connection only needs to be cleartext for explicit TLS
if f.opt.ExplicitTLS && initialConnection {
initialConnection = false
return conn, nil
}
// Upgrade connection to TLS
tlsConn := tls.Client(conn, f.tlsConf)
// Do the initial handshake - tls.Client doesn't do it for us
// If we do this then connections to proftpd/pureftpd lock up
// See: https://github.com/rclone/rclone/issues/6426
// See: https://github.com/jlaffaye/ftp/issues/282
if false {
err = tlsConn.HandshakeContext(ctx)
if err != nil {
_ = conn.Close()
return nil, err
}
}
return tlsConn, nil
}
ftpConfig := []ftp.DialOption{
ftp.DialWithContext(ctx),
ftp.DialWithDialFunc(dial),
return
}
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dial)}
if f.opt.TLS {
// Our dialer takes care of TLS but ftp library also needs tlsConf
@@ -381,6 +345,12 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
} else if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
// Initial connection needs to be cleartext for explicit TLS
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
if err != nil {
return nil, err
}
ftpConfig = append(ftpConfig, ftp.DialWithNetConn(conn))
}
if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
@@ -397,9 +367,6 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.WritingMDTM {
ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true))
}
if f.opt.ForceListHidden {
ftpConfig = append(ftpConfig, ftp.DialWithForceListHidden(true))
}
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
}
@@ -657,7 +624,8 @@ func (f *Fs) dirFromStandardPath(dir string) string {
// findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
if remote == "" || remote == "." || remote == "/" {
fullPath := path.Join(f.root, remote)
if fullPath == "" || fullPath == "." || fullPath == "/" {
// if root, assume exists and synthesize an entry
return &ftp.Entry{
Name: "",
@@ -665,32 +633,13 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
Time: time.Now(),
}, nil
}
dir := path.Dir(fullPath)
base := path.Base(fullPath)
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, fmt.Errorf("findItem: %w", err)
}
// returns TRUE if MLST is supported which is required to call GetEntry
if c.IsTimePreciseInList() {
entry, err := c.GetEntry(f.opt.Enc.FromStandardPath(remote))
f.putFtpConnection(&c, err)
if err != nil {
err = translateErrorFile(err)
if err == fs.ErrorObjectNotFound {
return nil, nil
}
return nil, err
}
if entry != nil {
f.entryToStandard(entry)
}
return entry, nil
}
dir := path.Dir(remote)
base := path.Base(remote)
files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
if err != nil {
@@ -709,7 +658,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
entry, err := f.findItem(ctx, path.Join(f.root, remote))
entry, err := f.findItem(ctx, remote)
if err != nil {
return nil, err
}
@@ -731,7 +680,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
// dirExists checks the directory pointed to by remote exists or not
func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
entry, err := f.findItem(ctx, path.Join(f.root, remote))
entry, err := f.findItem(ctx, remote)
if err != nil {
return false, fmt.Errorf("dirExists: %w", err)
}
@@ -875,18 +824,32 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// getInfo reads the FileInfo for a path
func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) {
// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
file, err := f.findItem(ctx, remote)
dir := path.Dir(remote)
base := path.Base(remote)
c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, err
} else if file != nil {
info := &FileInfo{
Name: remote,
Size: file.Size,
ModTime: file.Time,
precise: f.fLstTime,
IsDir: file.Type == ftp.EntryTypeFolder,
return nil, fmt.Errorf("getInfo: %w", err)
}
files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
if err != nil {
return nil, translateErrorFile(err)
}
for i := range files {
file := files[i]
f.entryToStandard(file)
if file.Name == base {
info := &FileInfo{
Name: remote,
Size: file.Size,
ModTime: file.Time,
precise: f.fLstTime,
IsDir: file.Type == ftp.EntryTypeFolder,
}
return info, nil
}
return info, nil
}
return nil, fs.ErrorObjectNotFound
}
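
For context on the implicit/explicit FTPS options and the dial changes in the hunks above, here is a minimal standalone sketch of the difference, assuming the github.com/jlaffaye/ftp options already used in this file (ftp.DialWithTLS and ftp.DialWithExplicitTLS); the host name and ports are illustrative, not taken from rclone:

package main

import (
	"crypto/tls"
	"log"

	"github.com/jlaffaye/ftp"
)

func main() {
	// illustrative host; any FTPS-capable server would do
	tlsConf := &tls.Config{ServerName: "ftp.example.com"}

	// Implicit FTPS: TLS from the very first byte, conventionally on port 990.
	c1, err := ftp.Dial("ftp.example.com:990", ftp.DialWithTLS(tlsConf))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = c1.Quit() }()

	// Explicit FTPS: plain TCP on port 21, then the client upgrades with AUTH TLS.
	c2, err := ftp.Dial("ftp.example.com:21", ftp.DialWithExplicitTLS(tlsConf))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = c2.Quit() }()
}

The two modes are mutually exclusive, which is what the updated option help text above spells out.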

View File

@@ -34,9 +34,9 @@ func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
// test that big file uploads do not cause network i/o timeout
func (f *Fs) testUploadTimeout(t *testing.T) {
const (
fileSize = 100000000 // 100 MiB
idleTimeout = 1 * time.Second // small because test server is local
maxTime = 10 * time.Second // prevent test hangup
fileSize = 100000000 // 100 MiB
idleTimeout = 40 * time.Millisecond // small because test server is local
maxTime = 10 * time.Second // prevent test hangup
)
if testing.Short() {

View File

@@ -19,8 +19,8 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path"
"strconv"
"strings"
@@ -487,7 +487,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// try loading service account credentials from env variable, then from a file
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil {
return nil, fmt.Errorf("error opening service account credentials file: %w", err)
}

View File

@@ -3,7 +3,7 @@ package googlephotos
import (
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"path"
"testing"
@@ -12,6 +12,7 @@ import (
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
@@ -55,7 +56,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote))
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()
@@ -98,7 +99,7 @@ func TestIntegration(t *testing.T) {
t.Run("ObjectOpen", func(t *testing.T) {
in, err := dstObj.Open(ctx)
require.NoError(t, err)
buf, err := io.ReadAll(in)
buf, err := ioutil.ReadAll(in)
require.NoError(t, err)
require.NoError(t, in.Close())
assert.True(t, len(buf) > 1000)
@@ -220,7 +221,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote))
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()

View File

@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"path"
"time"
@@ -117,7 +118,7 @@ func (o *Object) updateHashes(ctx context.Context) error {
defer func() {
_ = r.Close()
}()
if _, err = io.Copy(io.Discard, r); err != nil {
if _, err = io.Copy(ioutil.Discard, r); err != nil {
fs.Infof(o, "update failed (copy): %v", err)
return err
}

View File

@@ -13,6 +13,7 @@ import (
"net/http"
"net/url"
"path"
"strconv"
"strings"
"sync"
"time"
@@ -304,7 +305,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
fs: f,
remote: remote,
}
err := o.head(ctx)
err := o.stat(ctx)
if err != nil {
return nil, err
}
@@ -316,6 +317,15 @@ func (f *Fs) url(remote string) string {
return f.endpointURL + rest.URLPathEscape(remote)
}
// parse s into an int64, on failure return def
func parseInt64(s string, def int64) int64 {
n, e := strconv.ParseInt(s, 10, 64)
if e != nil {
return def
}
return n
}
// Errors returned by parseName
var (
errURLJoinFailed = errors.New("URLJoin failed")
@@ -490,7 +500,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fs: f,
remote: remote,
}
switch err := file.head(ctx); err {
switch err := file.stat(ctx); err {
case nil:
add(file)
case fs.ErrorNotAFile:
@@ -569,8 +579,8 @@ func (o *Object) url() string {
return o.fs.url(o.remote)
}
// head sends a HEAD request to update info fields in the Object
func (o *Object) head(ctx context.Context) error {
// stat updates the info field in the Object
func (o *Object) stat(ctx context.Context) error {
if o.fs.opt.NoHead {
o.size = -1
o.modTime = timeUnset
@@ -591,19 +601,13 @@ func (o *Object) head(ctx context.Context) error {
if err != nil {
return fmt.Errorf("failed to stat: %w", err)
}
return o.decodeMetadata(ctx, res)
}
// decodeMetadata updates info fields in the Object according to HTTP response headers
func (o *Object) decodeMetadata(ctx context.Context, res *http.Response) error {
t, err := http.ParseTime(res.Header.Get("Last-Modified"))
if err != nil {
t = timeUnset
}
o.size = parseInt64(res.Header.Get("Content-Length"), -1)
o.modTime = t
o.contentType = res.Header.Get("Content-Type")
o.size = rest.ParseSizeFromHeaders(res.Header)
// If NoSlash is set then check ContentType to see if it is a directory
if o.fs.opt.NoSlash {
mediaType, _, err := mime.ParseMediaType(o.contentType)
@@ -649,9 +653,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil {
return nil, fmt.Errorf("Open failed: %w", err)
}
if err = o.decodeMetadata(ctx, res); err != nil {
return nil, fmt.Errorf("decodeMetadata failed: %w", err)
}
return res.Body, nil
}
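
The head/stat and decodeMetadata changes above come down to reading size, modification time and content type from HTTP response headers. A minimal standard-library sketch of that parsing, with made-up header values and assuming nothing beyond net/http, strconv and time:

package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

func main() {
	h := http.Header{}
	h.Set("Last-Modified", "Mon, 12 Sep 2022 13:27:22 GMT")
	h.Set("Content-Length", "42")
	h.Set("Content-Type", "text/plain")

	modTime, err := http.ParseTime(h.Get("Last-Modified"))
	if err != nil {
		modTime = time.Unix(0, 0) // fall back to an "unset" time
	}
	size, err := strconv.ParseInt(h.Get("Content-Length"), 10, 64)
	if err != nil {
		size = -1 // unknown size, like parseInt64's default above
	}
	fmt.Println(modTime, size, h.Get("Content-Type"))
}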

View File

@@ -3,7 +3,7 @@ package http
import (
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
@@ -33,21 +33,20 @@ var (
lineEndSize = 1
)
// prepareServer prepares the test server and shuts it down automatically
// when the test completes.
func prepareServer(t *testing.T) configmap.Simple {
// prepareServer the test server and return a function to tidy it up afterwards
func prepareServer(t *testing.T) (configmap.Simple, func()) {
// file server for test/files
fileServer := http.FileServer(http.Dir(filesPath))
// verify the file path is correct, and also check which line endings
// are used to get sizes right ("\n" except on Windows, but even there
// we may have "\n" or "\r\n" depending on git crlf setting)
fileList, err := os.ReadDir(filesPath)
fileList, err := ioutil.ReadDir(filesPath)
require.NoError(t, err)
require.Greater(t, len(fileList), 0)
for _, file := range fileList {
if !file.IsDir() {
data, _ := os.ReadFile(filepath.Join(filesPath, file.Name()))
data, _ := ioutil.ReadFile(filepath.Join(filesPath, file.Name()))
if strings.HasSuffix(string(data), "\r\n") {
lineEndSize = 2
}
@@ -79,21 +78,20 @@ func prepareServer(t *testing.T) configmap.Simple {
"url": ts.URL,
"headers": strings.Join(headers, ","),
}
t.Cleanup(ts.Close)
return m
// return a function to tidy up
return m, ts.Close
}
// prepare prepares the test server and shuts it down automatically
// when the test completes.
func prepare(t *testing.T) fs.Fs {
m := prepareServer(t)
// prepare the test server and return a function to tidy it up afterwards
func prepare(t *testing.T) (fs.Fs, func()) {
m, tidy := prepareServer(t)
// Instantiate it
f, err := NewFs(context.Background(), remoteName, "", m)
require.NoError(t, err)
return f
return f, tidy
}
func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
@@ -136,19 +134,22 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
}
func TestListRoot(t *testing.T) {
f := prepare(t)
f, tidy := prepare(t)
defer tidy()
testListRoot(t, f, false)
}
func TestListRootNoSlash(t *testing.T) {
f := prepare(t)
f, tidy := prepare(t)
f.(*Fs).opt.NoSlash = true
defer tidy()
testListRoot(t, f, true)
}
func TestListSubDir(t *testing.T) {
f := prepare(t)
f, tidy := prepare(t)
defer tidy()
entries, err := f.List(context.Background(), "three")
require.NoError(t, err)
@@ -165,7 +166,8 @@ func TestListSubDir(t *testing.T) {
}
func TestNewObject(t *testing.T) {
f := prepare(t)
f, tidy := prepare(t)
defer tidy()
o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err)
@@ -192,69 +194,36 @@ func TestNewObject(t *testing.T) {
}
func TestOpen(t *testing.T) {
m := prepareServer(t)
f, tidy := prepare(t)
defer tidy()
for _, head := range []bool{false, true} {
if !head {
m.Set("no_head", "true")
}
f, err := NewFs(context.Background(), remoteName, "", m)
require.NoError(t, err)
o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err)
for _, rangeRead := range []bool{false, true} {
o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err)
if !head {
// Test mod time is still indeterminate
tObj := o.ModTime(context.Background())
assert.Equal(t, time.Duration(0), time.Unix(0, 0).Sub(tObj))
// Test file size is still indeterminate
assert.Equal(t, int64(-1), o.Size())
}
var data []byte
if !rangeRead {
// Test normal read
fd, err := o.Open(context.Background())
require.NoError(t, err)
data, err = io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
if lineEndSize == 2 {
assert.Equal(t, "beetroot\r\n", string(data))
} else {
assert.Equal(t, "beetroot\n", string(data))
}
} else {
// Test with range request
fd, err := o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
require.NoError(t, err)
data, err = io.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "eetro", string(data))
}
fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
require.NoError(t, err)
tFile := fi.ModTime()
// Test the time is always correct on the object after file open
tObj := o.ModTime(context.Background())
fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
if !rangeRead {
// Test the file size
assert.Equal(t, int64(len(data)), o.Size())
}
}
// Test normal read
fd, err := o.Open(context.Background())
require.NoError(t, err)
data, err := ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
if lineEndSize == 2 {
assert.Equal(t, "beetroot\r\n", string(data))
} else {
assert.Equal(t, "beetroot\n", string(data))
}
// Test with range request
fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
require.NoError(t, err)
data, err = ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
assert.Equal(t, "eetro", string(data))
}
func TestMimeType(t *testing.T) {
f := prepare(t)
f, tidy := prepare(t)
defer tidy()
o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err)
@@ -265,7 +234,8 @@ func TestMimeType(t *testing.T) {
}
func TestIsAFileRoot(t *testing.T) {
m := prepareServer(t)
m, tidy := prepareServer(t)
defer tidy()
f, err := NewFs(context.Background(), remoteName, "one%.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
@@ -274,7 +244,8 @@ func TestIsAFileRoot(t *testing.T) {
}
func TestIsAFileSubDir(t *testing.T) {
m := prepareServer(t)
m, tidy := prepareServer(t)
defer tidy()
f, err := NewFs(context.Background(), remoteName, "three/underthree.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)

backend/hubic/auth.go Normal file
View File

@@ -0,0 +1,62 @@
package hubic
import (
"context"
"net/http"
"time"
"github.com/ncw/swift/v2"
"github.com/rclone/rclone/fs"
)
// auth is an authenticator for swift
type auth struct {
f *Fs
}
// newAuth creates a swift authenticator
func newAuth(f *Fs) *auth {
return &auth{
f: f,
}
}
// Request constructs an http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(ctx context.Context, c *swift.Connection) (r *http.Request, err error) {
const retries = 10
for try := 1; try <= retries; try++ {
err = a.f.getCredentials(context.TODO())
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
fs.Debugf(a.f, "retrying auth request %d/%d: %v", try, retries, err)
}
return nil, err
}
// Response parses the result of an http request
func (a *auth) Response(ctx context.Context, resp *http.Response) error {
return nil
}
// The public storage URL - set Internal to true to read
// internal/service net URL
func (a *auth) StorageUrl(Internal bool) string { // nolint
return a.f.credentials.Endpoint
}
// The access token
func (a *auth) Token() string {
return a.f.credentials.Token
}
// The CDN url if available
func (a *auth) CdnUrl() string { // nolint
return ""
}
// Check the interfaces are satisfied
var _ swift.Authenticator = (*auth)(nil)


backend/hubic/hubic.go Normal file
View File

@@ -0,0 +1,200 @@
// Package hubic provides an interface to the Hubic object storage
// system.
package hubic
// This uses the normal swift mechanism to update the credentials and
// ignores the expires field returned by the Hubic API. This may need
// to be revisited after some actual experience.
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"strings"
"time"
swiftLib "github.com/ncw/swift/v2"
"github.com/rclone/rclone/backend/swift"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/lib/oauthutil"
"golang.org/x/oauth2"
)
const (
rcloneClientID = "api_hubic_svWP970PvSWbw5G3PzrAqZ6X2uHeZBPI"
rcloneEncryptedClientSecret = "leZKCcqy9movLhDWLVXX8cSLp_FzoiAPeEJOIOMRw1A5RuC4iLEPDYPWVF46adC_MVonnLdVEOTHVstfBOZ_lY4WNp8CK_YWlpRZ9diT5YI"
)
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauth2.Config{
Scopes: []string{
"credentials.r", // Read OpenStack credentials
},
Endpoint: oauth2.Endpoint{
AuthURL: "https://api.hubic.com/oauth/auth/",
TokenURL: "https://api.hubic.com/oauth/token/",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "hubic",
Description: "Hubic",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
return oauthutil.ConfigOut("", &oauthutil.Options{
OAuth2Config: oauthConfig,
})
},
Options: append(oauthutil.SharedOptions, swift.SharedOptions...),
})
}
// credentials is the JSON returned from the Hubic API to read the
// OpenStack credentials
type credentials struct {
Token string `json:"token"` // OpenStack token
Endpoint string `json:"endpoint"` // OpenStack endpoint
Expires string `json:"expires"` // Expires date - e.g. "2015-11-09T14:24:56+01:00"
}
// Fs represents a remote hubic
type Fs struct {
fs.Fs // wrapped Fs
features *fs.Features // optional features
client *http.Client // client for oauth api
credentials credentials // returned from the Hubic API
expires time.Time // time credentials expire
}
// Object describes a swift object
type Object struct {
*swift.Object
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.Object.String()
}
// ------------------------------------------------------------
// String converts this Fs to a string
func (f *Fs) String() string {
if f.Fs == nil {
return "Hubic"
}
return fmt.Sprintf("Hubic %s", f.Fs.String())
}
// getCredentials reads the OpenStack Credentials using the Hubic API
//
// The credentials are read into the Fs
func (f *Fs) getCredentials(ctx context.Context) (err error) {
req, err := http.NewRequestWithContext(ctx, "GET", "https://api.hubic.com/1.0/account/credentials", nil)
if err != nil {
return err
}
resp, err := f.client.Do(req)
if err != nil {
return err
}
defer fs.CheckClose(resp.Body, &err)
if resp.StatusCode < 200 || resp.StatusCode > 299 {
body, _ := ioutil.ReadAll(resp.Body)
bodyStr := strings.TrimSpace(strings.ReplaceAll(string(body), "\n", " "))
return fmt.Errorf("failed to get credentials: %s: %s", resp.Status, bodyStr)
}
decoder := json.NewDecoder(resp.Body)
var result credentials
err = decoder.Decode(&result)
if err != nil {
return err
}
// fs.Debugf(f, "Got credentials %+v", result)
if result.Token == "" || result.Endpoint == "" || result.Expires == "" {
return errors.New("couldn't read token, result and expired from credentials")
}
f.credentials = result
expires, err := time.Parse(time.RFC3339, result.Expires)
if err != nil {
return err
}
f.expires = expires
fs.Debugf(f, "Got swift credentials (expiry %v in %v)", f.expires, time.Until(f.expires))
return nil
}
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
client, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
if err != nil {
return nil, fmt.Errorf("failed to configure Hubic: %w", err)
}
f := &Fs{
client: client,
}
// Make the swift Connection
ci := fs.GetConfig(ctx)
c := &swiftLib.Connection{
Auth: newAuth(f),
ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(ctx),
}
err = c.Authenticate(ctx)
if err != nil {
return nil, fmt.Errorf("error authenticating swift connection: %w", err)
}
// Parse config into swift.Options struct
opt := new(swift.Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Make inner swift Fs from the connection
swiftFs, err := swift.NewFsWithConnection(ctx, opt, name, root, c, true)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}
f.Fs = swiftFs
f.features = f.Fs.Features().Wrap(f)
return f, err
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
return f.Fs
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
)
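
getCredentials above fetches a small JSON document from the Hubic API and parses its expiry as RFC3339. A minimal standalone sketch of that decode step, assuming only the standard library; the token and endpoint values are invented, while the field names and expiry format come from the credentials struct and comment above:

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"time"
)

type credentials struct {
	Token    string `json:"token"`
	Endpoint string `json:"endpoint"`
	Expires  string `json:"expires"`
}

func main() {
	// invented response body in the shape returned by /1.0/account/credentials
	body := []byte(`{"token":"abc123","endpoint":"https://storage.example.net/v1/AUTH_x","expires":"2015-11-09T14:24:56+01:00"}`)

	var c credentials
	if err := json.Unmarshal(body, &c); err != nil {
		log.Fatal(err)
	}
	expires, err := time.Parse(time.RFC3339, c.Expires)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.Endpoint, time.Until(expires))
}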

View File

@@ -0,0 +1,19 @@
// Test Hubic filesystem interface
package hubic_test
import (
"testing"
"github.com/rclone/rclone/backend/hubic"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestHubic:",
NilObject: (*hubic.Object)(nil),
SkipFsCheckWrap: true,
SkipObjectCheckWrap: true,
})
}

View File

@@ -12,6 +12,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
@@ -821,7 +822,7 @@ func (f *Fs) allocatePathRaw(file string, absolute bool) string {
func grantTypeFilter(req *http.Request) {
if legacyTokenURL == req.URL.String() {
// read the entire body
refreshBody, err := io.ReadAll(req.Body)
refreshBody, err := ioutil.ReadAll(req.Body)
if err != nil {
return
}
@@ -831,7 +832,7 @@ func grantTypeFilter(req *http.Request) {
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
// set the new ReadCloser (with a dummy Close())
req.Body = io.NopCloser(bytes.NewReader(refreshBody))
req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
}
}
@@ -1788,7 +1789,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
var tempFile *os.File
// create the cache file
tempFile, err = os.CreateTemp("", cachePrefix)
tempFile, err = ioutil.TempFile("", cachePrefix)
if err != nil {
return
}
@@ -1816,7 +1817,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
} else {
// that's a small file, just read it into memory
var inData []byte
inData, err = io.ReadAll(teeReader)
inData, err = ioutil.ReadAll(teeReader)
if err != nil {
return
}
@@ -1913,7 +1914,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// copy the already uploaded bytes into the trash :)
var result api.UploadResponse
_, err = io.CopyN(io.Discard, in, response.ResumePos)
_, err = io.CopyN(ioutil.Discard, in, response.ResumePos)
if err != nil {
return err
}

View File

@@ -7,6 +7,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
@@ -32,6 +33,7 @@ import (
// Constants
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
const linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
const useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
// Register with Fs
@@ -71,7 +73,7 @@ supported by all file systems) under the "user.*" prefix.
Advanced: true,
}, {
Name: "links",
Help: "Translate symlinks to/from regular files with a '" + fs.LinkSuffix + "' extension.",
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
Default: false,
NoPrefix: true,
ShortOpt: "l",
@@ -122,8 +124,8 @@ routine so this flag shouldn't normally be used.`,
Help: `Don't check to see if the files change during upload.
Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy -
source file is being updated" if the file changes during upload.
are being uploaded and aborts with a message which starts "can't copy
- source file is being updated" if the file changes during upload.
However on some file systems this modification time check may fail (e.g.
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
@@ -374,8 +376,8 @@ func (f *Fs) caseInsensitive() bool {
//
// for regular files, localPath is returned unchanged
func translateLink(remote, localPath string) (newLocalPath string, isTranslatedLink bool) {
isTranslatedLink = strings.HasSuffix(remote, fs.LinkSuffix)
newLocalPath = strings.TrimSuffix(localPath, fs.LinkSuffix)
isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
newLocalPath = strings.TrimSuffix(localPath, linkSuffix)
return newLocalPath, isTranslatedLink
}
@@ -502,7 +504,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue
}
}
err = fmt.Errorf("failed to read directory %q: %w", namepath, fierr)
err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue
@@ -519,6 +521,11 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
name := fi.Name()
mode := fi.Mode()
newRemote := f.cleanRemote(dir, name)
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
continue
}
// Follow symlinks if required
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
localPath := filepath.Join(fsDirPath, name)
@@ -535,11 +542,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
}
mode = fi.Mode()
}
// Don't include non directory if not included
// we leave directory filtering to the layer above
if useFilter && !fi.IsDir() && !filter.IncludeRemote(newRemote) {
continue
}
if fi.IsDir() {
// Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
@@ -550,7 +552,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
} else {
// Check whether this link should be translated
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
newRemote += fs.LinkSuffix
newRemote += linkSuffix
}
fso, err := f.newObjectWithInfo(newRemote, fi)
if err != nil {
@@ -644,7 +646,7 @@ func (f *Fs) readPrecision() (precision time.Duration) {
precision = time.Second
// Create temporary file and test it
fd, err := os.CreateTemp("", "rclone")
fd, err := ioutil.TempFile("", "rclone")
if err != nil {
// If failed return 1s
// fmt.Println("Failed to create temp file", err)
@@ -1071,7 +1073,7 @@ func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err
if err != nil {
return nil, err
}
return readers.NewLimitedReadCloser(io.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
return readers.NewLimitedReadCloser(ioutil.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
}
// Open an object for read
@@ -1398,27 +1400,30 @@ func (o *Object) writeMetadata(metadata fs.Metadata) (err error) {
}
func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
if runtime.GOOS != "windows" || !strings.HasPrefix(s, "\\") {
if !filepath.IsAbs(s) {
if runtime.GOOS == "windows" {
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
} else {
s = filepath.Clean(s)
}
}
if runtime.GOOS == "windows" {
s = filepath.ToSlash(s)
vol := filepath.VolumeName(s)
s = vol + enc.FromStandardPath(s[len(vol):])
s = filepath.FromSlash(s)
if !noUNC {
// Convert to UNC
s = file.UNCPath(s)
}
return s
}
if !filepath.IsAbs(s) {
s2, err := filepath.Abs(s)
if err == nil {
s = s2
}
}
s = enc.FromStandardPath(s)
return s
}

View File

@@ -4,7 +4,7 @@ import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
@@ -33,6 +33,7 @@ func TestMain(m *testing.M) {
// Test copy with source file that's updating
func TestUpdatingCheck(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
filePath := "sub dir/local test"
r.WriteFile(filePath, "content", time.Now())
@@ -77,6 +78,7 @@ func TestUpdatingCheck(t *testing.T) {
func TestSymlink(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
f := r.Flocal.(*Fs)
dir := f.root
@@ -91,7 +93,7 @@ func TestSymlink(t *testing.T) {
require.NoError(t, lChtimes(symlinkPath, modTime2, modTime2))
// Object viewed as symlink
file2 := fstest.NewItem("symlink.txt"+fs.LinkSuffix, "file.txt", modTime2)
file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
// Object viewed as destination
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
@@ -120,7 +122,7 @@ func TestSymlink(t *testing.T) {
// Create a symlink
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+fs.LinkSuffix, "file.txt", modTime3, false)
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
r.CheckLocalItems(t, file1, file2, file3)
@@ -136,9 +138,9 @@ func TestSymlink(t *testing.T) {
assert.Equal(t, "file.txt", linkText)
// Check that NewObject gets the correct object
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+fs.LinkSuffix)
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
require.NoError(t, err)
assert.Equal(t, "symlink2.txt"+fs.LinkSuffix, o.Remote())
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
assert.Equal(t, int64(8), o.Size())
// Check that NewObject doesn't see the non suffixed version
@@ -148,7 +150,7 @@ func TestSymlink(t *testing.T) {
// Check reading the object
in, err := o.Open(ctx)
require.NoError(t, err)
contents, err := io.ReadAll(in)
contents, err := ioutil.ReadAll(in)
require.NoError(t, err)
require.Equal(t, "file.txt", string(contents))
require.NoError(t, in.Close())
@@ -156,7 +158,7 @@ func TestSymlink(t *testing.T) {
// Check reading the object with range
in, err = o.Open(ctx, &fs.RangeOption{Start: 2, End: 5})
require.NoError(t, err)
contents, err = io.ReadAll(in)
contents, err = ioutil.ReadAll(in)
require.NoError(t, err)
require.Equal(t, "file.txt"[2:5+1], string(contents))
require.NoError(t, in.Close())
@@ -175,6 +177,7 @@ func TestSymlinkError(t *testing.T) {
func TestHashOnUpdate(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt"
when := time.Now()
r.WriteFile(filePath, "content", when)
@@ -205,6 +208,7 @@ func TestHashOnUpdate(t *testing.T) {
func TestHashOnDelete(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt"
when := time.Now()
r.WriteFile(filePath, "content", when)
@@ -233,6 +237,7 @@ func TestHashOnDelete(t *testing.T) {
func TestMetadata(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "metafile.txt"
when := time.Now()
const dayLength = len("2001-01-01")
@@ -367,6 +372,7 @@ func TestMetadata(t *testing.T) {
func TestFilter(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now()
r.WriteFile("included", "included file", when)
r.WriteFile("excluded", "excluded file", when)

View File

@@ -5,41 +5,19 @@ package local
import (
"fmt"
"sync"
"time"
"github.com/rclone/rclone/fs"
"golang.org/x/sys/unix"
)
var (
statxCheckOnce sync.Once
readMetadataFromFileFn func(o *Object, m *fs.Metadata) (err error)
)
// Read the metadata from the file into metadata where possible
func (o *Object) readMetadataFromFile(m *fs.Metadata) (err error) {
statxCheckOnce.Do(func() {
// Check statx() is available as it was only introduced in kernel 4.11
// If not, fall back to fstatat() which was introduced in 2.6.16 which is guaranteed for all Go versions
var stat unix.Statx_t
if unix.Statx(unix.AT_FDCWD, ".", 0, unix.STATX_ALL, &stat) != unix.ENOSYS {
readMetadataFromFileFn = readMetadataFromFileStatx
} else {
readMetadataFromFileFn = readMetadataFromFileFstatat
}
})
return readMetadataFromFileFn(o, m)
}
// Read the metadata from the file into metadata where possible
func readMetadataFromFileStatx(o *Object, m *fs.Metadata) (err error) {
flags := unix.AT_SYMLINK_NOFOLLOW
if o.fs.opt.FollowSymlinks {
flags = 0
}
var stat unix.Statx_t
// statx() was added to Linux in kernel 4.11
err = unix.Statx(unix.AT_FDCWD, o.path, flags, (0 |
unix.STATX_TYPE | // Want stx_mode & S_IFMT
unix.STATX_MODE | // Want stx_mode & ~S_IFMT
@@ -67,36 +45,3 @@ func readMetadataFromFileStatx(o *Object, m *fs.Metadata) (err error) {
setTime("btime", stat.Btime)
return nil
}
// Read the metadata from the file into metadata where possible
func readMetadataFromFileFstatat(o *Object, m *fs.Metadata) (err error) {
flags := unix.AT_SYMLINK_NOFOLLOW
if o.fs.opt.FollowSymlinks {
flags = 0
}
var stat unix.Stat_t
// fstatat() was added to Linux in kernel 2.6.16
// Go only supports 2.6.32 or later
err = unix.Fstatat(unix.AT_FDCWD, o.path, &stat, flags)
if err != nil {
return err
}
m.Set("mode", fmt.Sprintf("%0o", stat.Mode))
m.Set("uid", fmt.Sprintf("%d", stat.Uid))
m.Set("gid", fmt.Sprintf("%d", stat.Gid))
if stat.Rdev != 0 {
m.Set("rdev", fmt.Sprintf("%x", stat.Rdev))
}
setTime := func(key string, t unix.Timespec) {
// The types of t.Sec and t.Nsec vary from int32 to int64 on
// different Linux architectures so we need to cast them to
// int64 here and hence need to quiet the linter about
// unnecessary casts.
//
// nolint: unconvert
m.Set(key, time.Unix(int64(t.Sec), int64(t.Nsec)).Format(metadataTimeFormat))
}
setTime("atime", stat.Atim)
setTime("mtime", stat.Mtim)
return nil
}

View File

@@ -1,6 +1,7 @@
package local
import (
"io/ioutil"
"os"
"sync"
"testing"
@@ -12,7 +13,7 @@ import (
// Check we can remove an open file
func TestRemove(t *testing.T) {
fd, err := os.CreateTemp("", "rclone-remove-test")
fd, err := ioutil.TempFile("", "rclone-remove-test")
require.NoError(t, err)
name := fd.Name()
defer func() {

View File

@@ -69,11 +69,6 @@ func (w *BinWriter) WritePu64(val int64) {
w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}
// WriteP64 writes an signed long as unsigned varint
func (w *BinWriter) WriteP64(val int64) {
w.b.Write(w.a[:binary.PutUvarint(w.a, uint64(val))])
}
// WriteString writes a zero-terminated string
func (w *BinWriter) WriteString(str string) {
buf := []byte(str)

View File

@@ -18,6 +18,7 @@ import (
"encoding/hex"
"encoding/json"
"io/ioutil"
"net/http"
"net/url"
@@ -90,13 +91,8 @@ func init() {
Help: "User name (usually email).",
Required: true,
}, {
Name: "pass",
Help: `Password.
This must be an app password - rclone will not work with your normal
password. See the Configuration section in the docs for how to make an
app password.
`,
Name: "pass",
Help: "Password.",
Required: true,
IsPassword: true,
}, {
@@ -645,7 +641,12 @@ func (f *Fs) itemToDirEntry(ctx context.Context, item *api.ListItem) (entry fs.D
return nil, -1, err
}
modTime := time.Unix(int64(item.Mtime), 0)
mTime := int64(item.Mtime)
if mTime < 0 {
fs.Debugf(f, "Fixing invalid timestamp %d on mailru file %q", mTime, remote)
mTime = 0
}
modTime := time.Unix(mTime, 0)
isDir, err := f.isDir(item.Kind, remote)
if err != nil {
@@ -1659,7 +1660,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Attempt to put by calculating hash in memory
if trySpeedup && size <= int64(o.fs.opt.SpeedupMaxMem) {
fileBuf, err = io.ReadAll(in)
fileBuf, err = ioutil.ReadAll(in)
if err != nil {
return err
}
@@ -1702,7 +1703,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if size <= mrhash.Size {
// Optimize upload: skip extra request if data fits in the hash buffer.
if fileBuf == nil {
fileBuf, err = io.ReadAll(wrapIn)
fileBuf, err = ioutil.ReadAll(wrapIn)
}
if fileHash == nil && err == nil {
fileHash = mrhash.Sum(fileBuf)
@@ -2057,7 +2058,7 @@ func (o *Object) addFileMetaData(ctx context.Context, overwrite bool) error {
req.WritePu16(0) // revision
req.WriteString(o.fs.opt.Enc.FromStandardPath(o.absPath()))
req.WritePu64(o.size)
req.WriteP64(o.modTime.Unix())
req.WritePu64(o.modTime.Unix())
req.WritePu32(0)
req.Write(o.mrHash)
@@ -2213,7 +2214,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
fs.Debugf(o, "Server returned full content instead of range")
if start > 0 {
// Discard the beginning of the data
_, err = io.CopyN(io.Discard, wrapStream, start)
_, err = io.CopyN(ioutil.Discard, wrapStream, start)
if err != nil {
closeBody(res)
return nil, err
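
The mailru hunks above deal with negative Mtime values and with writing the modification time as a varint via binary.PutUvarint. As a minimal sketch (standard library only, not rclone code), casting a negative int64 to uint64 before PutUvarint produces a maximum-length encoding, which illustrates why a negative timestamp is awkward to encode this way:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, binary.MaxVarintLen64)

	n := binary.PutUvarint(buf, uint64(int64(1662988042))) // a normal Unix mtime
	fmt.Println(n)                                         // 5 bytes

	n = binary.PutUvarint(buf, uint64(int64(-1))) // a negative mtime cast to uint64
	fmt.Println(n)                                // 10 bytes, the maximum varint length
}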

View File

@@ -8,6 +8,7 @@ import (
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"path"
"strings"
"sync"
@@ -574,7 +575,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
data = data[:limit]
}
return io.NopCloser(bytes.NewBuffer(data)), nil
return ioutil.NopCloser(bytes.NewBuffer(data)), nil
}
// Update the object with the contents of the io.Reader, modTime and size
@@ -582,7 +583,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
bucket, bucketPath := o.split()
data, err := io.ReadAll(in)
data, err := ioutil.ReadAll(in)
if err != nil {
return fmt.Errorf("failed to update memory object: %w", err)
}

View File

@@ -12,6 +12,7 @@ import (
"fmt"
gohash "hash"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
@@ -971,7 +972,7 @@ func (o *Object) netStorageUploadRequest(ctx context.Context, in io.Reader, src
URL = o.fs.url(src.Remote())
}
if strings.HasSuffix(URL, ".rclonelink") {
bits, err := io.ReadAll(in)
bits, err := ioutil.ReadAll(in)
if err != nil {
return err
}
@@ -1057,7 +1058,7 @@ func (o *Object) netStorageDownloadRequest(ctx context.Context, options []fs.Ope
if strings.HasSuffix(URL, ".rclonelink") && o.target != "" {
fs.Infof(nil, "Converting a symlink to the rclonelink file on download %q", URL)
reader := strings.NewReader(o.target)
readcloser := io.NopCloser(reader)
readcloser := ioutil.NopCloser(reader)
return readcloser, nil
}

View File

@@ -891,12 +891,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler)
// Disable change polling in China region
// See: https://github.com/rclone/rclone/issues/6444
if f.opt.Region == regionCN {
f.features.ChangeNotify = nil
}
// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, _, err := f.readMetaDataForPath(ctx, "")

View File

@@ -1,158 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"crypto/rsa"
"errors"
"net/http"
"os"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/common/auth"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
)
func getConfigurationProvider(opt *Options) (common.ConfigurationProvider, error) {
switch opt.Provider {
case instancePrincipal:
return auth.InstancePrincipalConfigurationProvider()
case userPrincipal:
if opt.ConfigFile != "" && !fileExists(opt.ConfigFile) {
fs.Errorf(userPrincipal, "oci config file doesn't exist at %v", opt.ConfigFile)
}
return common.CustomProfileConfigProvider(opt.ConfigFile, opt.ConfigProfile), nil
case resourcePrincipal:
return auth.ResourcePrincipalConfigurationProvider()
case noAuth:
fs.Infof("client", "using no auth provider")
return getNoAuthConfiguration()
default:
}
return common.DefaultConfigProvider(), nil
}
func newObjectStorageClient(ctx context.Context, opt *Options) (*objectstorage.ObjectStorageClient, error) {
p, err := getConfigurationProvider(opt)
if err != nil {
return nil, err
}
client, err := objectstorage.NewObjectStorageClientWithConfigurationProvider(p)
if err != nil {
fs.Errorf(opt.Provider, "failed to create object storage client, %v", err)
return nil, err
}
if opt.Region != "" {
client.SetRegion(opt.Region)
}
modifyClient(ctx, opt, &client.BaseClient)
return &client, err
}
func fileExists(filePath string) bool {
if _, err := os.Stat(filePath); errors.Is(err, os.ErrNotExist) {
return false
}
return true
}
func modifyClient(ctx context.Context, opt *Options, client *common.BaseClient) {
client.HTTPClient = getHTTPClient(ctx)
if opt.Provider == noAuth {
client.Signer = getNoAuthSigner()
}
}
// getClient makes http client according to the global options
// this has rclone specific options support like dump headers, body etc.
func getHTTPClient(ctx context.Context) *http.Client {
return fshttp.NewClient(ctx)
}
var retryErrorCodes = []int{
408, // Request Timeout
429, // Rate exceeded.
500, // Get occasional 500 Internal Server Error
503, // Service Unavailable
504, // Gateway Time-out
}
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
// If this is an ocierr object, try and extract more useful information to determine if we should retry
if ociError, ok := err.(common.ServiceError); ok {
// Simple case, check the original embedded error in case it's generically retryable
if fserrors.ShouldRetry(err) {
return true, err
}
// If it is a timeout then we want to retry that
if ociError.GetCode() == "RequestTimeout" {
return true, err
}
}
// Ok, not an oci error, check for generic failure conditions
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
func getNoAuthConfiguration() (common.ConfigurationProvider, error) {
return &noAuthConfigurator{}, nil
}
func getNoAuthSigner() common.HTTPRequestSigner {
return &noAuthSigner{}
}
type noAuthConfigurator struct {
}
type noAuthSigner struct {
}
func (n *noAuthSigner) Sign(*http.Request) error {
return nil
}
func (n *noAuthConfigurator) PrivateRSAKey() (*rsa.PrivateKey, error) {
return nil, nil
}
func (n *noAuthConfigurator) KeyID() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) TenancyOCID() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) UserOCID() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) KeyFingerprint() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) Region() (string, error) {
return "", nil
}
func (n *noAuthConfigurator) AuthType() (common.AuthConfig, error) {
return common.AuthConfig{
AuthType: common.UnknownAuthenticationType,
IsFromConfigFile: false,
OboToken: nil,
}, nil
}
// Check the interfaces are satisfied
var (
_ common.ConfigurationProvider = &noAuthConfigurator{}
_ common.HTTPRequestSigner = &noAuthSigner{}
)

View File

@@ -1,228 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"fmt"
"strings"
"time"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
)
// ------------------------------------------------------------
// Command Interface Implementation
// ------------------------------------------------------------
const (
operationRename = "rename"
operationListMultiPart = "list-multipart-uploads"
operationCleanup = "cleanup"
)
var commandHelp = []fs.CommandHelp{{
Name: operationRename,
Short: "change the name of an object",
Long: `This command can be used to rename an object.
Usage Examples:
rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
`,
Opts: nil,
}, {
Name: operationListMultiPart,
Short: "List the unfinished multipart uploads",
Long: `This command lists the unfinished multipart uploads in JSON format.
rclone backend list-multipart-uploads oos:bucket/path/to/object
It returns a dictionary of buckets with values as lists of unfinished
multipart uploads.
You can call it with no bucket in which case it lists all buckets, with
a bucket or with a bucket and path.
{
"test-bucket": [
{
"namespace": "test-namespace",
"bucket": "test-bucket",
"object": "600m.bin",
"uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
"timeCreated": "2022-07-29T06:21:16.595Z",
"storageTier": "Standard"
}
]
`,
}, {
Name: operationCleanup,
Short: "Remove unfinished multipart uploads.",
Long: `This command removes unfinished multipart uploads of age greater than
max-age which defaults to 24 hours.
Note that you can use -i/--dry-run with this command to see what it
would do.
rclone backend cleanup oos:bucket/path/to/object
rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
`,
Opts: map[string]string{
"max-age": "Max age of upload to delete",
},
},
}
/*
Command the backend to run a named command
The command run is name
args may be used to read arguments from
opts may be used to read optional arguments from
The result should be capable of being JSON encoded
If it is a string or a []string it will be shown to the user
otherwise it will be JSON encoded and shown to the user like that
*/
func (f *Fs) Command(ctx context.Context, commandName string, args []string,
opt map[string]string) (result interface{}, err error) {
// fs.Debugf(f, "command %v, args: %v, opts:%v", commandName, args, opt)
switch commandName {
case operationRename:
if len(args) < 2 {
return nil, fmt.Errorf("path to object or its new name to rename is empty")
}
remote := args[0]
newName := args[1]
return f.rename(ctx, remote, newName)
case operationListMultiPart:
return f.listMultipartUploadsAll(ctx)
case operationCleanup:
maxAge := 24 * time.Hour
if opt["max-age"] != "" {
maxAge, err = fs.ParseDuration(opt["max-age"])
if err != nil {
return nil, fmt.Errorf("bad max-age: %w", err)
}
}
return nil, f.cleanUp(ctx, maxAge)
default:
return nil, fs.ErrorCommandNotFound
}
}
func (f *Fs) rename(ctx context.Context, remote, newName string) (interface{}, error) {
if remote == "" {
return nil, fmt.Errorf("path to object file cannot be empty")
}
if newName == "" {
return nil, fmt.Errorf("the object's new name cannot be empty")
}
o := &Object{
fs: f,
remote: remote,
}
bucketName, objectPath := o.split()
err := o.readMetaData(ctx)
if err != nil {
fs.Errorf(f, "failed to read object:%v %v ", objectPath, err)
if strings.HasPrefix(objectPath, bucketName) {
fs.Errorf(f, "warn: ensure object path: %v is relative to bucket:%v and doesn't include the bucket name",
objectPath, bucketName)
}
return nil, fs.ErrorNotAFile
}
details := objectstorage.RenameObjectDetails{
SourceName: common.String(objectPath),
NewName: common.String(newName),
}
request := objectstorage.RenameObjectRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
RenameObjectDetails: details,
OpcClientRequestId: nil,
RequestMetadata: common.RequestMetadata{},
}
var response objectstorage.RenameObjectResponse
err = f.pacer.Call(func() (bool, error) {
response, err = f.srv.RenameObject(ctx, request)
return shouldRetry(ctx, response.HTTPResponse(), err)
})
if err != nil {
return nil, err
}
fs.Infof(f, "success: renamed object-path: %v to %v", objectPath, newName)
return "renamed successfully", nil
}
func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string][]*objectstorage.MultipartUpload,
err error) {
uploadsMap = make(map[string][]*objectstorage.MultipartUpload)
bucket, directory := f.split("")
if bucket != "" {
uploads, err := f.listMultipartUploads(ctx, bucket, directory)
if err != nil {
return uploadsMap, err
}
uploadsMap[bucket] = uploads
return uploadsMap, nil
}
entries, err := f.listBuckets(ctx)
if err != nil {
return uploadsMap, err
}
for _, entry := range entries {
bucket := entry.Remote()
uploads, listErr := f.listMultipartUploads(ctx, bucket, "")
if listErr != nil {
err = listErr
fs.Errorf(f, "%v", err)
}
uploadsMap[bucket] = uploads
}
return uploadsMap, err
}
// listMultipartUploads lists all outstanding multipart uploads for (bucket, key)
//
// Note that rather lazily we treat key as a prefix, so it matches
// directories and objects. This could surprise the user if they ask
// for "dir" and it returns "dirKey"
func (f *Fs) listMultipartUploads(ctx context.Context, bucketName, directory string) (
uploads []*objectstorage.MultipartUpload, err error) {
uploads = []*objectstorage.MultipartUpload{}
req := objectstorage.ListMultipartUploadsRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
}
var response objectstorage.ListMultipartUploadsResponse
for {
err = f.pacer.Call(func() (bool, error) {
response, err = f.srv.ListMultipartUploads(ctx, req)
return shouldRetry(ctx, response.HTTPResponse(), err)
})
if err != nil {
// fs.Debugf(f, "failed to list multi part uploads %v", err)
return uploads, err
}
for index, item := range response.Items {
if directory != "" && item.Object != nil && !strings.HasPrefix(*item.Object, directory) {
continue
}
uploads = append(uploads, &response.Items[index])
}
if response.OpcNextPage == nil {
break
}
req.Page = response.OpcNextPage
}
return uploads, nil
}

View File

@@ -1,155 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"fmt"
"strings"
"time"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
)
// ------------------------------------------------------------
// Implement Copier, an optional interface for Fs
//------------------------------------------------------------
// Copy src to this remote using server-side copy operations.
// This is stored with the remote path given
// It returns the destination Object and a possible error
// Will only be called if src.Fs().Name() == f.Name()
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
// fs.Debugf(f, "copying %v to %v", src.Remote(), remote)
srcObj, ok := src.(*Object)
if !ok {
// fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
// Temporary Object under construction
dstObj := &Object{
fs: f,
remote: remote,
}
err := f.copy(ctx, dstObj, srcObj)
if err != nil {
return nil, err
}
return f.NewObject(ctx, remote)
}
// copy does a server-side copy from dstObj <- srcObj
//
// If newInfo is nil then the metadata will be copied otherwise it
// will be replaced with newInfo
func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object) (err error) {
srcBucket, srcPath := srcObj.split()
dstBucket, dstPath := dstObj.split()
if dstBucket != srcBucket {
exists, err := f.bucketExists(ctx, dstBucket)
if err != nil {
return err
}
if !exists {
err = f.makeBucket(ctx, dstBucket)
if err != nil {
return err
}
}
}
copyObjectDetails := objectstorage.CopyObjectDetails{
SourceObjectName: common.String(srcPath),
DestinationRegion: common.String(dstObj.fs.opt.Region),
DestinationNamespace: common.String(dstObj.fs.opt.Namespace),
DestinationBucket: common.String(dstBucket),
DestinationObjectName: common.String(dstPath),
DestinationObjectMetadata: metadataWithOpcPrefix(srcObj.meta),
}
req := objectstorage.CopyObjectRequest{
NamespaceName: common.String(srcObj.fs.opt.Namespace),
BucketName: common.String(srcBucket),
CopyObjectDetails: copyObjectDetails,
}
var resp objectstorage.CopyObjectResponse
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CopyObject(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
return err
}
workRequestID := resp.OpcWorkRequestId
timeout := time.Duration(f.opt.CopyTimeout)
dstName := dstObj.String()
// https://docs.oracle.com/en-us/iaas/Content/Object/Tasks/copyingobjects.htm
// To enable server side copy object, customers will have to
// grant policy to objectstorage service to manage object-family
// Allow service objectstorage-<region_identifier> to manage object-family in tenancy
// Another option to avoid the policy is to download and reupload the file.
// This download upload will work for maximum file size limit of 5GB
err = copyObjectWaitForWorkRequest(ctx, workRequestID, dstName, timeout, f.srv)
if err != nil {
return err
}
return err
}
func copyObjectWaitForWorkRequest(ctx context.Context, wID *string, entityType string, timeout time.Duration,
client *objectstorage.ObjectStorageClient) error {
stateConf := &StateChangeConf{
Pending: []string{
string(objectstorage.WorkRequestStatusAccepted),
string(objectstorage.WorkRequestStatusInProgress),
string(objectstorage.WorkRequestStatusCanceling),
},
Target: []string{
string(objectstorage.WorkRequestSummaryStatusCompleted),
string(objectstorage.WorkRequestSummaryStatusCanceled),
string(objectstorage.WorkRequestStatusFailed),
},
Refresh: func() (interface{}, string, error) {
getWorkRequestRequest := objectstorage.GetWorkRequestRequest{}
getWorkRequestRequest.WorkRequestId = wID
workRequestResponse, err := client.GetWorkRequest(context.Background(), getWorkRequestRequest)
wr := &workRequestResponse.WorkRequest
return workRequestResponse, string(wr.Status), err
},
Timeout: timeout,
}
wrr, e := stateConf.WaitForStateContext(ctx, entityType)
if e != nil {
return fmt.Errorf("work request did not succeed, workId: %s, entity: %s. Message: %s", *wID, entityType, e)
}
wr := wrr.(objectstorage.GetWorkRequestResponse).WorkRequest
if wr.Status == objectstorage.WorkRequestStatusFailed {
errorMessage, _ := getObjectStorageErrorFromWorkRequest(ctx, wID, client)
return fmt.Errorf("work request did not succeed, workId: %s, entity: %s. Message: %s", *wID, entityType, errorMessage)
}
return nil
}
func getObjectStorageErrorFromWorkRequest(ctx context.Context, workRequestID *string, client *objectstorage.ObjectStorageClient) (string, error) {
req := objectstorage.ListWorkRequestErrorsRequest{}
req.WorkRequestId = workRequestID
res, err := client.ListWorkRequestErrors(ctx, req)
if err != nil {
return "", err
}
allErrs := make([]string, 0)
for _, errs := range res.Items {
allErrs = append(allErrs, *errs.Message)
}
errorMessage := strings.Join(allErrs, "\n")
return errorMessage, nil
}

View File

@@ -1,621 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"net/http"
"regexp"
"strconv"
"strings"
"time"
"github.com/ncw/swift/v2"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
)
// ------------------------------------------------------------
// Object Interface Implementation
// ------------------------------------------------------------
const (
metaMtime = "mtime" // the meta key to store mtime in - e.g. X-Amz-Meta-Mtime
metaMD5Hash = "md5chksum" // the meta key to store md5hash in
// StandardTier object storage tier
ociMetaPrefix = "opc-meta-"
)
var archive = "archive"
var infrequentAccess = "infrequentaccess"
var standard = "standard"
var storageTierMap = map[string]*string{
archive: &archive,
infrequentAccess: &infrequentAccess,
standard: &standard,
}
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
// Object describes an oci bucket object
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
md5 string // MD5 hash if known
bytes int64 // Size of the object
lastModified time.Time // The modified time of the object if known
meta map[string]string // The object metadata if known - may be nil
mimeType string // Content-Type of the object
// Metadata as pointers to strings as they often won't be present
storageTier *string // e.g. Standard
}
// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}
// readMetaData gets the metadata if it hasn't already been fetched
func (o *Object) readMetaData(ctx context.Context) (err error) {
fs.Debugf(o, "trying to read metadata %v", o.remote)
if o.meta != nil {
return nil
}
info, err := o.headObject(ctx)
if err != nil {
return err
}
return o.decodeMetaDataHead(info)
}
// headObject gets the metadata from the object unconditionally
func (o *Object) headObject(ctx context.Context) (info *objectstorage.HeadObjectResponse, err error) {
bucketName, objectPath := o.split()
req := objectstorage.HeadObjectRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(objectPath),
}
var response objectstorage.HeadObjectResponse
err = o.fs.pacer.Call(func() (bool, error) {
var err error
response, err = o.fs.srv.HeadObject(ctx, req)
return shouldRetry(ctx, response.HTTPResponse(), err)
})
if err != nil {
if svcErr, ok := err.(common.ServiceError); ok {
if svcErr.GetHTTPStatusCode() == http.StatusNotFound {
return nil, fs.ErrorObjectNotFound
}
}
return nil, err
}
o.fs.cache.MarkOK(bucketName)
return &response, err
}
func (o *Object) decodeMetaDataHead(info *objectstorage.HeadObjectResponse) (err error) {
return o.setMetaData(
info.ContentLength,
info.ContentMd5,
info.ContentType,
info.LastModified,
info.StorageTier,
info.OpcMeta)
}
func (o *Object) decodeMetaDataObject(info *objectstorage.GetObjectResponse) (err error) {
return o.setMetaData(
info.ContentLength,
info.ContentMd5,
info.ContentType,
info.LastModified,
info.StorageTier,
info.OpcMeta)
}
func (o *Object) setMetaData(
contentLength *int64,
contentMd5 *string,
contentType *string,
lastModified *common.SDKTime,
storageTier interface{},
meta map[string]string) error {
if contentLength != nil {
o.bytes = *contentLength
}
if contentMd5 != nil {
md5, err := o.base64ToMd5(*contentMd5)
if err == nil {
o.md5 = md5
}
}
o.meta = meta
if o.meta == nil {
o.meta = map[string]string{}
}
// Read MD5 from metadata if present
if md5sumBase64, ok := o.meta[metaMD5Hash]; ok {
md5, err := o.base64ToMd5(md5sumBase64)
if err == nil {
o.md5 = md5
}
}
if lastModified == nil {
o.lastModified = time.Now()
fs.Logf(o, "Failed to read last modified")
} else {
o.lastModified = lastModified.Time
}
if contentType != nil {
o.mimeType = *contentType
}
if storageTier == nil || storageTier == "" {
o.storageTier = storageTierMap[standard]
} else {
tier := strings.ToLower(fmt.Sprintf("%v", storageTier))
o.storageTier = storageTierMap[tier]
}
return nil
}
func (o *Object) base64ToMd5(md5sumBase64 string) (md5 string, err error) {
md5sumBytes, err := base64.StdEncoding.DecodeString(md5sumBase64)
if err != nil {
fs.Debugf(o, "Failed to read md5sum from metadata %q: %v", md5sumBase64, err)
return "", err
} else if len(md5sumBytes) != 16 {
fs.Debugf(o, "failed to read md5sum from metadata %q: wrong length", md5sumBase64)
return "", fmt.Errorf("failed to read md5sum from metadata %q: wrong length", md5sumBase64)
}
return hex.EncodeToString(md5sumBytes), nil
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.bytes
}
// GetTier returns storage class as string
func (o *Object) GetTier() string {
if o.storageTier == nil || *o.storageTier == "" {
return standard
}
return *o.storageTier
}
// SetTier performs changing storage class
func (o *Object) SetTier(tier string) (err error) {
ctx := context.TODO()
tier = strings.ToLower(tier)
bucketName, bucketPath := o.split()
tierEnum, ok := objectstorage.GetMappingStorageTierEnum(tier)
if !ok {
return fmt.Errorf("not a valid storage tier %v ", tier)
}
req := objectstorage.UpdateObjectStorageTierRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
UpdateObjectStorageTierDetails: objectstorage.UpdateObjectStorageTierDetails{
ObjectName: common.String(bucketPath),
StorageTier: tierEnum,
},
}
_, err = o.fs.srv.UpdateObjectStorageTier(ctx, req)
if err != nil {
return err
}
o.storageTier = storageTierMap[tier]
return err
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
err := o.readMetaData(ctx)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return ""
}
return o.mimeType
}
// Hash returns the MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
// Convert base64 encoded md5 into lower case hex
if o.md5 == "" {
err := o.readMetaData(ctx)
if err != nil {
return "", err
}
}
return o.md5, nil
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present it
// falls back to the LastModified returned in the HTTP headers
func (o *Object) ModTime(ctx context.Context) (result time.Time) {
if o.fs.ci.UseServerModTime {
return o.lastModified
}
err := o.readMetaData(ctx)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
}
// read mtime out of metadata if available
d, ok := o.meta[metaMtime]
if !ok || d == "" {
return o.lastModified
}
modTime, err := swift.FloatStringToTime(d)
if err != nil {
fs.Logf(o, "Failed to read mtime from object: %v", err)
return o.lastModified
}
return modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
err := o.readMetaData(ctx)
if err != nil {
return err
}
o.meta[metaMtime] = swift.TimeToFloatString(modTime)
_, err = o.fs.Copy(ctx, o, o.remote)
return err
}
// Storable returns if this object is storable
func (o *Object) Storable() bool {
return true
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
bucketName, bucketPath := o.split()
req := objectstorage.DeleteObjectRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
}
err := o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.DeleteObject(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
return err
}
// Open object file
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
bucketName, bucketPath := o.split()
req := objectstorage.GetObjectRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
}
o.applyGetObjectOptions(&req, options...)
var resp objectstorage.GetObjectResponse
err := o.fs.pacer.Call(func() (bool, error) {
var err error
resp, err = o.fs.srv.GetObject(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
return nil, err
}
// read size from ContentLength or ContentRange
bytes := resp.ContentLength
if resp.ContentRange != nil {
var contentRange = *resp.ContentRange
slash := strings.IndexRune(contentRange, '/')
if slash >= 0 {
i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
if err == nil {
bytes = &i
} else {
fs.Debugf(o, "Failed to find parse integer from in %q: %v", contentRange, err)
}
} else {
fs.Debugf(o, "Failed to find length in %q", contentRange)
}
}
err = o.decodeMetaDataObject(&resp)
if err != nil {
return nil, err
}
o.bytes = *bytes
return resp.HTTPResponse().Body, nil
}
// Update an object if it has changed
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
bucketName, bucketPath := o.split()
err = o.fs.makeBucket(ctx, bucketName)
if err != nil {
return err
}
// determine if we should do a single part or multipart upload
size := src.Size()
multipart := size >= int64(o.fs.opt.UploadCutoff)
// Set the mtime in the metadata
modTime := src.ModTime(ctx)
metadata := map[string]string{
metaMtime: swift.TimeToFloatString(modTime),
}
// read the md5sum if available
// - for non-multipart
// - so we can add a ContentMD5
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
// - for multipart provided checksums aren't disabled
// - so we can add the md5sum in the metadata as metaMD5Hash
var md5sumBase64 string
var md5sumHex string
if !multipart || !o.fs.opt.DisableChecksum {
md5sumHex, err = src.Hash(ctx, hash.MD5)
if err == nil && matchMd5.MatchString(md5sumHex) {
hashBytes, err := hex.DecodeString(md5sumHex)
if err == nil {
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
if multipart && !o.fs.opt.DisableChecksum {
// Set the md5sum as metadata on the object if
// - a multipart upload
// - the ETag is not an MD5, e.g. when using SSE/SSE-C
// provided checksums aren't disabled
metadata[metaMD5Hash] = md5sumBase64
}
}
}
}
// Guess the content type
mimeType := fs.MimeType(ctx, src)
if multipart {
chunkSize := int64(o.fs.opt.ChunkSize)
uploadRequest := transfer.UploadRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
ContentType: common.String(mimeType),
PartSize: common.Int64(chunkSize),
AllowMultipartUploads: common.Bool(true),
AllowParrallelUploads: common.Bool(true),
ObjectStorageClient: o.fs.srv,
EnableMultipartChecksumVerification: common.Bool(!o.fs.opt.DisableChecksum),
NumberOfGoroutines: common.Int(o.fs.opt.UploadConcurrency),
Metadata: metadataWithOpcPrefix(metadata),
}
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
uploadRequest.StorageTier = storageTier
}
o.applyMultiPutOptions(&uploadRequest, options...)
uploadStreamRequest := transfer.UploadStreamRequest{
UploadRequest: uploadRequest,
StreamReader: in,
}
uploadMgr := transfer.NewUploadManager()
var uploadID = ""
defer atexit.OnError(&err, func() {
if uploadID == "" {
return
}
if o.fs.opt.LeavePartsOnError {
return
}
fs.Debugf(o, "Cancelling multipart upload")
errCancel := o.fs.abortMultiPartUpload(
context.Background(),
bucketName,
bucketPath,
uploadID)
if errCancel != nil {
fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
}
})()
err = o.fs.pacer.Call(func() (bool, error) {
uploadResponse, err := uploadMgr.UploadStream(ctx, uploadStreamRequest)
var httpResponse *http.Response
if err == nil {
if uploadResponse.Type == transfer.MultipartUpload {
if uploadResponse.MultipartUploadResponse != nil {
httpResponse = uploadResponse.MultipartUploadResponse.HTTPResponse()
}
} else {
if uploadResponse.SinglepartUploadResponse != nil {
httpResponse = uploadResponse.SinglepartUploadResponse.HTTPResponse()
}
}
}
if err != nil {
uploadID := ""
if uploadResponse.MultipartUploadResponse != nil && uploadResponse.MultipartUploadResponse.UploadID != nil {
uploadID = *uploadResponse.MultipartUploadResponse.UploadID
fs.Debugf(o, "multipart streaming upload failed, aborting uploadID: %v, may retry", uploadID)
_ = o.fs.abortMultiPartUpload(ctx, bucketName, bucketPath, uploadID)
}
}
return shouldRetry(ctx, httpResponse, err)
})
if err != nil {
fs.Errorf(o, "multipart streaming upload failed %v", err)
return err
}
} else {
req := objectstorage.PutObjectRequest{
NamespaceName: common.String(o.fs.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
ContentType: common.String(mimeType),
PutObjectBody: io.NopCloser(in),
OpcMeta: metadata,
}
if size >= 0 {
req.ContentLength = common.Int64(size)
}
if o.fs.opt.StorageTier != "" {
storageTier, ok := objectstorage.GetMappingPutObjectStorageTierEnum(o.fs.opt.StorageTier)
if !ok {
return fmt.Errorf("not a valid storage tier: %v", o.fs.opt.StorageTier)
}
req.StorageTier = storageTier
}
o.applyPutOptions(&req, options...)
err = o.fs.pacer.Call(func() (bool, error) {
resp, err := o.fs.srv.PutObject(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
fs.Errorf(o, "put object failed %v", err)
return err
}
}
// Read the metadata from the newly created object
o.meta = nil // wipe old metadata
return o.readMetaData(ctx)
}
func (o *Object) applyPutOptions(req *objectstorage.PutObjectRequest, options ...fs.OpenOption) {
// Apply upload options
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
req.CacheControl = common.String(value)
case "content-disposition":
req.ContentDisposition = common.String(value)
case "content-encoding":
req.ContentEncoding = common.String(value)
case "content-language":
req.ContentLanguage = common.String(value)
case "content-type":
req.ContentType = common.String(value)
default:
if strings.HasPrefix(lowerKey, ociMetaPrefix) {
req.OpcMeta[lowerKey] = value
} else {
fs.Errorf(o, "Don't know how to set key %q on upload", key)
}
}
}
}
func (o *Object) applyGetObjectOptions(req *objectstorage.GetObjectRequest, options ...fs.OpenOption) {
fs.FixRangeOption(options, o.bytes)
for _, option := range options {
switch option.(type) {
case *fs.RangeOption, *fs.SeekOption:
_, value := option.Header()
req.Range = &value
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
// Apply download options
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "cache-control":
req.HttpResponseCacheControl = common.String(value)
case "content-disposition":
req.HttpResponseContentDisposition = common.String(value)
case "content-encoding":
req.HttpResponseContentEncoding = common.String(value)
case "content-language":
req.HttpResponseContentLanguage = common.String(value)
case "content-type":
req.HttpResponseContentType = common.String(value)
case "range":
// do nothing
default:
fs.Errorf(o, "Don't know how to set key %q on upload", key)
}
}
}
func (o *Object) applyMultiPutOptions(req *transfer.UploadRequest, options ...fs.OpenOption) {
// Apply upload options
for _, option := range options {
key, value := option.Header()
lowerKey := strings.ToLower(key)
switch lowerKey {
case "":
// ignore
case "content-encoding":
req.ContentEncoding = common.String(value)
case "content-language":
req.ContentLanguage = common.String(value)
case "content-type":
req.ContentType = common.String(value)
default:
if strings.HasPrefix(lowerKey, ociMetaPrefix) {
req.Metadata[lowerKey] = value
} else {
fs.Errorf(o, "Don't know how to set key %q on upload", key)
}
}
}
}
func metadataWithOpcPrefix(src map[string]string) map[string]string {
dst := make(map[string]string)
for lowerKey, value := range src {
if !strings.HasPrefix(lowerKey, ociMetaPrefix) {
dst[ociMetaPrefix+lowerKey] = value
}
}
return dst
}
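The mtime handling above (ModTime, SetModTime and Update) stores the modification time in the "mtime" metadata key (sent as opc-meta-mtime) as a floating point Unix timestamp encoded with the swift helpers already imported by this file. A minimal, self-contained sketch of that encoding, assuming only the github.com/ncw/swift/v2 dependency:

package main

import (
	"fmt"
	"time"

	"github.com/ncw/swift/v2"
)

func main() {
	// Encode a modification time the way the backend stores it in the
	// "mtime" metadata key.
	modTime := time.Date(2022, 9, 12, 13, 27, 22, 0, time.UTC)
	encoded := swift.TimeToFloatString(modTime) // "1662989242"

	// Decode it again, as ModTime does when reading the object back.
	decoded, err := swift.FloatStringToTime(encoded)
	fmt.Println(encoded, decoded.UTC(), err)
}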


@@ -1,242 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/lib/encoder"
)
const (
maxSizeForCopy = 4768 * 1024 * 1024
minChunkSize = fs.SizeSuffix(1024 * 1024 * 5)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
defaultUploadConcurrency = 10
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
minSleep = 100 * time.Millisecond
maxSleep = 5 * time.Minute
decayConstant = 1 // bigger for slower decay, exponential
defaultCopyTimeoutDuration = fs.Duration(time.Minute)
)
const (
userPrincipal = "user_principal_auth"
instancePrincipal = "instance_principal_auth"
resourcePrincipal = "resource_principal_auth"
environmentAuth = "env_auth"
noAuth = "no_auth"
userPrincipalHelpText = `use an OCI user and an API key for authentication.
you'll need to put your tenancy OCID, user OCID, region, and the path and fingerprint of an API key in a config file.
https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm`
instancePrincipalHelpText = `use instance principals to authorize an instance to make API calls.
each instance has its own identity, and authenticates using the certificates that are read from instance metadata.
https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm`
resourcePrincipalHelpText = `use resource principals to make API calls`
environmentAuthHelpText = `automatically pick up the credentials from the runtime (env), the first one to provide auth wins`
noAuthHelpText = `no credentials needed, this is typically for reading public buckets`
)
// Options defines the configuration for this backend
type Options struct {
Provider string `config:"provider"`
Compartment string `config:"compartment"`
Namespace string `config:"namespace"`
Region string `config:"region"`
Endpoint string `config:"endpoint"`
Enc encoder.MultiEncoder `config:"encoding"`
ConfigFile string `config:"config_file"`
ConfigProfile string `config:"config_profile"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
DisableChecksum bool `config:"disable_checksum"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
CopyTimeout fs.Duration `config:"copy_timeout"`
StorageTier string `config:"storage_tier"`
LeavePartsOnError bool `config:"leave_parts_on_error"`
NoCheckBucket bool `config:"no_check_bucket"`
}
func newOptions() []fs.Option {
return []fs.Option{{
Name: fs.ConfigProvider,
Help: "Choose your Auth Provider",
Required: true,
Default: environmentAuth,
Examples: []fs.OptionExample{{
Value: environmentAuth,
Help: environmentAuthHelpText,
}, {
Value: userPrincipal,
Help: userPrincipalHelpText,
}, {
Value: instancePrincipal,
Help: instancePrincipalHelpText,
}, {
Value: resourcePrincipal,
Help: resourcePrincipalHelpText,
}, {
Value: noAuth,
Help: noAuthHelpText,
}},
}, {
Name: "namespace",
Help: "Object storage namespace",
Required: true,
}, {
Name: "compartment",
Help: "Object storage compartment OCID",
Provider: "!no_auth",
Required: true,
}, {
Name: "region",
Help: "Object storage Region",
Required: true,
}, {
Name: "endpoint",
Help: "Endpoint for Object storage API.\n\nLeave blank to use the default endpoint for the region.",
Required: false,
}, {
Name: "config_file",
Help: "Path to OCI config file",
Provider: userPrincipal,
Default: "~/.oci/config",
Examples: []fs.OptionExample{{
Value: "~/.oci/config",
Help: "oci configuration file location",
}},
}, {
Name: "config_profile",
Help: "Profile name inside the oci config file",
Provider: userPrincipal,
Default: "Default",
Examples: []fs.OptionExample{{
Value: "Default",
Help: "Use the default profile",
}},
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload.
Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5 GiB.`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size to use for uploading.
When uploading files larger than upload_cutoff or files with unknown
size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google
photos or google docs) they will be uploaded as multipart uploads
using this chunk size.
Note that "upload_concurrency" chunks of this size are buffered
in memory per transfer.
If you are transferring large files over high-speed links and you have
enough memory, then increasing this will speed up the transfers.
Rclone will automatically increase the chunk size when uploading a
large file of known size to stay below the 10,000 chunks limit.
Files of unknown size are uploaded with the configured
chunk_size. Since the default chunk size is 5 MiB and there can be at
most 10,000 chunks, this means that by default the maximum size of
a file you can stream upload is 48 GiB. If you wish to stream upload
larger files then you will need to increase chunk_size.
Increasing the chunk size decreases the accuracy of the progress
statistics displayed with "-P" flag.
`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
If you are uploading small numbers of large files over high-speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: defaultUploadConcurrency,
Advanced: true,
}, {
Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy.
Any files larger than this that need to be server-side copied will be
copied in chunks of this size.
The minimum is 0 and the maximum is 5 GiB.`,
Default: fs.SizeSuffix(maxSizeForCopy),
Advanced: true,
}, {
Name: "copy_timeout",
Help: `Timeout for copy.
Copy is an asynchronous operation, specify timeout to wait for copy to succeed
`,
Default: defaultCopyTimeoutDuration,
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Don't store MD5 checksum with object metadata.
Normally rclone will calculate the MD5 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Any UTF-8 character is valid in a key, however it can't handle
// invalid UTF-8 and / has a special meaning.
//
// The SDK can't seem to handle uploading files called '.'
// - initial / encoding
// - doubled / encoding
// - trailing / encoding
// so that OSS keys are always valid file names
Default: encoder.EncodeInvalidUtf8 |
encoder.EncodeSlash |
encoder.EncodeDot,
}, {
Name: "leave_parts_on_error",
Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.
It should be set to true for resuming uploads across different sessions.
WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add
additional costs if not cleaned up.
`,
Default: false,
Advanced: true,
}, {
Name: "no_check_bucket",
Help: `If set, don't attempt to check the bucket exists or create it.
This can be useful when trying to minimise the number of transactions
rclone does if you know the bucket exists already.
It can also be needed if the user you are using does not have bucket
creation permissions.
`,
Default: false,
Advanced: true,
}}
}
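The chunk_size help text above explains that streamed uploads of unknown size are bounded by the multipart part limit: with the default 5 MiB chunk and at most 10,000 parts, the largest stream upload is roughly 48 GiB. A minimal sketch of that arithmetic using rclone's SizeSuffix type (the 10,000 figure is the limit quoted in the help text):

package main

import (
	"fmt"

	"github.com/rclone/rclone/fs"
)

func main() {
	const maxParts = 10000                         // part limit quoted in the chunk_size help
	defaultChunk := fs.SizeSuffix(5 * 1024 * 1024) // minChunkSize, the default chunk_size (5 MiB)
	maxStream := defaultChunk * maxParts           // largest possible streamed upload
	fmt.Printf("max streamed upload at the default chunk_size: %v\n", maxStream) // ~48.828Gi
}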


@@ -1,695 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
// Package oracleobjectstorage provides an interface to the OCI object storage system.
package oracleobjectstorage
import (
"context"
"fmt"
"io"
"net/http"
"path"
"strings"
"time"
"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/pacer"
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "oracleobjectstorage",
Description: "Oracle Cloud Infrastructure Object Storage",
Prefix: "oos",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: newOptions(),
})
}
// Fs represents a remote object storage server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
ci *fs.ConfigInfo // global config
features *fs.Features // optional features
srv *objectstorage.ObjectStorageClient // the connection to the object storage
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache for bucket creation status
pacer *fs.Pacer // To pace the API calls
}
// NewFs Initialize backend
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
ci := fs.GetConfig(ctx)
objectStorageClient, err := newObjectStorageClient(ctx, opt)
if err != nil {
return nil, err
}
p := pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))
f := &Fs{
name: name,
opt: *opt,
ci: ci,
srv: objectStorageClient,
cache: bucket.NewCache(),
pacer: fs.NewPacer(ctx, p),
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
SetTier: true,
GetTier: true,
SlowModTime: true,
}).Fill(ctx, f)
if f.rootBucket != "" && f.rootDirectory != "" && !strings.HasSuffix(root, "/") {
// Check to see if the (bucket,directory) is actually an existing file
oldRoot := f.root
newRoot, leaf := path.Split(oldRoot)
f.setRoot(newRoot)
_, err := f.NewObject(ctx, leaf)
if err != nil {
// File doesn't exist or is a directory so return old f
f.setRoot(oldRoot)
return f, nil
}
// return an error with fs which points to the parent
return f, fs.ErrorIsFile
}
return f, err
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
return fmt.Errorf("%s is greater than %s", cs, maxUploadCutoff)
}
return nil
}
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}
// ------------------------------------------------------------
// Implement the backend that represents a remote object storage server
// Fs is the interface a cloud storage system must provide
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootBucket == "" {
return "oos:root"
}
if f.rootDirectory == "" {
return fmt.Sprintf("oos:bucket %s", f.rootBucket)
}
return fmt.Sprintf("oos:bucket %s, path %s", f.rootBucket, f.rootDirectory)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
return time.Millisecond
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}
// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
}
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
bucketName, directory := f.split(dir)
fs.Debugf(f, "listing: bucket : %v, directory: %v", bucketName, dir)
if bucketName == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
}
return f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "")
}
// listFn is called from list to handle an object.
type listFn func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error
// list the objects into the function supplied from
// the bucket and root supplied
// (bucket, directory) is the starting directory
// If prefix is set then it is removed from all file names
// If addBucket is set then it adds the bucket to the start of the remotes generated
// If recurse is set the function will recursively list
// If limit is > 0 then it limits to that many files (must be less than 1000)
// If hidden is set then it will list the hidden (deleted) files too.
// if findFile is set it will look for files called (bucket, directory)
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, limit int,
fn listFn) (err error) {
if prefix != "" {
prefix += "/"
}
if directory != "" {
directory += "/"
}
delimiter := ""
if !recurse {
delimiter = "/"
}
chunkSize := 1000
if limit > 0 {
chunkSize = limit
}
var request = objectstorage.ListObjectsRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucket),
Prefix: common.String(directory),
Limit: common.Int(chunkSize),
Fields: common.String("name,size,etag,timeCreated,md5,timeModified,storageTier,archivalState"),
}
if delimiter != "" {
request.Delimiter = common.String(delimiter)
}
for {
var resp objectstorage.ListObjectsResponse
err = f.pacer.Call(func() (bool, error) {
var err error
resp, err = f.srv.ListObjects(ctx, request)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
if ociError, ok := err.(common.ServiceError); ok {
// If it is a timeout then we want to retry that
if ociError.GetHTTPStatusCode() == http.StatusNotFound {
err = fs.ErrorDirNotFound
}
}
if f.rootBucket == "" {
// if listing from the root ignore wrong region requests returning
// empty directory
if reqErr, ok := err.(common.ServiceError); ok {
// 301 if wrong region for bucket
if reqErr.GetHTTPStatusCode() == http.StatusMovedPermanently {
fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucket)
return nil
}
}
}
return err
}
if !recurse {
for _, commonPrefix := range resp.ListObjects.Prefixes {
if commonPrefix == "" {
fs.Logf(f, "Nil common prefix received")
continue
}
remote := commonPrefix
remote = f.opt.Enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
}
remote = remote[len(prefix):]
if addBucket {
remote = path.Join(bucket, remote)
}
remote = strings.TrimSuffix(remote, "/")
err = fn(remote, &objectstorage.ObjectSummary{Name: &remote}, true)
if err != nil {
return err
}
}
}
for i := range resp.Objects {
object := &resp.Objects[i]
// Finish if file name no longer has prefix
//if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
// return nil
//}
remote := *object.Name
remote = f.opt.Enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
// fs.Debugf(f, "Odd name received %v", object.Name)
continue
}
remote = remote[len(prefix):]
// Check for directory
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
// is this a directory marker?
if isDirectory && object.Size != nil && *object.Size == 0 {
continue // skip directory marker
}
if isDirectory && len(remote) > 1 {
remote = remote[:len(remote)-1]
}
err = fn(remote, object, isDirectory)
if err != nil {
return err
}
}
// end if no NextFileName
if resp.NextStartWith == nil {
break
}
request.Start = resp.NextStartWith
}
return nil
}
// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *objectstorage.ObjectSummary, isDirectory bool) (fs.DirEntry, error) {
if isDirectory {
size := int64(0)
if object.Size != nil {
size = *object.Size
}
d := fs.NewDir(remote, time.Time{}).SetSize(size)
return d, nil
}
o, err := f.newObjectWithInfo(ctx, remote, object)
if err != nil {
return nil, err
}
return o, nil
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
fn := func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
if err != nil {
return err
}
if entry != nil {
entries = append(entries, entry)
}
return nil
}
err = f.list(ctx, bucket, directory, prefix, addBucket, false, 0, fn)
if err != nil {
return nil, err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return entries, nil
}
// listBuckets returns all the buckets to out
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
if f.opt.Provider == noAuth {
return nil, fmt.Errorf("can't list buckets with %v provider, use a valid auth provider in config file", noAuth)
}
var request = objectstorage.ListBucketsRequest{
NamespaceName: common.String(f.opt.Namespace),
CompartmentId: common.String(f.opt.Compartment),
}
var resp objectstorage.ListBucketsResponse
for {
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.ListBuckets(ctx, request)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {
return nil, err
}
for _, item := range resp.Items {
bucketName := f.opt.Enc.ToStandardName(*item.Name)
f.cache.MarkOK(bucketName)
d := fs.NewDir(bucketName, item.TimeCreated.Time)
entries = append(entries, d)
}
if resp.OpcNextPage == nil {
break
}
request.Page = resp.OpcNextPage
}
return entries, nil
}
// Return an Object from a path
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *objectstorage.ObjectSummary) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
if info != nil {
// Set info but not meta
if info.TimeModified == nil {
fs.Logf(o, "Failed to read last modified")
o.lastModified = time.Now()
} else {
o.lastModified = info.TimeModified.Time
}
if info.Md5 != nil {
md5, err := o.base64ToMd5(*info.Md5)
if err == nil {
o.md5 = md5
}
}
o.bytes = *info.Size
o.storageTier = storageTierMap[strings.ToLower(string(info.StorageTier))]
} else {
err := o.readMetaData(ctx) // reads info and headers, returning an error
if err != nil {
return nil, err
}
}
return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
}
// Put the object into the bucket
// Copy the reader in to the new object which is returned
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: src.Remote(),
}
return o, o.Update(ctx, in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
bucketName, _ := f.split(dir)
return f.makeBucket(ctx, bucketName)
}
// makeBucket creates the bucket if it doesn't exist
func (f *Fs) makeBucket(ctx context.Context, bucketName string) error {
if f.opt.NoCheckBucket {
return nil
}
return f.cache.Create(bucketName, func() error {
details := objectstorage.CreateBucketDetails{
Name: common.String(bucketName),
CompartmentId: common.String(f.opt.Compartment),
PublicAccessType: objectstorage.CreateBucketDetailsPublicAccessTypeNopublicaccess,
}
req := objectstorage.CreateBucketRequest{
NamespaceName: common.String(f.opt.Namespace),
CreateBucketDetails: details,
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CreateBucket(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err == nil {
fs.Infof(f, "Bucket %q created with accessType %q", bucketName,
objectstorage.CreateBucketDetailsPublicAccessTypeNopublicaccess)
}
if svcErr, ok := err.(common.ServiceError); ok {
if code := svcErr.GetCode(); code == "BucketAlreadyOwnedByYou" || code == "BucketAlreadyExists" {
err = nil
}
}
return err
}, func() (bool, error) {
return f.bucketExists(ctx, bucketName)
})
}
// Check if the bucket exists
//
// NB this can return incorrect results if called immediately after bucket deletion
func (f *Fs) bucketExists(ctx context.Context, bucketName string) (bool, error) {
req := objectstorage.HeadBucketRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.HeadBucket(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err == nil {
return true, nil
}
if err, ok := err.(common.ServiceError); ok {
if err.GetHTTPStatusCode() == http.StatusNotFound {
return false, nil
}
}
return false, err
}
// Rmdir deletes an empty bucket. If the bucket is not empty this will fail with an appropriate error
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
bucketName, directory := f.split(dir)
if bucketName == "" || directory != "" {
return nil
}
return f.cache.Remove(bucketName, func() error {
req := objectstorage.DeleteBucketRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.DeleteBucket(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err == nil {
fs.Infof(f, "Bucket %q deleted", bucketName)
}
return err
})
}
func (f *Fs) abortMultiPartUpload(ctx context.Context, bucketName, bucketPath, uploadID string) (err error) {
if uploadID == "" {
return nil
}
request := objectstorage.AbortMultipartUploadRequest{
NamespaceName: common.String(f.opt.Namespace),
BucketName: common.String(bucketName),
ObjectName: common.String(bucketPath),
UploadId: common.String(uploadID),
}
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.AbortMultipartUpload(ctx, request)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
return err
}
// cleanUpBucket removes all pending multipart uploads for a given bucket over the age of maxAge
func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Duration,
uploads []*objectstorage.MultipartUpload) (err error) {
fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than %v", bucket, maxAge)
for _, upload := range uploads {
if upload.TimeCreated != nil && upload.Object != nil && upload.UploadId != nil {
age := time.Since(upload.TimeCreated.Time)
what := fmt.Sprintf("pending multipart upload for bucket %q key %q dated %v (%v ago)", bucket, *upload.Object,
upload.TimeCreated, age)
if age > maxAge {
fs.Infof(f, "removing %s", what)
if operations.SkipDestructive(ctx, what, "remove pending upload") {
continue
}
ignoreErr := f.abortMultiPartUpload(ctx, *upload.Bucket, *upload.Object, *upload.UploadId)
if ignoreErr != nil {
// fs.Debugf(f, "ignoring error %s", ignoreErr)
}
} else {
// fs.Debugf(f, "ignoring %s", what)
}
} else {
fs.Infof(f, "MultipartUpload doesn't have sufficient details to abort.")
}
}
return err
}
// CleanUp removes all pending multipart uploads
func (f *Fs) cleanUp(ctx context.Context, maxAge time.Duration) (err error) {
uploadsMap, err := f.listMultipartUploadsAll(ctx)
if err != nil {
return err
}
for bucketName, uploads := range uploadsMap {
cleanErr := f.cleanUpBucket(ctx, bucketName, maxAge, uploads)
if cleanErr != nil {
fs.Errorf(f, "Failed to cleanup bucket %q: %v", bucketName, cleanErr)
err = cleanErr
}
}
return err
}
// CleanUp removes all pending multipart uploads older than 24 hours
func (f *Fs) CleanUp(ctx context.Context) (err error) {
return f.cleanUp(ctx, 24*time.Hour)
}
// ------------------------------------------------------------
// Implement ListRer which is an optional interface for Fs
// ------------------------------------------------------------
/*
ListR lists the objects and directories of the Fs starting
from dir recursively into out.
dir should be "" to start from the root, and should not
have trailing slashes.
This should return ErrDirNotFound if the directory isn't
found.
It should call callback for each tranche of entries read.
These need not be returned in any particular order. If
callback returns an error then the listing will stop
immediately.
Don't implement this unless you have a more efficient way
of listing recursively than doing a directory traversal.
*/
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucketName, directory := f.split(dir)
list := walk.NewListRHelper(callback)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
}
if bucketName == "" {
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
bucketName := entry.Remote()
err = listR(bucketName, "", f.rootDirectory, true)
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucketName)
}
} else {
err = listR(bucketName, directory, f.rootDirectory, f.rootBucket == "")
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucketName)
}
return list.Flush()
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Commander = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
_ fs.GetTierer = &Object{}
_ fs.SetTierer = &Object{}
)
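The setRoot and split helpers near the top of this file use lib/bucket to divide a remote path into its bucket and object path parts (with the configured encoding applied afterwards). A minimal sketch of that split, assuming only rclone's lib/bucket package:

package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/bucket"
)

func main() {
	// A path with a directory part splits into bucket and bucket path.
	b, p := bucket.Split("mybucket/path/to/object.txt")
	fmt.Printf("bucket=%q path=%q\n", b, p) // bucket="mybucket" path="path/to/object.txt"

	// A bare bucket has an empty bucket path.
	b, p = bucket.Split("mybucket")
	fmt.Printf("bucket=%q path=%q\n", b, p) // bucket="mybucket" path=""
}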


@@ -1,33 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestOracleObjectStorage:",
TiersToTest: []string{"standard", "archive"},
NilObject: (*Object)(nil),
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
},
})
}
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetUploadCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadCutoff(cs)
}
var _ fstests.SetUploadChunkSizer = (*Fs)(nil)


@@ -1,7 +0,0 @@
// Build for oracleobjectstorage for unsupported platforms to stop go complaining
// about "no buildable Go source files "
//go:build plan9 || solaris || js
// +build plan9 solaris js
package oracleobjectstorage


@@ -1,362 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
package oracleobjectstorage
import (
"context"
"fmt"
"strings"
"time"
"github.com/rclone/rclone/fs"
)
var refreshGracePeriod = 30 * time.Second
// StateRefreshFunc is a function type used for StateChangeConf that is
// responsible for refreshing the item being watched for a state change.
//
// It returns three results. `result` is any object that will be returned
// as the final object after waiting for state change. This allows you to
// return the final updated object, for example an EC2 instance after refreshing
// it. A nil result represents not found.
//
// `state` is the latest state of that object. And `err` is any error that
// may have happened while refreshing the state.
type StateRefreshFunc func() (result interface{}, state string, err error)
// StateChangeConf is the configuration struct used for `WaitForState`.
type StateChangeConf struct {
Delay time.Duration // Wait this time before starting checks
Pending []string // States that are "allowed" and will continue trying
Refresh StateRefreshFunc // Refreshes the current state
Target []string // Target state
Timeout time.Duration // The amount of time to wait before timeout
MinTimeout time.Duration // Smallest time to wait before refreshes
PollInterval time.Duration // Override MinTimeout/backoff and only poll this often
NotFoundChecks int // Number of times to allow not found (nil result from Refresh)
// This is to work around inconsistent APIs
ContinuousTargetOccurrence int // Number of times the Target state has to occur continuously
}
// WaitForStateContext watches an object and waits for it to achieve the state
// specified in the configuration using the specified Refresh() func,
// waiting the number of seconds specified in the timeout configuration.
//
// If the Refresh function returns an error, exit immediately with that error.
//
// If the Refresh function returns a state other than the Target state or one
// listed in Pending, return immediately with an error.
//
// If the Timeout is exceeded before reaching the Target state, return an
// error.
//
// Otherwise, the result is the result of the first call to the Refresh function to
// reach the target state.
//
// Cancellation from the passed in context will cancel the refresh loop
func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (interface{}, error) {
// fs.Debugf(entityType, "Waiting for state to become: %s", conf.Target)
notfoundTick := 0
targetOccurrence := 0
// Set a default for times to check for not found
if conf.NotFoundChecks == 0 {
conf.NotFoundChecks = 20
}
if conf.ContinuousTargetOccurrence == 0 {
conf.ContinuousTargetOccurrence = 1
}
type Result struct {
Result interface{}
State string
Error error
Done bool
}
// Read every result from the refresh loop, waiting for a positive result.Done.
resCh := make(chan Result, 1)
// cancellation channel for the refresh loop
cancelCh := make(chan struct{})
result := Result{}
go func() {
defer close(resCh)
select {
case <-time.After(conf.Delay):
case <-cancelCh:
return
}
// start with 0 delay for the first loop
var wait time.Duration
for {
// store the last result
resCh <- result
// wait and watch for cancellation
select {
case <-cancelCh:
return
case <-time.After(wait):
// first round had no wait
if wait == 0 {
wait = 100 * time.Millisecond
}
}
res, currentState, err := conf.Refresh()
result = Result{
Result: res,
State: currentState,
Error: err,
}
if err != nil {
resCh <- result
return
}
// If we're waiting for the absence of a thing, then return
if res == nil && len(conf.Target) == 0 {
targetOccurrence++
if conf.ContinuousTargetOccurrence == targetOccurrence {
result.Done = true
resCh <- result
return
}
continue
}
if res == nil {
// If we didn't find the resource, check if we have been
// not finding it for a while, and if so, report an error.
notfoundTick++
if notfoundTick > conf.NotFoundChecks {
result.Error = &NotFoundError{
LastError: err,
Retries: notfoundTick,
}
resCh <- result
return
}
} else {
// Reset the counter for when a resource isn't found
notfoundTick = 0
found := false
for _, allowed := range conf.Target {
if currentState == allowed {
found = true
targetOccurrence++
if conf.ContinuousTargetOccurrence == targetOccurrence {
result.Done = true
resCh <- result
return
}
continue
}
}
for _, allowed := range conf.Pending {
if currentState == allowed {
found = true
targetOccurrence = 0
break
}
}
if !found && len(conf.Pending) > 0 {
result.Error = &UnexpectedStateError{
LastError: err,
State: result.State,
ExpectedState: conf.Target,
}
resCh <- result
return
}
}
// Wait between refreshes using exponential backoff, except when
// waiting for the target state to reoccur.
if targetOccurrence == 0 {
wait *= 2
}
// If a poll interval has been specified, choose that interval.
// Otherwise, bound the default value.
if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second {
wait = conf.PollInterval
} else {
if wait < conf.MinTimeout {
wait = conf.MinTimeout
} else if wait > 10*time.Second {
wait = 10 * time.Second
}
}
// fs.Debugf(entityType, "[TRACE] Waiting %s before next try", wait)
}
}()
// store the last value result from the refresh loop
lastResult := Result{}
timeout := time.After(conf.Timeout)
for {
select {
case r, ok := <-resCh:
// channel closed, so return the last result
if !ok {
return lastResult.Result, lastResult.Error
}
// we reached the intended state
if r.Done {
return r.Result, r.Error
}
// still waiting, store the last result
lastResult = r
case <-ctx.Done():
close(cancelCh)
return nil, ctx.Err()
case <-timeout:
// fs.Debugf(entityType, "[WARN] WaitForState timeout after %s", conf.Timeout)
// fs.Debugf(entityType, "[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod)
// cancel the goroutine and start our grace period timer
close(cancelCh)
timeout := time.After(refreshGracePeriod)
// we need a for loop and a label to break on, because we may have
// an extra response value to read, but still want to wait for the
// channel to close.
forSelect:
for {
select {
case r, ok := <-resCh:
if r.Done {
// the last refresh loop reached the desired state
return r.Result, r.Error
}
if !ok {
// the goroutine returned
break forSelect
}
// target state not reached, save the result for the
// TimeoutError and wait for the channel to close
lastResult = r
case <-ctx.Done():
fs.Errorf(entityType, "Context cancellation detected, abandoning grace period")
break forSelect
case <-timeout:
fs.Errorf(entityType, "WaitForState exceeded refresh grace period")
break forSelect
}
}
return nil, &TimeoutError{
LastError: lastResult.Error,
LastState: lastResult.State,
Timeout: conf.Timeout,
ExpectedState: conf.Target,
}
}
}
}
// NotFoundError resource not found error
type NotFoundError struct {
LastError error
LastRequest interface{}
LastResponse interface{}
Message string
Retries int
}
func (e *NotFoundError) Error() string {
if e.Message != "" {
return e.Message
}
if e.Retries > 0 {
return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries)
}
return "couldn't find resource"
}
func (e *NotFoundError) Unwrap() error {
return e.LastError
}
// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending
type UnexpectedStateError struct {
LastError error
State string
ExpectedState []string
}
func (e *UnexpectedStateError) Error() string {
return fmt.Sprintf(
"unexpected state '%s', wanted target '%s'. last error: %s",
e.State,
strings.Join(e.ExpectedState, ", "),
e.LastError,
)
}
func (e *UnexpectedStateError) Unwrap() error {
return e.LastError
}
// TimeoutError is returned when WaitForState times out
type TimeoutError struct {
LastError error
LastState string
Timeout time.Duration
ExpectedState []string
}
func (e *TimeoutError) Error() string {
expectedState := "resource to be gone"
if len(e.ExpectedState) > 0 {
expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", "))
}
extraInfo := make([]string, 0)
if e.LastState != "" {
extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState))
}
if e.Timeout > 0 {
extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String()))
}
suffix := ""
if len(extraInfo) > 0 {
suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", "))
}
if e.LastError != nil {
return fmt.Sprintf("timeout while waiting for %s%s: %s",
expectedState, suffix, e.LastError)
}
return fmt.Sprintf("timeout while waiting for %s%s",
expectedState, suffix)
}
func (e *TimeoutError) Unwrap() error {
return e.LastError
}
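The copy helper at the start of this section drives these types by polling GetWorkRequest until the work request reaches a terminal state. A minimal sketch of the same pattern with a stubbed Refresh function, assuming the package is importable as github.com/rclone/rclone/backend/oracleobjectstorage; the state strings here are placeholders, where the real code uses the objectstorage.WorkRequestStatus* values shown above:

package main

import (
	"context"
	"fmt"
	"time"

	oos "github.com/rclone/rclone/backend/oracleobjectstorage"
)

func main() {
	polls := 0
	stateConf := &oos.StateChangeConf{
		Pending: []string{"IN_PROGRESS"},
		Target:  []string{"COMPLETED"},
		Refresh: func() (interface{}, string, error) {
			// Stand-in for a GetWorkRequest call: report IN_PROGRESS twice,
			// then COMPLETED.
			polls++
			if polls < 3 {
				return "work-request", "IN_PROGRESS", nil
			}
			return "work-request", "COMPLETED", nil
		},
		Timeout:    30 * time.Second,
		MinTimeout: 100 * time.Millisecond,
	}
	result, err := stateConf.WaitForStateContext(context.Background(), "COPY")
	fmt.Println(result, err) // "work-request" <nil> once the target state is seen
}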


@@ -15,6 +15,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
@@ -25,8 +26,6 @@ import (
"sync"
"time"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/corehandlers"
@@ -58,7 +57,6 @@ import (
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
"github.com/rclone/rclone/lib/version"
"golang.org/x/net/http/httpguts"
"golang.org/x/sync/errgroup"
)
@@ -66,7 +64,7 @@ import (
func init() {
fs.Register(&fs.RegInfo{
Name: "s3",
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi",
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi",
NewFs: NewFs,
CommandHelp: commandHelp,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
@@ -105,7 +103,7 @@ func init() {
Help: "Arvan Cloud Object Storage (AOS)",
}, {
Value: "DigitalOcean",
Help: "DigitalOcean Spaces",
Help: "Digital Ocean Spaces",
}, {
Value: "Dreamhost",
Help: "Dreamhost DreamObjects",
@@ -118,15 +116,9 @@ func init() {
}, {
Value: "IDrive",
Help: "IDrive e2",
}, {
Value: "IONOS",
Help: "IONOS Cloud",
}, {
Value: "LyveCloud",
Help: "Seagate Lyve Cloud",
}, {
Value: "Liara",
Help: "Liara Object Storage",
}, {
Value: "Minio",
Help: "Minio Object Storage",
@@ -154,9 +146,6 @@ func init() {
}, {
Value: "Wasabi",
Help: "Wasabi Object Storage",
}, {
Value: "Qiniu",
Help: "Qiniu Object Storage (Kodo)",
}, {
Value: "Other",
Help: "Any other S3 compatible provider",
@@ -395,52 +384,10 @@ func init() {
Value: "auto",
Help: "R2 buckets are automatically distributed across Cloudflare's data centers for low latency.",
}},
}, {
// References:
// https://developer.qiniu.com/kodo/4088/s3-access-domainname
Name: "region",
Help: "Region to connect to.",
Provider: "Qiniu",
Examples: []fs.OptionExample{{
Value: "cn-east-1",
Help: "The default endpoint - a good choice if you are unsure.\nEast China Region 1.\nNeeds location constraint cn-east-1.",
}, {
Value: "cn-east-2",
Help: "East China Region 2.\nNeeds location constraint cn-east-2.",
}, {
Value: "cn-north-1",
Help: "North China Region 1.\nNeeds location constraint cn-north-1.",
}, {
Value: "cn-south-1",
Help: "South China Region 1.\nNeeds location constraint cn-south-1.",
}, {
Value: "us-north-1",
Help: "North America Region.\nNeeds location constraint us-north-1.",
}, {
Value: "ap-southeast-1",
Help: "Southeast Asia Region 1.\nNeeds location constraint ap-southeast-1.",
}, {
Value: "ap-northeast-1",
Help: "Northeast Asia Region 1.\nNeeds location constraint ap-northeast-1.",
}},
}, {
Name: "region",
Help: "Region where your bucket will be created and your data stored.\n",
Provider: "IONOS",
Examples: []fs.OptionExample{{
Value: "de",
Help: "Frankfurt, Germany",
}, {
Value: "eu-central-2",
Help: "Berlin, Germany",
}, {
Value: "eu-south-2",
Help: "Logrono, Spain",
}},
}, {
Name: "region",
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive",
Provider: "!AWS,Alibaba,ChinaMobile,Cloudflare,ArvanCloud,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive",
Examples: []fs.OptionExample{{
Value: "",
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
@@ -751,29 +698,6 @@ func init() {
Value: "s3.private.sng01.cloud-object-storage.appdomain.cloud",
Help: "Singapore Single Site Private Endpoint",
}},
}, {
Name: "endpoint",
Help: "Endpoint for IONOS S3 Object Storage.\n\nSpecify the endpoint from the same region.",
Provider: "IONOS",
Examples: []fs.OptionExample{{
Value: "s3-eu-central-1.ionoscloud.com",
Help: "Frankfurt, Germany",
}, {
Value: "s3-eu-central-2.ionoscloud.com",
Help: "Berlin, Germany",
}, {
Value: "s3-eu-south-2.ionoscloud.com",
Help: "Logrono, Spain",
}},
}, {
// Liara endpoints: https://liara.ir/landing/object-storage
Name: "endpoint",
Help: "Endpoint for Liara Object Storage API.",
Provider: "Liara",
Examples: []fs.OptionExample{{
Value: "storage.iran.liara.space",
Help: "The default endpoint\nIran",
}},
}, {
// oss endpoints: https://help.aliyun.com/document_detail/31837.html
Name: "endpoint",
@@ -936,11 +860,17 @@ func init() {
}},
}, {
Name: "endpoint",
Help: "Endpoint for Storj Gateway.",
Help: "Endpoint of the Shared Gateway.",
Provider: "Storj",
Examples: []fs.OptionExample{{
Value: "gateway.storjshare.io",
Help: "Global Hosted Gateway",
Value: "gateway.eu1.storjshare.io",
Help: "EU1 Shared Gateway",
}, {
Value: "gateway.us1.storjshare.io",
Help: "US1 Shared Gateway",
}, {
Value: "gateway.ap1.storjshare.io",
Help: "Asia-Pacific Shared Gateway",
}},
}, {
// cos endpoints: https://intl.cloud.tencent.com/document/product/436/6224
@@ -1068,64 +998,25 @@ func init() {
Value: "nz.s3.rackcorp.com",
Help: "Auckland (New Zealand) Endpoint",
}},
}, {
// Qiniu endpoints: https://developer.qiniu.com/kodo/4088/s3-access-domainname
Name: "endpoint",
Help: "Endpoint for Qiniu Object Storage.",
Provider: "Qiniu",
Examples: []fs.OptionExample{{
Value: "s3-cn-east-1.qiniucs.com",
Help: "East China Endpoint 1",
}, {
Value: "s3-cn-east-2.qiniucs.com",
Help: "East China Endpoint 2",
}, {
Value: "s3-cn-north-1.qiniucs.com",
Help: "North China Endpoint 1",
}, {
Value: "s3-cn-south-1.qiniucs.com",
Help: "South China Endpoint 1",
}, {
Value: "s3-us-north-1.qiniucs.com",
Help: "North America Endpoint 1",
}, {
Value: "s3-ap-southeast-1.qiniucs.com",
Help: "Southeast Asia Endpoint 1",
}, {
Value: "s3-ap-northeast-1.qiniucs.com",
Help: "Northeast Asia Endpoint 1",
}},
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
Provider: "!AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,Liara,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu",
Provider: "!AWS,IBMCOS,IDrive,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,ArvanCloud,Scaleway,StackPath,Storj,RackCorp",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint",
Provider: "Dreamhost",
}, {
Value: "syd1.digitaloceanspaces.com",
Help: "DigitalOcean Spaces Sydney 1",
Provider: "DigitalOcean",
}, {
Value: "sfo3.digitaloceanspaces.com",
Help: "DigitalOcean Spaces San Francisco 3",
Provider: "DigitalOcean",
}, {
Value: "fra1.digitaloceanspaces.com",
Help: "DigitalOcean Spaces Frankfurt 1",
Provider: "DigitalOcean",
}, {
Value: "nyc3.digitaloceanspaces.com",
Help: "DigitalOcean Spaces New York 3",
Help: "Digital Ocean Spaces New York 3",
Provider: "DigitalOcean",
}, {
Value: "ams3.digitaloceanspaces.com",
Help: "DigitalOcean Spaces Amsterdam 3",
Help: "Digital Ocean Spaces Amsterdam 3",
Provider: "DigitalOcean",
}, {
Value: "sgp1.digitaloceanspaces.com",
Help: "DigitalOcean Spaces Singapore 1",
Help: "Digital Ocean Spaces Singapore 1",
Provider: "DigitalOcean",
}, {
Value: "localhost:8333",
@@ -1145,39 +1036,15 @@ func init() {
Provider: "LyveCloud",
}, {
Value: "s3.wasabisys.com",
Help: "Wasabi US East 1 (N. Virginia)",
Provider: "Wasabi",
}, {
Value: "s3.us-east-2.wasabisys.com",
Help: "Wasabi US East 2 (N. Virginia)",
Provider: "Wasabi",
}, {
Value: "s3.us-central-1.wasabisys.com",
Help: "Wasabi US Central 1 (Texas)",
Help: "Wasabi US East endpoint",
Provider: "Wasabi",
}, {
Value: "s3.us-west-1.wasabisys.com",
Help: "Wasabi US West 1 (Oregon)",
Provider: "Wasabi",
}, {
Value: "s3.ca-central-1.wasabisys.com",
Help: "Wasabi CA Central 1 (Toronto)",
Help: "Wasabi US West endpoint",
Provider: "Wasabi",
}, {
Value: "s3.eu-central-1.wasabisys.com",
Help: "Wasabi EU Central 1 (Amsterdam)",
Provider: "Wasabi",
}, {
Value: "s3.eu-central-2.wasabisys.com",
Help: "Wasabi EU Central 2 (Frankfurt)",
Provider: "Wasabi",
}, {
Value: "s3.eu-west-1.wasabisys.com",
Help: "Wasabi EU West 1 (London)",
Provider: "Wasabi",
}, {
Value: "s3.eu-west-2.wasabisys.com",
Help: "Wasabi EU West 2 (Paris)",
Help: "Wasabi EU Central endpoint",
Provider: "Wasabi",
}, {
Value: "s3.ap-northeast-1.wasabisys.com",
@@ -1187,18 +1054,6 @@ func init() {
Value: "s3.ap-northeast-2.wasabisys.com",
Help: "Wasabi AP Northeast 2 (Osaka) endpoint",
Provider: "Wasabi",
}, {
Value: "s3.ap-southeast-1.wasabisys.com",
Help: "Wasabi AP Southeast 1 (Singapore)",
Provider: "Wasabi",
}, {
Value: "s3.ap-southeast-2.wasabisys.com",
Help: "Wasabi AP Southeast 2 (Sydney)",
Provider: "Wasabi",
}, {
Value: "storage.iran.liara.space",
Help: "Liara Iran endpoint",
Provider: "Liara",
}, {
Value: "s3.ir-thr-at1.arvanstorage.com",
Help: "ArvanCloud Tehran Iran (Asiatech) endpoint",
@@ -1553,36 +1408,10 @@ func init() {
Value: "nz",
Help: "Auckland (New Zealand) Region",
}},
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\n\nUsed when creating buckets only.",
Provider: "Qiniu",
Examples: []fs.OptionExample{{
Value: "cn-east-1",
Help: "East China Region 1",
}, {
Value: "cn-east-2",
Help: "East China Region 2",
}, {
Value: "cn-north-1",
Help: "North China Region 1",
}, {
Value: "cn-south-1",
Help: "South China Region 1",
}, {
Value: "us-north-1",
Help: "North America Region 1",
}, {
Value: "ap-southeast-1",
Help: "Southeast Asia Region 1",
}, {
Value: "ap-northeast-1",
Help: "Northeast Asia Region 1",
}},
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
Provider: "!AWS,IBMCOS,IDrive,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,ArvanCloud,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
}, {
Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -1592,11 +1421,7 @@ This ACL is used for creating objects and if bucket_acl isn't set, for creating
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Note that this ACL is applied when server-side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.
If the acl is an empty string then no X-Amz-Acl: header is added and
the default (private) will be used.
`,
doesn't copy the ACL from the source but rather writes a fresh one.`,
Provider: "!Storj,Cloudflare",
Examples: []fs.OptionExample{{
Value: "default",
@@ -1650,11 +1475,7 @@ the default (private) will be used.
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Note that this ACL is applied only when creating buckets. If it
isn't set then "acl" is used instead.
If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl:
header is added and the default (private) will be used.
`,
isn't set then "acl" is used instead.`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "private",
@@ -1714,21 +1535,8 @@ header is added and the default (private) will be used.
Help: "arn:aws:kms:*",
}},
}, {
Name: "sse_customer_key",
Help: `To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.
Alternatively you can provide --sse-customer-key-base64.`,
Provider: "AWS,Ceph,ChinaMobile,Minio",
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
}, {
Name: "sse_customer_key_base64",
Help: `If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.
Alternatively you can provide --sse-customer-key.`,
Name: "sse_customer_key",
Help: "If using SSE-C you must provide the secret encryption key used to encrypt/decrypt your data.",
Provider: "AWS,Ceph,ChinaMobile,Minio",
Advanced: true,
Examples: []fs.OptionExample{{
@@ -1815,15 +1623,6 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
Value: "STANDARD_IA",
Help: "Infrequent access storage mode",
}},
}, {
// Mapping from here: https://liara.ir/landing/object-storage
Name: "storage_class",
Help: "The storage class to use when storing new objects in Liara",
Provider: "Liara",
Examples: []fs.OptionExample{{
Value: "STANDARD",
Help: "Standard storage class",
}},
}, {
// Mapping from here: https://www.arvancloud.com/en/products/cloud-storage
Name: "storage_class",
@@ -1866,24 +1665,6 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
Value: "GLACIER",
Help: "Archived storage.\nPrices are lower, but it needs to be restored first to be accessed.",
}},
}, {
// Mapping from here: https://developer.qiniu.com/kodo/5906/storage-type
Name: "storage_class",
Help: "The storage class to use when storing new objects in Qiniu.",
Provider: "Qiniu",
Examples: []fs.OptionExample{{
Value: "STANDARD",
Help: "Standard storage class",
}, {
Value: "LINE",
Help: "Infrequent access storage mode",
}, {
Value: "GLACIER",
Help: "Archive storage mode",
}, {
Value: "DEEP_ARCHIVE",
Help: "Deep archive storage mode",
}},
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload.
@@ -2236,36 +2017,6 @@ can't check the size and hash but the file contents will be decompressed.
`,
Advanced: true,
Default: false,
}, {
Name: "might_gzip",
Help: strings.ReplaceAll(`Set this if the backend might gzip objects.
Normally providers will not alter objects when they are downloaded. If
an object was not uploaded with |Content-Encoding: gzip| then it won't
be set on download.
However some providers may gzip objects even if they weren't uploaded
with |Content-Encoding: gzip| (eg Cloudflare).
A symptom of this would be receiving errors like
ERROR corrupted on transfer: sizes differ NNN vs MMM
If you set this flag and rclone downloads an object with
Content-Encoding: gzip set and chunked transfer encoding, then rclone
will decompress the object on the fly.
If this is set to unset (the default) then rclone will choose
according to the provider setting what to apply, but you can override
rclone's choice here.
`, "|", "`"),
Default: fs.Tristate{},
Advanced: true,
}, {
Name: "no_system_metadata",
Help: `Suppress setting and reading of system metadata`,
Advanced: true,
Default: false,
},
}})
}
@@ -2360,7 +2111,6 @@ type Options struct {
SSEKMSKeyID string `config:"sse_kms_key_id"`
SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
SSECustomerKey string `config:"sse_customer_key"`
SSECustomerKeyBase64 string `config:"sse_customer_key_base64"`
SSECustomerKeyMD5 string `config:"sse_customer_key_md5"`
StorageClass string `config:"storage_class"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
@@ -2392,8 +2142,6 @@ type Options struct {
Versions bool `config:"versions"`
VersionAt fs.Time `config:"version_at"`
Decompress bool `config:"decompress"`
MightGzip fs.Tristate `config:"might_gzip"`
NoSystemMetadata bool `config:"no_system_metadata"`
}
// Fs represents a remote s3 server
@@ -2753,12 +2501,10 @@ func setQuirks(opt *Options) {
virtualHostStyle = true
urlEncodeListings = true
useMultipartEtag = true
mightGzip = true // assume all providers might gzip until proven otherwise
)
switch opt.Provider {
case "AWS":
// No quirks
mightGzip = false // Never auto gzips objects
case "Alibaba":
useMultipartEtag = false // Alibaba seems to calculate multipart Etags differently from AWS
case "HuaweiOBS":
@@ -2791,14 +2537,6 @@ func setQuirks(opt *Options) {
useMultipartEtag = false // untested
case "IDrive":
virtualHostStyle = false
case "IONOS":
// listObjectsV2 supported - https://api.ionos.com/docs/s3/#Basic-Operations-get-Bucket-list-type-2
virtualHostStyle = false
urlEncodeListings = false
case "Liara":
virtualHostStyle = false
urlEncodeListings = false
useMultipartEtag = false
case "LyveCloud":
useMultipartEtag = false // LyveCloud seems to calculate multipart Etags differently from AWS
case "Minio":
@@ -2835,9 +2573,6 @@ func setQuirks(opt *Options) {
useMultipartEtag = false // untested
case "Wasabi":
// No quirks
case "Qiniu":
useMultipartEtag = false
urlEncodeListings = false
case "Other":
listObjectsV2 = false
virtualHostStyle = false
@@ -2876,12 +2611,6 @@ func setQuirks(opt *Options) {
opt.UseMultipartEtag.Valid = true
opt.UseMultipartEtag.Value = useMultipartEtag
}
// set MightGzip if not manually set
if !opt.MightGzip.Valid {
opt.MightGzip.Valid = true
opt.MightGzip.Value = mightGzip
}
}
// setRoot changes the root of the Fs
@@ -2890,14 +2619,6 @@ func (f *Fs) setRoot(root string) {
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}
// return a pointer to the string if non empty or nil if it is empty
func stringPointerOrNil(s string) *string {
if s == "" {
return nil
}
return &s
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
@@ -2917,19 +2638,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.Versions && opt.VersionAt.IsSet() {
return nil, errors.New("s3: cant use --s3-versions and --s3-version-at at the same time")
}
if opt.ACL == "" {
opt.ACL = "private"
}
if opt.BucketACL == "" {
opt.BucketACL = opt.ACL
}
if opt.SSECustomerKeyBase64 != "" && opt.SSECustomerKey != "" {
return nil, errors.New("s3: can't use sse_customer_key and sse_customer_key_base64 at the same time")
} else if opt.SSECustomerKeyBase64 != "" {
// Decode the base64-encoded key and store it in the SSECustomerKey field
decoded, err := base64.StdEncoding.DecodeString(opt.SSECustomerKeyBase64)
if err != nil {
return nil, fmt.Errorf("s3: Could not decode sse_customer_key_base64: %w", err)
}
opt.SSECustomerKey = string(decoded)
}
if opt.SSECustomerKey != "" && opt.SSECustomerKeyMD5 == "" {
// calculate CustomerKeyMD5 if not supplied
md5sumBinary := md5.Sum([]byte(opt.SSECustomerKey))
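For reference, SSE-C sends the raw key together with the base64 of its MD5 digest, which is why the MD5 is derived here when it isn't supplied. A minimal sketch of that derivation for a key handed over as base64 (identifiers below are illustrative, not rclone's):
// Illustrative sketch, not rclone source: decode a base64 SSE-C key and
// compute the value S3 expects in the
// x-amz-server-side-encryption-customer-key-MD5 header.
rawKey, err := base64.StdEncoding.DecodeString(keyBase64)
if err != nil {
	return fmt.Errorf("invalid base64 key: %w", err)
}
digest := md5.Sum(rawKey)
keyMD5 := base64.StdEncoding.EncodeToString(digest[:])
_ = keyMD5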
@@ -3004,6 +2718,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return f, fs.ErrorIsFile
}
if opt.Provider == "Storj" {
f.features.Copy = nil
f.features.SetTier = false
f.features.GetTier = false
}
@@ -3062,17 +2777,6 @@ func (f *Fs) getMetaDataListing(ctx context.Context, wantRemote string) (info *s
return info, versionID, nil
}
// stringClonePointer clones the string pointed to by sp into new
// memory. This is useful to stop us keeping references to small
// strings carved out of large XML responses.
func stringClonePointer(sp *string) *string {
if sp == nil {
return nil
}
var s = *sp
return &s
}
// Return an Object from a path
//
// If it can't be found it returns the error ErrorObjectNotFound.
@@ -3098,8 +2802,8 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Obje
}
o.setMD5FromEtag(aws.StringValue(info.ETag))
o.bytes = aws.Int64Value(info.Size)
o.storageClass = stringClonePointer(info.StorageClass)
o.versionID = stringClonePointer(versionID)
o.storageClass = info.StorageClass
o.versionID = versionID
} else if !o.fs.opt.NoHeadObject {
err := o.readMetaData(ctx) // reads info and meta, returning an error
if err != nil {
@@ -3117,13 +2821,19 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// Gets the bucket location
func (f *Fs) getBucketLocation(ctx context.Context, bucket string) (string, error) {
region, err := s3manager.GetBucketRegion(ctx, f.ses, bucket, "", func(r *request.Request) {
r.Config.S3ForcePathStyle = aws.Bool(f.opt.ForcePathStyle)
req := s3.GetBucketLocationInput{
Bucket: &bucket,
}
var resp *s3.GetBucketLocationOutput
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.GetBucketLocation(&req)
return f.shouldRetry(ctx, err)
})
if err != nil {
return "", err
}
return region, nil
return s3.NormalizeBucketLocation(aws.StringValue(resp.LocationConstraint)), nil
}
// Updates the region for the bucket by reading the region from the
@@ -3237,14 +2947,8 @@ func (f *Fs) newV2List(req *s3.ListObjectsV2Input) bucketLister {
// Do a V2 listing
func (ls *v2List) List(ctx context.Context) (resp *s3.ListObjectsV2Output, versionIDs []*string, err error) {
resp, err = ls.f.c.ListObjectsV2WithContext(ctx, &ls.req)
if err != nil {
return nil, nil, err
}
if aws.BoolValue(resp.IsTruncated) && (resp.NextContinuationToken == nil || *resp.NextContinuationToken == "") {
return nil, nil, errors.New("s3 protocol error: received listing v2 with IsTruncated set and no NextContinuationToken. Should you be using `--s3-list-version 1`?")
}
ls.req.ContinuationToken = resp.NextContinuationToken
return resp, nil, nil
return resp, nil, err
}
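The error added above enforces the standard V2 pagination contract: a truncated listing must carry a continuation token for the next call. A hedged sketch of that loop with aws-sdk-go (the service client svc and the bucket name are illustrative):
// Illustrative pagination loop for ListObjectsV2, not rclone code.
req := &s3.ListObjectsV2Input{Bucket: aws.String("example-bucket")}
for {
	resp, err := svc.ListObjectsV2WithContext(ctx, req)
	if err != nil {
		return err
	}
	for _, obj := range resp.Contents {
		fmt.Println(aws.StringValue(obj.Key))
	}
	if !aws.BoolValue(resp.IsTruncated) {
		break
	}
	req.ContinuationToken = resp.NextContinuationToken
}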
// URL Encode the listings
@@ -3801,7 +3505,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
return f.cache.Create(bucket, func() error {
req := s3.CreateBucketInput{
Bucket: &bucket,
ACL: stringPointerOrNil(f.opt.BucketACL),
ACL: &f.opt.BucketACL,
}
if f.opt.LocationConstraint != "" {
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
@@ -3866,7 +3570,7 @@ func pathEscape(s string) string {
// method
func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, src *Object) error {
req.Bucket = &dstBucket
req.ACL = stringPointerOrNil(f.opt.ACL)
req.ACL = &f.opt.ACL
req.Key = &dstPath
source := pathEscape(path.Join(srcBucket, srcPath))
if src.versionID != nil {
@@ -4744,15 +4448,7 @@ func (o *Object) setMetaData(resp *s3.HeadObjectOutput) {
o.lastModified = time.Now()
fs.Logf(o, "Failed to read last modified")
} else {
// Try to keep the maximum precision in lastModified. If we read
// it from listings then it may have millisecond precision, but
// if we read it from a HEAD/GET request then it will have
// second precision.
equalToWithinOneSecond := o.lastModified.Truncate(time.Second).Equal((*resp.LastModified).Truncate(time.Second))
newHasNs := (*resp.LastModified).Nanosecond() != 0
if !equalToWithinOneSecond || newHasNs {
o.lastModified = *resp.LastModified
}
o.lastModified = *resp.LastModified
}
o.mimeType = aws.StringValue(resp.ContentType)
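A small worked example of the precision rule above, with made-up timestamps:
// Illustrative only: keep millisecond precision from a listing unless the
// HEAD value differs by more than a second or itself has sub-second precision.
fromListing := time.Date(2022, 9, 12, 13, 27, 22, 500000000, time.UTC) // 13:27:22.500 from a listing
fromHead := time.Date(2022, 9, 12, 13, 27, 22, 0, time.UTC)            // 13:27:22 from a HEAD request
sameSecond := fromListing.Truncate(time.Second).Equal(fromHead.Truncate(time.Second))
headHasNs := fromHead.Nanosecond() != 0
if !sameSecond || headHasNs {
	fromListing = fromHead // overwrite only when the HEAD value adds information
}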
@@ -4844,12 +4540,23 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options
return nil, err
}
contentLength := rest.ParseSizeFromHeaders(resp.Header)
if contentLength < 0 {
fs.Debugf(o, "Failed to parse file size from headers")
contentLength := &resp.ContentLength
if resp.Header.Get("Content-Range") != "" {
var contentRange = resp.Header.Get("Content-Range")
slash := strings.IndexRune(contentRange, '/')
if slash >= 0 {
i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
if err == nil {
contentLength = &i
} else {
fs.Debugf(o, "Failed to find parse integer from in %q: %v", contentRange, err)
}
} else {
fs.Debugf(o, "Failed to find length in %q", contentRange)
}
}
lastModified, err := http.ParseTime(resp.Header.Get("Last-Modified"))
lastModified, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
if err != nil {
fs.Debugf(o, "Failed to parse last modified from string %s, %v", resp.Header.Get("Last-Modified"), err)
}
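For reference, the total object size sits after the slash in a ranged response's Content-Range header (bytes start-end/total); a minimal sketch of extracting it:
// Illustrative only, with an example header value.
contentRange := "bytes 0-1023/146515"
if slash := strings.IndexRune(contentRange, '/'); slash >= 0 {
	if total, err := strconv.ParseInt(contentRange[slash+1:], 10, 64); err == nil {
		fmt.Println("total size:", total) // 146515
	}
}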
@@ -4873,7 +4580,7 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options
var head = s3.HeadObjectOutput{
ETag: header("Etag"),
ContentLength: &contentLength,
ContentLength: contentLength,
LastModified: &lastModified,
Metadata: metaData,
CacheControl: header("Cache-Control"),
@@ -4972,7 +4679,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// Decompress body if necessary
if aws.StringValue(resp.ContentEncoding) == "gzip" {
if o.fs.opt.Decompress || (resp.ContentLength == nil && o.fs.opt.MightGzip.Value) {
if o.fs.opt.Decompress {
return readers.NewGzipReader(resp.Body)
}
o.fs.warnCompressed.Do(func() {
@@ -5219,7 +4926,7 @@ func (o *Object) uploadSinglepartPutObject(ctx context.Context, req *s3.PutObjec
// Can't upload zero length files like this for some reason
r.Body = bytes.NewReader([]byte{})
} else {
r.SetStreamingBody(io.NopCloser(in))
r.SetStreamingBody(ioutil.NopCloser(in))
}
r.SetContext(ctx)
r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
@@ -5333,7 +5040,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
req := s3.PutObjectInput{
Bucket: &bucket,
ACL: stringPointerOrNil(o.fs.opt.ACL),
ACL: &o.fs.opt.ACL,
Key: &bucketPath,
}
@@ -5347,10 +5054,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
for k, v := range meta {
pv := aws.String(v)
k = strings.ToLower(k)
if o.fs.opt.NoSystemMetadata {
req.Metadata[k] = pv
continue
}
switch k {
case "cache-control":
req.CacheControl = pv
@@ -5471,20 +5174,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
}
// Check metadata keys and values are valid
for key, value := range req.Metadata {
if !httpguts.ValidHeaderFieldName(key) {
fs.Errorf(o, "Dropping invalid metadata key %q", key)
delete(req.Metadata, key)
} else if value == nil {
fs.Errorf(o, "Dropping nil metadata value for key %q", key)
delete(req.Metadata, key)
} else if !httpguts.ValidHeaderFieldValue(*value) {
fs.Errorf(o, "Dropping invalid metadata value %q for key %q", *value, key)
delete(req.Metadata, key)
}
}
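The validation above relies on golang.org/x/net/http/httpguts; a quick sketch of what it accepts and rejects (values are examples):
// Illustrative only: a metadata pair must be a legal HTTP header name/value.
fmt.Println(httpguts.ValidHeaderFieldName("x-amz-meta-mtime")) // true
fmt.Println(httpguts.ValidHeaderFieldName("bad key"))          // false - space not allowed
fmt.Println(httpguts.ValidHeaderFieldValue("1662985642"))      // true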
var wantETag string // Multipart upload Etag to check
var gotEtag string // Etag we got from the upload
var lastModified time.Time // Time we got from the upload
@@ -5501,12 +5190,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return err
}
// Only record versionID if we are using --s3-versions or --s3-version-at
if o.fs.opt.Versions || o.fs.opt.VersionAt.IsSet() {
o.versionID = versionID
} else {
o.versionID = nil
}
o.versionID = versionID
// User requested we don't HEAD the object after uploading it
// so make up the object as best we can assuming it got
@@ -5634,9 +5318,6 @@ func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error)
// Set system metadata
setMetadata := func(k string, v *string) {
if o.fs.opt.NoSystemMetadata {
return
}
if v == nil || *v == "" {
return
}

View File

@@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
@@ -632,7 +633,7 @@ func (f *Fs) download(ctx context.Context, url string, size int64, options ...fs
})
if start > 0 {
// We need to read and discard the beginning of the data...
_, err = io.CopyN(io.Discard, resp.Body, start)
_, err = io.CopyN(ioutil.Discard, resp.Body, start)
if err != nil {
return nil, err
}

View File

@@ -10,6 +10,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"regexp"
@@ -122,10 +123,7 @@ This enables the use of the following insecure ciphers and key exchange methods:
- diffie-hellman-group-exchange-sha256
- diffie-hellman-group-exchange-sha1
Those algorithms are insecure and may allow plaintext data to be recovered by an attacker.
This must be false if you use either ciphers or key_exchange advanced options.
`,
Those algorithms are insecure and may allow plaintext data to be recovered by an attacker.`,
Default: false,
Examples: []fs.OptionExample{
{
@@ -327,46 +325,6 @@ and pass variables with spaces in in quotes, eg
"VAR3=value with space" "VAR4=value with space" VAR5=nospacehere
`,
Advanced: true,
}, {
Name: "ciphers",
Default: fs.SpaceSepList{},
Help: `Space separated list of ciphers to be used for session encryption, ordered by preference.
At least one must match with server configuration. This can be checked for example using ssh -Q cipher.
This must not be set if use_insecure_cipher is true.
Example:
aes128-ctr aes192-ctr aes256-ctr aes128-gcm@openssh.com aes256-gcm@openssh.com
`,
Advanced: true,
}, {
Name: "key_exchange",
Default: fs.SpaceSepList{},
Help: `Space separated list of key exchange algorithms, ordered by preference.
At least one must match with server configuration. This can be checked for example using ssh -Q kex.
This must not be set if use_insecure_cipher is true.
Example:
sntrup761x25519-sha512@openssh.com curve25519-sha256 curve25519-sha256@libssh.org ecdh-sha2-nistp256
`,
Advanced: true,
}, {
Name: "macs",
Default: fs.SpaceSepList{},
Help: `Space separated list of MACs (message authentication code) algorithms, ordered by preference.
At least one must match with server configuration. This can be checked for example using ssh -Q mac.
Example:
umac-64-etm@openssh.com umac-128-etm@openssh.com hmac-sha2-256-etm@openssh.com
`,
Advanced: true,
}},
@@ -404,9 +362,6 @@ type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Concurrency int `config:"concurrency"`
SetEnv fs.SpaceSepList `config:"set_env"`
Ciphers fs.SpaceSepList `config:"ciphers"`
KeyExchange fs.SpaceSepList `config:"key_exchange"`
MACs fs.SpaceSepList `config:"macs"`
}
// Fs stores the interface to the remote SFTP files
@@ -747,25 +702,10 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
sshConfig.HostKeyCallback = hostcallback
}
if opt.UseInsecureCipher && (opt.Ciphers != nil || opt.KeyExchange != nil) {
return nil, fmt.Errorf("use_insecure_cipher must be false if ciphers or key_exchange are set in advanced configuration")
}
sshConfig.Config.SetDefaults()
if opt.UseInsecureCipher {
sshConfig.Config.SetDefaults()
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc", "aes192-cbc", "aes256-cbc", "3des-cbc")
sshConfig.Config.KeyExchanges = append(sshConfig.Config.KeyExchanges, "diffie-hellman-group-exchange-sha1", "diffie-hellman-group-exchange-sha256")
} else {
if opt.Ciphers != nil {
sshConfig.Config.Ciphers = opt.Ciphers
}
if opt.KeyExchange != nil {
sshConfig.Config.KeyExchanges = opt.KeyExchange
}
}
if opt.MACs != nil {
sshConfig.Config.MACs = opt.MACs
}
keyFile := env.ShellExpand(opt.KeyFile)
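A minimal sketch of how cipher, key exchange and MAC lists map onto golang.org/x/crypto/ssh (the values are examples, not rclone defaults):
// Illustrative only: restricting negotiated algorithms on an ssh.ClientConfig.
cfg := &ssh.ClientConfig{User: "me", HostKeyCallback: ssh.InsecureIgnoreHostKey()} // host key checking disabled here for brevity only
cfg.Config.SetDefaults()
cfg.Config.Ciphers = []string{"aes128-ctr", "aes256-gcm@openssh.com"}
cfg.Config.KeyExchanges = []string{"curve25519-sha256"}
cfg.Config.MACs = []string{"hmac-sha2-256-etm@openssh.com"}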
@@ -782,7 +722,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, fmt.Errorf("couldn't read ssh agent signers: %w", err)
}
if keyFile != "" {
pubBytes, err := os.ReadFile(keyFile + ".pub")
pubBytes, err := ioutil.ReadFile(keyFile + ".pub")
if err != nil {
return nil, fmt.Errorf("failed to read public key file: %w", err)
}
@@ -811,7 +751,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if keyFile != "" || opt.KeyPem != "" {
var key []byte
if opt.KeyPem == "" {
key, err = os.ReadFile(keyFile)
key, err = ioutil.ReadFile(keyFile)
if err != nil {
return nil, fmt.Errorf("failed to read private key file: %w", err)
}
@@ -842,7 +782,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// If a public key has been specified then use that
if pubkeyFile != "" {
certfile, err := os.ReadFile(pubkeyFile)
certfile, err := ioutil.ReadFile(pubkeyFile)
if err != nil {
return nil, fmt.Errorf("unable to read cert file: %w", err)
}
@@ -975,24 +915,20 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
fs.Debugf(f, "Running shell type detection remote command: %s", shellCmd)
err = session.Run(shellCmd)
_ = session.Close()
f.shellType = defaultShellType
if err != nil {
f.shellType = defaultShellType
fs.Debugf(f, "Remote command failed: %v (stdout=%v) (stderr=%v)", err, bytes.TrimSpace(stdout.Bytes()), bytes.TrimSpace(stderr.Bytes()))
} else {
outBytes := stdout.Bytes()
fs.Debugf(f, "Remote command result: %s", outBytes)
outString := string(bytes.TrimSpace(stdout.Bytes()))
if outString != "" {
if strings.HasPrefix(outString, "Microsoft.PowerShell") { // PowerShell: "Microsoft.PowerShell%ComSpec%"
f.shellType = "powershell"
} else if !strings.HasSuffix(outString, "%ComSpec%") { // Command Prompt: "${ShellId}C:\WINDOWS\system32\cmd.exe"
// Additional positive test, to avoid misdetection on unexpected Unix shell variants
s := strings.ToLower(outString)
if strings.Contains(s, ".exe") || strings.Contains(s, ".com") {
f.shellType = "cmd"
}
} // POSIX-based Unix shell: "%ComSpec%"
} // fish Unix shell: ""
if strings.HasPrefix(outString, "Microsoft.PowerShell") { // If PowerShell: "Microsoft.PowerShell%ComSpec%"
f.shellType = "powershell"
} else if !strings.HasSuffix(outString, "%ComSpec%") { // If Command Prompt: "${ShellId}C:\WINDOWS\system32\cmd.exe"
f.shellType = "cmd"
} else { // If Unix: "%ComSpec%"
f.shellType = "unix"
}
}
}
// Save permanently in config to avoid the extra work next time
@@ -1235,10 +1171,6 @@ func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
err = c.sftpClient.Mkdir(dirPath)
f.putSftpConnection(&c, err)
if err != nil {
if os.IsExist(err) {
fs.Debugf(f, "directory %q exists after Mkdir is attempted", dirPath)
return nil
}
return fmt.Errorf("mkdir %q failed: %w", dirPath, err)
}
return nil
@@ -1775,14 +1707,11 @@ func (o *Object) setMetadata(info os.FileInfo) {
// statRemote stats the file or directory at the remote given
func (f *Fs) stat(ctx context.Context, remote string) (info os.FileInfo, err error) {
absPath := remote
if !strings.HasPrefix(remote, "/") {
absPath = path.Join(f.absRoot, remote)
}
c, err := f.getSftpConnection(ctx)
if err != nil {
return nil, fmt.Errorf("stat: %w", err)
}
absPath := path.Join(f.absRoot, remote)
info, err = c.sftpClient.Stat(absPath)
f.putSftpConnection(&c, err)
return info, err
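The HasPrefix check above exists because path.Join always anchors the remote under the root; a quick illustration:
// Illustrative only: why absolute remotes are passed through untouched.
fmt.Println(path.Join("/home/user", "docs/a.txt")) // /home/user/docs/a.txt
fmt.Println(path.Join("/home/user", "/etc/hosts")) // /home/user/etc/hosts - loses the absolute meaning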

View File

@@ -77,6 +77,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
@@ -478,7 +479,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("failed to open timezone db: %w", err)
}
tzdata, err := io.ReadAll(timezone)
tzdata, err := ioutil.ReadAll(timezone)
if err != nil {
return nil, fmt.Errorf("failed to read timezone: %w", err)
}

View File

@@ -10,6 +10,7 @@ import (
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
pathpkg "path"
@@ -118,7 +119,7 @@ func (f *vfsgen۰CompressedFile) Read(p []byte) (n int, err error) {
}
if f.grPos < f.seekPos {
// Fast-forward.
_, err = io.CopyN(io.Discard, f.gr, f.seekPos-f.grPos)
_, err = io.CopyN(ioutil.Discard, f.gr, f.seekPos-f.grPos)
if err != nil {
return 0, err
}

View File

@@ -1,233 +0,0 @@
package smb
import (
"context"
"fmt"
"net"
"sync/atomic"
"time"
smb2 "github.com/hirochachacha/go-smb2"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fshttp"
)
// dial starts a client connection to the given SMB server. It is a
// convenience function that connects to the given network address,
// initiates the SMB handshake, and then sets up a Client.
func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {
dialer := fshttp.NewDialer(ctx)
tconn, err := dialer.Dial(network, addr)
if err != nil {
return nil, err
}
pass := ""
if f.opt.Pass != "" {
pass, err = obscure.Reveal(f.opt.Pass)
if err != nil {
return nil, err
}
}
d := &smb2.Dialer{
Initiator: &smb2.NTLMInitiator{
User: f.opt.User,
Password: pass,
Domain: f.opt.Domain,
},
}
session, err := d.DialContext(ctx, tconn)
if err != nil {
return nil, err
}
return &conn{
smbSession: session,
conn: &tconn,
}, nil
}
// conn encapsulates an SMB network connection and the corresponding SMB session (plus an optional mounted share)
type conn struct {
conn *net.Conn
smbSession *smb2.Session
smbShare *smb2.Share
shareName string
}
// Closes the connection
func (c *conn) close() (err error) {
if c.smbShare != nil {
err = c.smbShare.Umount()
}
sessionLogoffErr := c.smbSession.Logoff()
if err != nil {
return err
}
return sessionLogoffErr
}
// True if it's closed
func (c *conn) closed() bool {
var nopErr error
if c.smbShare != nil {
// stat the current directory
_, nopErr = c.smbShare.Stat(".")
} else {
// list the shares
_, nopErr = c.smbSession.ListSharenames()
}
return nopErr != nil
}
// Show that we are using a SMB session
//
// Call removeSession() when done
func (f *Fs) addSession() {
atomic.AddInt32(&f.sessions, 1)
}
// Show the SMB session is no longer in use
func (f *Fs) removeSession() {
atomic.AddInt32(&f.sessions, -1)
}
// getSessions shows whether there are any sessions in use
func (f *Fs) getSessions() int32 {
return atomic.LoadInt32(&f.sessions)
}
// Open a new connection to the SMB server.
func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err error) {
// As we are pooling these connections we need to decouple
// them from the current context
ctx = context.Background()
c, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port)
if err != nil {
return nil, fmt.Errorf("couldn't connect SMB: %w", err)
}
if share != "" {
// mount the specified share as well if user requested
c.smbShare, err = c.smbSession.Mount(share)
if err != nil {
_ = c.smbSession.Logoff()
return nil, fmt.Errorf("couldn't initialize SMB: %w", err)
}
c.smbShare = c.smbShare.WithContext(ctx)
}
return c, nil
}
// Ensure the specified share is mounted or the session is unmounted
func (c *conn) mountShare(share string) (err error) {
if c.shareName == share {
return nil
}
if c.smbShare != nil {
err = c.smbShare.Umount()
c.smbShare = nil
}
if err != nil {
return
}
if share != "" {
c.smbShare, err = c.smbSession.Mount(share)
if err != nil {
return
}
}
c.shareName = share
return nil
}
// Get a SMB connection from the pool, or open a new one
func (f *Fs) getConnection(ctx context.Context, share string) (c *conn, err error) {
accounting.LimitTPS(ctx)
f.poolMu.Lock()
for len(f.pool) > 0 {
c = f.pool[0]
f.pool = f.pool[1:]
err = c.mountShare(share)
if err == nil {
break
}
fs.Debugf(f, "Discarding unusable SMB connection: %v", err)
c = nil
}
f.poolMu.Unlock()
if c != nil {
return c, nil
}
err = f.pacer.Call(func() (bool, error) {
c, err = f.newConnection(ctx, share)
if err != nil {
return true, err
}
return false, nil
})
return c, err
}
// Return a SMB connection to the pool
//
// It nils the pointed to connection out so it can't be reused
func (f *Fs) putConnection(pc **conn) {
c := *pc
*pc = nil
var nopErr error
if c.smbShare != nil {
// stat the current directory
_, nopErr = c.smbShare.Stat(".")
} else {
// list the shares
_, nopErr = c.smbSession.ListSharenames()
}
if nopErr != nil {
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
_ = c.close()
return
}
f.poolMu.Lock()
f.pool = append(f.pool, c)
if f.opt.IdleTimeout > 0 {
f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
}
f.poolMu.Unlock()
}
// Drain the pool of any connections
func (f *Fs) drainPool(ctx context.Context) (err error) {
f.poolMu.Lock()
defer f.poolMu.Unlock()
if sessions := f.getSessions(); sessions != 0 {
fs.Debugf(f, "Not closing %d unused connections as %d sessions active", len(f.pool), sessions)
if f.opt.IdleTimeout > 0 {
f.drain.Reset(time.Duration(f.opt.IdleTimeout)) // nudge on the pool emptying timer
}
return nil
}
if f.opt.IdleTimeout > 0 {
f.drain.Stop()
}
if len(f.pool) != 0 {
fs.Debugf(f, "Closing %d unused connections", len(f.pool))
}
for i, c := range f.pool {
if !c.closed() {
cErr := c.close()
if cErr != nil {
err = cErr
}
}
f.pool[i] = nil
}
f.pool = nil
return err
}

View File

@@ -1,793 +0,0 @@
// Package smb provides an interface to SMB servers
package smb
import (
"context"
"fmt"
"io"
"os"
"path"
"strings"
"sync"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/env"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
)
const (
minSleep = 100 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
)
var (
currentUser = env.CurrentUser()
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "smb",
Description: "SMB / CIFS",
NewFs: NewFs,
Options: []fs.Option{{
Name: "host",
Help: "SMB server hostname to connect to.\n\nE.g. \"example.com\".",
Required: true,
}, {
Name: "user",
Help: "SMB username.",
Default: currentUser,
}, {
Name: "port",
Help: "SMB port number.",
Default: 445,
}, {
Name: "pass",
Help: "SMB password.",
IsPassword: true,
}, {
Name: "domain",
Help: "Domain name for NTLM authentication.",
Default: "WORKGROUP",
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),
Help: `Max time before closing idle connections.
If no connections have been returned to the connection pool in the time
given, rclone will empty the connection pool.
Set to 0 to keep connections indefinitely.
`,
Advanced: true,
}, {
Name: "hide_special_share",
Help: "Hide special shares (e.g. print$) which users aren't supposed to access.",
Default: true,
Advanced: true,
}, {
Name: "case_insensitive",
Help: "Whether the server is configured to be case-insensitive.\n\nAlways true on Windows shares.",
Default: true,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: encoder.EncodeZero |
// path separator
encoder.EncodeSlash |
encoder.EncodeBackSlash |
// windows
encoder.EncodeWin |
encoder.EncodeCtl |
encoder.EncodeDot |
// the file turns into 8.3 names (and cannot be converted back)
encoder.EncodeRightSpace |
encoder.EncodeRightPeriod |
//
encoder.EncodeInvalidUtf8,
},
}})
}
// Options defines the configuration for this backend
type Options struct {
Host string `config:"host"`
Port string `config:"port"`
User string `config:"user"`
Pass string `config:"pass"`
Domain string `config:"domain"`
HideSpecial bool `config:"hide_special_share"`
CaseInsensitive bool `config:"case_insensitive"`
IdleTimeout fs.Duration `config:"idle_timeout"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a SMB remote
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed config options
features *fs.Features // optional features
pacer *fs.Pacer // pacer for operations
sessions int32
poolMu sync.Mutex
pool []*conn
drain *time.Timer // used to drain the pool when we stop using the connections
ctx context.Context
}
// Object describes a file at the server
type Object struct {
fs *Fs // reference to Fs
remote string // the remote path
statResult os.FileInfo
}
// NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
root = strings.Trim(root, "/")
f := &Fs{
name: name,
opt: *opt,
ctx: ctx,
root: root,
}
f.features = (&fs.Features{
CaseInsensitive: opt.CaseInsensitive,
CanHaveEmptyDirectories: true,
BucketBased: true,
}).Fill(ctx, f)
f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
// set the pool drainer timer going
if opt.IdleTimeout > 0 {
f.drain = time.AfterFunc(time.Duration(opt.IdleTimeout), func() { _ = f.drainPool(ctx) })
}
// test if the root exists as a file
share, dir := f.split("")
if share == "" || dir == "" {
return f, nil
}
cn, err := f.getConnection(ctx, share)
if err != nil {
return nil, err
}
stat, err := cn.smbShare.Stat(f.toSambaPath(dir))
f.putConnection(&cn)
if err != nil {
// ignore stat error here
return f, nil
}
if !stat.IsDir() {
f.root, err = path.Dir(root), fs.ErrorIsFile
}
fs.Debugf(f, "Using root directory %q", f.root)
return f, err
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
bucket, file := f.split("")
if bucket == "" {
return fmt.Sprintf("smb://%s@%s:%s/", f.opt.User, f.opt.Host, f.opt.Port)
}
return fmt.Sprintf("smb://%s@%s:%s/%s/%s", f.opt.User, f.opt.Host, f.opt.Port, bucket, file)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// Hashes returns nothing as SMB itself doesn't have a way to tell checksums
func (f *Fs) Hashes() hash.Set {
return hash.NewHashSet()
}
// Precision returns the precision of mtime
func (f *Fs) Precision() time.Duration {
return time.Millisecond
}
// NewObject creates a new file object
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
share, path := f.split(remote)
return f.findObjectSeparate(ctx, share, path)
}
func (f *Fs) findObjectSeparate(ctx context.Context, share, path string) (fs.Object, error) {
if share == "" || path == "" {
return nil, fs.ErrorIsDir
}
cn, err := f.getConnection(ctx, share)
if err != nil {
return nil, err
}
stat, err := cn.smbShare.Stat(f.toSambaPath(path))
f.putConnection(&cn)
if err != nil {
return nil, translateError(err, false)
}
if stat.IsDir() {
return nil, fs.ErrorIsDir
}
return f.makeEntry(share, path, stat), nil
}
// Mkdir creates a directory on the server
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
share, path := f.split(dir)
if share == "" || path == "" {
return nil
}
cn, err := f.getConnection(ctx, share)
if err != nil {
return err
}
err = cn.smbShare.MkdirAll(f.toSambaPath(path), 0o755)
f.putConnection(&cn)
return err
}
// Rmdir removes an empty directory on the server
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
share, path := f.split(dir)
if share == "" || path == "" {
return nil
}
cn, err := f.getConnection(ctx, share)
if err != nil {
return err
}
err = cn.smbShare.Remove(f.toSambaPath(path))
f.putConnection(&cn)
return err
}
// Put uploads a file
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o := &Object{
fs: f,
remote: src.Remote(),
}
err := o.Update(ctx, in, src, options...)
if err == nil {
return o, nil
}
return nil, err
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
o := &Object{
fs: f,
remote: src.Remote(),
}
err := o.Update(ctx, in, src, options...)
if err == nil {
return o, nil
}
return nil, err
}
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (_ fs.Object, err error) {
dstShare, dstPath := f.split(remote)
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}
srcShare, srcPath := srcObj.split()
if dstShare != srcShare {
fs.Debugf(src, "Can't move - must be on the same share")
return nil, fs.ErrorCantMove
}
err = f.ensureDirectory(ctx, dstShare, dstPath)
if err != nil {
return nil, fmt.Errorf("failed to make parent directories: %w", err)
}
cn, err := f.getConnection(ctx, dstShare)
if err != nil {
return nil, err
}
err = cn.smbShare.Rename(f.toSambaPath(srcPath), f.toSambaPath(dstPath))
f.putConnection(&cn)
if err != nil {
return nil, translateError(err, false)
}
return f.findObjectSeparate(ctx, dstShare, dstPath)
}
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
dstShare, dstPath := f.split(dstRemote)
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return fs.ErrorCantDirMove
}
srcShare, srcPath := srcFs.split(srcRemote)
if dstShare != srcShare {
fs.Debugf(src, "Can't move - must be on the same share")
return fs.ErrorCantDirMove
}
err = f.ensureDirectory(ctx, dstShare, dstPath)
if err != nil {
return fmt.Errorf("failed to make parent directories: %w", err)
}
cn, err := f.getConnection(ctx, dstShare)
if err != nil {
return err
}
defer f.putConnection(&cn)
_, err = cn.smbShare.Stat(dstPath)
if os.IsNotExist(err) {
err = cn.smbShare.Rename(f.toSambaPath(srcPath), f.toSambaPath(dstPath))
return translateError(err, true)
}
return fs.ErrorDirExists
}
// List files and directories in a directory
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
share, _path := f.split(dir)
cn, err := f.getConnection(ctx, share)
if err != nil {
return nil, err
}
defer f.putConnection(&cn)
if share == "" {
shares, err := cn.smbSession.ListSharenames()
for _, shh := range shares {
shh = f.toNativePath(shh)
if strings.HasSuffix(shh, "$") && f.opt.HideSpecial {
continue
}
entries = append(entries, fs.NewDir(shh, time.Time{}))
}
return entries, err
}
dirents, err := cn.smbShare.ReadDir(f.toSambaPath(_path))
if err != nil {
return entries, translateError(err, true)
}
for _, file := range dirents {
nfn := f.toNativePath(file.Name())
if file.IsDir() {
entries = append(entries, fs.NewDir(path.Join(dir, nfn), file.ModTime()))
} else {
entries = append(entries, f.makeEntryRelative(share, _path, nfn, file))
}
}
return entries, nil
}
// About returns things about remaining and used spaces
func (f *Fs) About(ctx context.Context) (_ *fs.Usage, err error) {
share, dir := f.split("/")
if share == "" {
return nil, fs.ErrorListBucketRequired
}
dir = f.toSambaPath(dir)
cn, err := f.getConnection(ctx, share)
if err != nil {
return nil, err
}
stat, err := cn.smbShare.Statfs(dir)
f.putConnection(&cn)
if err != nil {
return nil, err
}
bs := int64(stat.BlockSize())
usage := &fs.Usage{
Total: fs.NewUsageValue(bs * int64(stat.TotalBlockCount())),
Used: fs.NewUsageValue(bs * int64(stat.TotalBlockCount()-stat.FreeBlockCount())),
Free: fs.NewUsageValue(bs * int64(stat.AvailableBlockCount())),
}
return usage, nil
}
// Shutdown the backend, closing any background tasks and any
// cached connections.
func (f *Fs) Shutdown(ctx context.Context) error {
return f.drainPool(ctx)
}
func (f *Fs) makeEntry(share, _path string, stat os.FileInfo) *Object {
remote := path.Join(share, _path)
return &Object{
fs: f,
remote: trimPathPrefix(remote, f.root),
statResult: stat,
}
}
func (f *Fs) makeEntryRelative(share, _path, relative string, stat os.FileInfo) *Object {
return f.makeEntry(share, path.Join(_path, relative), stat)
}
func (f *Fs) ensureDirectory(ctx context.Context, share, _path string) error {
dir := path.Dir(_path)
if dir == "." {
return nil
}
cn, err := f.getConnection(ctx, share)
if err != nil {
return err
}
err = cn.smbShare.MkdirAll(f.toSambaPath(dir), 0o755)
f.putConnection(&cn)
return err
}
/// Object
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// ModTime is the last modified time (read-only)
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.statResult.ModTime()
}
// Size is the file length
func (o *Object) Size() int64 {
return o.statResult.Size()
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Hash always returns empty value
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
// Storable returns if this object is storable
func (o *Object) Storable() bool {
return true
}
// SetModTime sets modTime on a particular file
func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
share, reqDir := o.split()
if share == "" || reqDir == "" {
return fs.ErrorCantSetModTime
}
reqDir = o.fs.toSambaPath(reqDir)
cn, err := o.fs.getConnection(ctx, share)
if err != nil {
return err
}
defer o.fs.putConnection(&cn)
err = cn.smbShare.Chtimes(reqDir, t, t)
if err != nil {
return err
}
fi, err := cn.smbShare.Stat(reqDir)
if err == nil {
o.statResult = fi
}
return err
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
share, filename := o.split()
if share == "" || filename == "" {
return nil, fs.ErrorIsDir
}
filename = o.fs.toSambaPath(filename)
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
case *fs.SeekOption:
offset = x.Offset
case *fs.RangeOption:
offset, limit = x.Decode(o.Size())
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
o.fs.addSession() // Show session in use
defer o.fs.removeSession()
cn, err := o.fs.getConnection(ctx, share)
if err != nil {
return nil, err
}
fl, err := cn.smbShare.OpenFile(filename, os.O_RDONLY, 0)
if err != nil {
o.fs.putConnection(&cn)
return nil, fmt.Errorf("failed to open: %w", err)
}
pos, err := fl.Seek(offset, io.SeekStart)
if err != nil {
o.fs.putConnection(&cn)
return nil, fmt.Errorf("failed to seek: %w", err)
}
if pos != offset {
o.fs.putConnection(&cn)
return nil, fmt.Errorf("failed to seek: wrong position (expected=%d, reported=%d)", offset, pos)
}
in = readers.NewLimitedReadCloser(fl, limit)
in = &boundReadCloser{
rc: in,
close: func() error {
o.fs.putConnection(&cn)
return nil
},
}
return in, nil
}
// Update the Object from in with modTime and size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
share, filename := o.split()
if share == "" || filename == "" {
return fs.ErrorIsDir
}
err = o.fs.ensureDirectory(ctx, share, filename)
if err != nil {
return fmt.Errorf("failed to make parent directories: %w", err)
}
filename = o.fs.toSambaPath(filename)
o.fs.addSession() // Show session in use
defer o.fs.removeSession()
cn, err := o.fs.getConnection(ctx, share)
if err != nil {
return err
}
defer func() {
o.statResult, _ = cn.smbShare.Stat(filename)
o.fs.putConnection(&cn)
}()
fl, err := cn.smbShare.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o644)
if err != nil {
return fmt.Errorf("failed to open: %w", err)
}
// remove the file if upload failed
remove := func() {
// Windows doesn't allow removal of files without closing file
removeErr := fl.Close()
if removeErr != nil {
fs.Debugf(src, "failed to close the file for delete: %v", removeErr)
// try to remove the file anyway; the file may be already closed
}
removeErr = cn.smbShare.Remove(filename)
if removeErr != nil {
fs.Debugf(src, "failed to remove: %v", removeErr)
} else {
fs.Debugf(src, "removed after failed upload: %v", err)
}
}
_, err = fl.ReadFrom(in)
if err != nil {
remove()
return fmt.Errorf("Update ReadFrom failed: %w", err)
}
err = fl.Close()
if err != nil {
remove()
return fmt.Errorf("Update Close failed: %w", err)
}
// Set the modified time
err = o.SetModTime(ctx, src.ModTime(ctx))
if err != nil {
return fmt.Errorf("Update SetModTime failed: %w", err)
}
return nil
}
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
share, filename := o.split()
if share == "" || filename == "" {
return fs.ErrorIsDir
}
filename = o.fs.toSambaPath(filename)
cn, err := o.fs.getConnection(ctx, share)
if err != nil {
return err
}
err = cn.smbShare.Remove(filename)
o.fs.putConnection(&cn)
return err
}
// String converts this Object to a string
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
/// Misc
// split returns share name and path in the share from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (shareName, filepath string) {
return bucket.Split(path.Join(f.root, rootRelativePath))
}
// split returns share name and path in the share from the object
func (o *Object) split() (shareName, filepath string) {
return o.fs.split(o.remote)
}
func (f *Fs) toSambaPath(path string) string {
// 1. encode via Rclone's escaping system
// 2. convert to backslash-separated path
return strings.ReplaceAll(f.opt.Enc.FromStandardPath(path), "/", "\\")
}
func (f *Fs) toNativePath(path string) string {
// 1. convert *back* to slash-separated path
// 2. encode via Rclone's escaping system
return f.opt.Enc.ToStandardPath(strings.ReplaceAll(path, "\\", "/"))
}
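A quick illustration of the round trip these two helpers perform (the encoder step is omitted here):
// Illustrative only: slash to backslash for SMB calls, and back for rclone.
sambaPath := strings.ReplaceAll("dir/sub/file.txt", "/", "\\")
nativePath := strings.ReplaceAll(sambaPath, "\\", "/")
fmt.Println(sambaPath, nativePath) // dir\sub\file.txt dir/sub/file.txt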
func ensureSuffix(s, suffix string) string {
if strings.HasSuffix(s, suffix) {
return s
}
return s + suffix
}
func trimPathPrefix(s, prefix string) string {
// we need to clean the paths to make tests pass!
s = betterPathClean(s)
prefix = betterPathClean(prefix)
if s == prefix || s == prefix+"/" {
return ""
}
prefix = ensureSuffix(prefix, "/")
return strings.TrimPrefix(s, prefix)
}
func betterPathClean(p string) string {
d := path.Clean(p)
if d == "." {
return ""
}
return d
}
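Example behaviour of the helpers above, using the functions defined in this file:
// Illustrative calls only.
fmt.Println(trimPathPrefix("share/dir/file.txt", "share")) // dir/file.txt
fmt.Println(trimPathPrefix("share", "share"))              // empty string
fmt.Println(betterPathClean("./share/"))                   // share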
type boundReadCloser struct {
rc io.ReadCloser
close func() error
}
func (r *boundReadCloser) Read(p []byte) (n int, err error) {
return r.rc.Read(p)
}
func (r *boundReadCloser) Close() error {
err1 := r.rc.Close()
err2 := r.close()
if err1 != nil {
return err1
}
return err2
}
func translateError(e error, dir bool) error {
if os.IsNotExist(e) {
if dir {
return fs.ErrorDirNotFound
}
return fs.ErrorObjectNotFound
}
return e
}
var (
_ fs.Fs = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Mover = &Fs{}
_ fs.DirMover = &Fs{}
_ fs.Abouter = &Fs{}
_ fs.Shutdowner = &Fs{}
_ fs.Object = &Object{}
_ io.ReadCloser = &boundReadCloser{}
)

View File

@@ -1,17 +0,0 @@
// Test smb filesystem interface
package smb_test
import (
"testing"
"github.com/rclone/rclone/backend/smb"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestSMB:rclone",
NilObject: (*smb.Object)(nil),
})
}

View File

@@ -160,7 +160,6 @@ var (
_ fs.ListRer = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Mover = &Fs{}
_ fs.Copier = &Fs{}
)
// NewFs creates a filesystem backed by Storj.
@@ -721,43 +720,3 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Read the new object
return f.NewObject(ctx, remote)
}
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
// Copy parameters
srcBucket, srcKey := bucket.Split(srcObj.absolute)
dstBucket, dstKey := f.absolute(remote)
options := uplink.CopyObjectOptions{}
// Do the copy
newObject, err := f.project.CopyObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
if err != nil {
// Make sure destination bucket exists
_, err := f.project.EnsureBucket(ctx, dstBucket)
if err != nil {
return nil, fmt.Errorf("copy object failed to create destination bucket: %w", err)
}
// And try again
newObject, err = f.project.CopyObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
if err != nil {
return nil, fmt.Errorf("copy object failed: %w", err)
}
}
// Return the new object
return newObjectFromUplink(f, remote, newObject), nil
}

View File

@@ -2,7 +2,7 @@ package sugarsync
import (
"bytes"
"io"
"io/ioutil"
"net/http"
"testing"
@@ -48,7 +48,7 @@ func TestErrorHandler(t *testing.T) {
} {
t.Run(test.name, func(t *testing.T) {
resp := http.Response{
Body: io.NopCloser(bytes.NewBufferString(test.body)),
Body: ioutil.NopCloser(bytes.NewBufferString(test.body)),
StatusCode: test.code,
Status: test.status,
}

View File

@@ -40,7 +40,7 @@ const (
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
)
// SharedOptions are shared between swift and backends which depend on swift
// SharedOptions are shared between swift and hubic
var SharedOptions = []fs.Option{{
Name: "chunk_size",
Help: `Above this size files will be chunked into a _segments container.
@@ -63,32 +63,6 @@ Rclone will still chunk files bigger than chunk_size when doing normal
copy operations.`,
Default: false,
Advanced: true,
}, {
Name: "no_large_objects",
Help: strings.ReplaceAll(`Disable support for static and dynamic large objects
Swift cannot transparently store files bigger than 5 GiB. There are
two schemes for doing that, static or dynamic large objects, and the
API does not allow rclone to determine whether a file is a static or
dynamic large object without doing a HEAD on the object. Since these
need to be treated differently, this means rclone has to issue HEAD
requests for objects for example when reading checksums.
When |no_large_objects| is set, rclone will assume that there are no
static or dynamic large objects stored. This means it can stop doing
the extra HEAD calls which in turn increases performance greatly
especially when doing a swift to swift transfer with |--checksum| set.
Setting this option implies |no_chunk| and also that no files will be
uploaded in chunks, so files bigger than 5 GiB will just fail on
upload.
If you set this option and there *are* static or dynamic large objects,
then this will give incorrect hashes for them. Downloads will succeed,
but other operations such as Remove and Copy will fail.
`, "|", "`"),
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -248,7 +222,6 @@ type Options struct {
EndpointType string `config:"endpoint_type"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
NoChunk bool `config:"no_chunk"`
NoLargeObjects bool `config:"no_large_objects"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -1127,24 +1100,15 @@ func (o *Object) hasHeader(ctx context.Context, header string) (bool, error) {
// isDynamicLargeObject checks for X-Object-Manifest header
func (o *Object) isDynamicLargeObject(ctx context.Context) (bool, error) {
if o.fs.opt.NoLargeObjects {
return false, nil
}
return o.hasHeader(ctx, "X-Object-Manifest")
}
// isStaticLargeObject checks for the X-Static-Large-Object header
func (o *Object) isStaticLargeObject(ctx context.Context) (bool, error) {
if o.fs.opt.NoLargeObjects {
return false, nil
}
return o.hasHeader(ctx, "X-Static-Large-Object")
}
func (o *Object) isLargeObject(ctx context.Context) (result bool, err error) {
if o.fs.opt.NoLargeObjects {
return false, nil
}
result, err = o.hasHeader(ctx, "X-Static-Large-Object")
if result {
return
@@ -1500,7 +1464,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
headers := m.ObjectHeaders()
fs.OpenOptionAddHeaders(options, headers)
if (size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk)) && !o.fs.opt.NoLargeObjects {
if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
_, err = o.updateChunks(ctx, in, headers, size, contentType)
if err != nil {
return err
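
The no_large_objects option introduced earlier in this file lets rclone skip the per-object HEAD request otherwise needed to tell static and dynamic large objects apart, and the hunk above ties it into the upload path. The changed condition can be read as a single predicate: chunk when the object exceeds chunk_size, or when its size is unknown and chunked uploads are not disabled, but never when no_large_objects asserts that nothing over the 5 GiB single-object limit will be stored. A small sketch with hypothetical parameter names:

```go
package main

import "fmt"

// shouldChunk mirrors the condition in Update above: chunk when the object is
// larger than chunk_size, or when the size is unknown (-1) and chunking is not
// disabled, but never when no_large_objects is set.
func shouldChunk(size, chunkSize int64, noChunk, noLargeObjects bool) bool {
	return (size > chunkSize || (size == -1 && !noChunk)) && !noLargeObjects
}

func main() {
	const chunkSize = 5 << 30 // 5 GiB, the Swift single-object limit
	fmt.Println(shouldChunk(6<<30, chunkSize, false, false)) // true: over the limit, must segment
	fmt.Println(shouldChunk(-1, chunkSize, false, false))    // true: unknown size, chunking allowed
	fmt.Println(shouldChunk(6<<30, chunkSize, false, true))  // false: no_large_objects wins (upload would fail)
}
```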

View File

@@ -6,6 +6,7 @@ import (
"context"
"errors"
"io"
"io/ioutil"
"testing"
"github.com/ncw/swift/v2"
@@ -135,7 +136,7 @@ func (f *Fs) testWithChunkFail(t *testing.T) {
buf := bytes.NewBufferString(contents[:errPosition])
errMessage := "potato"
er := &readers.ErrorReader{Err: errors.New(errMessage)}
in := io.NopCloser(io.MultiReader(buf, er))
in := ioutil.NopCloser(io.MultiReader(buf, er))
file.Size = contentSize
obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)

View File

@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"sync"
"time"
@@ -86,7 +87,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
errs[i] = fmt.Errorf("%s: %w", o.UpstreamFs().Name(), err)
if len(entries) > 1 {
// Drain the input buffer to allow other uploads to continue
_, _ = io.Copy(io.Discard, readers[i])
_, _ = io.Copy(ioutil.Discard, readers[i])
}
}
} else {
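
The drain above matters because every upstream upload is fed from one shared writer: a reader that stops consuming would block that writer and stall all the other uploads. A small, self-contained illustration of the idea using io.Pipe and io.MultiWriter (a sketch of a union-style fan-out, not the backend's exact plumbing):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader(strings.Repeat("x", 1<<20))

	r1, w1 := io.Pipe()
	r2, w2 := io.Pipe()
	done := make(chan string, 2)

	// Healthy upload: consumes its stream in full.
	go func() {
		n, _ := io.Copy(io.Discard, r1)
		done <- fmt.Sprintf("upload 1 consumed %d bytes", n)
	}()

	// Failed upload: gives up, but still drains its pipe so the shared
	// writer below is never blocked waiting on it.
	go func() {
		_, _ = io.Copy(io.Discard, r2)
		done <- "upload 2 failed but drained its input"
	}()

	// One source feeding both uploads.
	_, _ = io.Copy(io.MultiWriter(w1, w2), src)
	_ = w1.Close()
	_ = w2.Close()

	fmt.Println(<-done)
	fmt.Println(<-done)
}
```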

View File

@@ -7,6 +7,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"path"
"path/filepath"
"strings"
@@ -221,19 +222,19 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
o := srcObj.UnWrapUpstream()
su := o.UpstreamFs()
if su.Features().Copy == nil {
return nil, fs.ErrorCantCopy
var srcFs fs.Info
if srcObj, ok := src.(*Object); ok {
// Have a union object - unwrap
o := srcObj.UnWrapUpstream()
srcFs = o.UpstreamFs()
src = o
} else {
// Have a non union object - it might be compatible with a union member
srcFs = src.Fs()
}
var du *upstream.Fs
for _, u := range f.upstreams {
if operations.Same(u.RootFs, su.RootFs) {
if u.Features().Copy != nil && operations.Same(u.RootFs, srcFs) {
du = u
}
}
@@ -243,7 +244,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
if !du.IsCreatable() {
return nil, fs.ErrorPermissionDenied
}
co, err := du.Features().Copy(ctx, o, remote)
co, err := du.Features().Copy(ctx, src, remote)
if err != nil || co == nil {
return nil, err
}
@@ -500,7 +501,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
if len(upstreams) > 1 {
// Drain the input buffer to allow other uploads to continue
_, _ = io.Copy(io.Discard, readers[i])
_, _ = io.Copy(ioutil.Discard, readers[i])
}
return
}
@@ -893,22 +894,18 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
WriteMetadata: true,
UserMetadata: true,
}).Fill(ctx, f)
canMove, slowHash := true, false
canMove := true
for _, f := range upstreams {
features = features.Mask(ctx, f) // Mask all upstream fs
if !operations.CanServerSideMove(f) {
canMove = false
}
slowHash = slowHash || f.Features().SlowHash
}
// We can move if all remotes support Move or Copy
if canMove {
features.Move = f.Move
}
// If any of upstreams are SlowHash, propagate it
features.SlowHash = slowHash
// Enable ListR when upstreams either support ListR or is local
// But not when all upstreams are local
if features.ListR == nil {
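
The Copy change earlier in this file means the source no longer has to be a union object: union objects are unwrapped, anything else is taken via src.Fs(), and an upstream is then chosen that both implements server-side Copy and matches the source backend. A rough, self-contained sketch of that selection; the backend and object types are simplified stand-ins for fs.Fs and fs.Object, and the name match stands in for rclone's root comparison:

```go
package main

import (
	"errors"
	"fmt"
)

var errCantCopy = errors.New("can't copy server side - fall back to streaming")

type backend struct {
	name    string
	canCopy bool
}

type object struct{ fs *backend }

// pickUpstream returns the first upstream that supports server-side copy and
// is the same backend as the source object, mirroring the loop above.
func pickUpstream(upstreams []*backend, src object) (*backend, error) {
	for _, u := range upstreams {
		if u.canCopy && u.name == src.fs.name {
			return u, nil
		}
	}
	return nil, errCantCopy
}

func main() {
	upstreams := []*backend{{name: "s3-a", canCopy: true}, {name: "local", canCopy: false}}

	dst, err := pickUpstream(upstreams, object{fs: &backend{name: "s3-a"}})
	fmt.Println(dst.name, err) // s3-a <nil>

	_, err = pickUpstream(upstreams, object{fs: &backend{name: "drive"}})
	fmt.Println(err) // can't copy server side - fall back to streaming
}
```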

View File

@@ -7,6 +7,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
@@ -238,7 +239,7 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe
func (f *Fs) decodeError(resp *http.Response, response interface{}) (err error) {
defer fs.CheckClose(resp.Body, &err)
body, err := io.ReadAll(resp.Body)
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}

View File

@@ -991,7 +991,6 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
}
return nil, fs.ErrorCantMove
}
srcFs := srcObj.fs
dstPath := f.filePath(remote)
err := f.mkParentDir(ctx, dstPath)
if err != nil {
@@ -1014,10 +1013,9 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
if f.useOCMtime {
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
}
// Direct the MOVE/COPY to the source server
err = srcFs.pacer.Call(func() (bool, error) {
resp, err = srcFs.srv.Call(ctx, &opts)
return srcFs.shouldRetry(ctx, resp, err)
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("Copy call failed: %w", err)
@@ -1111,10 +1109,9 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
"Overwrite": "F",
},
}
// Direct the MOVE/COPY to the source server
err = srcFs.pacer.Call(func() (bool, error) {
resp, err = srcFs.srv.Call(ctx, &opts)
return srcFs.shouldRetry(ctx, resp, err)
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return fmt.Errorf("DirMove MOVE call failed: %w", err)
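
In WebDAV, COPY and MOVE are issued against the source URL with the target carried in the Destination header (RFC 4918), which is why the requests above are routed through the source Fs's client and pacer. A bare-bones sketch of such a request; the URLs are placeholders, and sending plus retries are left to rclone's own machinery:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// COPY is addressed to the *source*; the target goes in Destination.
	req, err := http.NewRequest("COPY", "https://dav.example.com/remote.php/src/file.txt", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Destination", "https://dav.example.com/remote.php/dst/file.txt")
	req.Header.Set("Overwrite", "F") // "F": fail instead of overwriting an existing target

	fmt.Println(req.Method, req.URL.String())
	fmt.Println("Destination:", req.Header.Get("Destination"))
}
```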

View File

@@ -7,6 +7,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
@@ -1218,7 +1219,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if partialContent && resp.StatusCode == 200 {
if start > 0 {
// We need to read and discard the beginning of the data...
_, err = io.CopyN(io.Discard, resp.Body, start)
_, err = io.CopyN(ioutil.Discard, resp.Body, start)
if err != nil {
if resp != nil {
_ = resp.Body.Close()

View File

@@ -1,17 +0,0 @@
#!/bin/bash
# This adds the rclone version each backend was first released in to its docs page
set -e
for backend in $( find backend -maxdepth 1 -type d ); do
backend=$(basename $backend)
if [[ "$backend" == "backend" || "$backend" == "vfs" || "$backend" == "all" || "$backend" == "azurefile" ]]; then
continue
fi
commit=$(git log --oneline -- $backend | tail -1 | cut -d' ' -f1)
if [ "$commit" == "" ]; then
commit=$(git log --oneline -- backend/$backend | tail -1 | cut -d' ' -f1)
fi
version=$(git tag --contains $commit | grep ^v | sort -n | head -1)
echo $backend $version
sed -i~ "4i versionIntroduced: \"$version\"" docs/content/${backend}.md
done

View File

@@ -9,6 +9,7 @@ import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
@@ -239,7 +240,7 @@ func buildWindowsResourceSyso(goarch string, versionTag string) string {
log.Printf("Failed to resolve path: %v", err)
return ""
}
err = os.WriteFile(jsonPath, bs, 0644)
err = ioutil.WriteFile(jsonPath, bs, 0644)
if err != nil {
log.Printf("Failed to write %s: %v", jsonPath, err)
return ""
@@ -475,7 +476,7 @@ func main() {
run("mkdir", "build")
}
chdir("build")
err := os.WriteFile("version.txt", []byte(fmt.Sprintf("rclone %s\n", version)), 0666)
err := ioutil.WriteFile("version.txt", []byte(fmt.Sprintf("rclone %s\n", version)), 0666)
if err != nil {
log.Fatalf("Couldn't write version.txt: %v", err)
}

View File

@@ -16,6 +16,7 @@ import (
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
@@ -167,7 +168,7 @@ func defaultBinDir() string {
// read the body or an error message
func readBody(in io.Reader) string {
data, err := io.ReadAll(in)
data, err := ioutil.ReadAll(in)
if err != nil {
return fmt.Sprintf("Error reading body: %v", err.Error())
}

View File

@@ -49,6 +49,7 @@ docs = [
"hdfs.md",
"hidrive.md",
"http.md",
"hubic.md",
"internetarchive.md",
"jottacloud.md",
"koofr.md",
@@ -59,7 +60,6 @@ docs = [
"azureblob.md",
"onedrive.md",
"opendrive.md",
"oracleobjectstorage.md",
"qingstor.md",
"sia.md",
"swift.md",
@@ -68,7 +68,6 @@ docs = [
"putio.md",
"seafile.md",
"sftp.md",
"smb.md",
"storj.md",
"sugarsync.md",
"tardigrade.md", # stub only to redirect to storj.md

View File

@@ -5,6 +5,7 @@ import (
"bytes"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
@@ -55,7 +56,7 @@ func main() {
log.Fatalf("Syntax: %s", os.Args[0])
}
// v1.54.0
versionBytes, err := os.ReadFile("VERSION")
versionBytes, err := ioutil.ReadFile("VERSION")
if err != nil {
log.Fatalf("Failed to read version: %v", err)
}

View File

@@ -15,7 +15,6 @@ else
fi
rclone ${dry_run} -vv -P --checkers 16 --transfers 16 delete \
--fast-list \
--include "/${version}**" \
--include "/branch/*/${version}**" \
--include "/branch/${version}**" \
memstore:beta-rclone-org

View File

@@ -93,9 +93,6 @@ provided by a backend. Where the value is unlimited it is omitted.
Some backends do not support the ` + "`rclone about`" + ` command at all,
see the complete list in the [documentation](https://rclone.org/overview/#optional-features).
`,
Annotations: map[string]string{
"versionIntroduced": "v1.41",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
f := cmd.NewFsSrc(args)

View File

@@ -30,9 +30,6 @@ rclone config.
Use the --auth-no-open-browser flag to prevent rclone from opening the
auth link in the default browser automatically.`,
Annotations: map[string]string{
"versionIntroduced": "v1.27",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 3, command, args)
return config.Authorize(context.Background(), args, noAutoBrowser)

View File

@@ -58,9 +58,6 @@ Pass arguments to the backend by placing them on the end of the line
Note to run these commands on a running backend then see
[backend/command](/rc/#backend-command) in the rc docs.
`,
Annotations: map[string]string{
"versionIntroduced": "v1.52",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(2, 1e6, command, args)
name, remote := args[0], args[1]

View File

@@ -5,6 +5,7 @@ package bilib
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
@@ -105,7 +106,7 @@ func CopyDir(src string, dst string) (err error) {
return
}
entries, err := os.ReadDir(src)
entries, err := ioutil.ReadDir(src)
if err != nil {
return
}
@@ -121,7 +122,7 @@ func CopyDir(src string, dst string) (err error) {
}
} else {
// Skip symlinks.
if entry.Type()&os.ModeSymlink != 0 {
if entry.Mode()&os.ModeSymlink != 0 {
continue
}
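
The two variants above differ in more than the package name: ioutil.ReadDir returns []os.FileInfo (typically one Lstat per entry), while os.ReadDir returns []os.DirEntry, whose Type() exposes just the mode type bits, which is enough for the symlink skip. A quick illustration:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	entries, err := os.ReadDir(".")
	if err != nil {
		panic(err)
	}
	for _, entry := range entries {
		// DirEntry.Type() gives the mode type bits without a full Stat.
		if entry.Type()&os.ModeSymlink != 0 {
			fmt.Println("skipping symlink:", entry.Name())
			continue
		}
		fmt.Println(entry.Name(), "dir:", entry.IsDir())
	}
}
```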

View File

@@ -2,7 +2,7 @@ package bilib
import (
"bytes"
"os"
"io/ioutil"
"sort"
"strconv"
)
@@ -57,5 +57,5 @@ func SaveList(list []string, path string) error {
_, _ = buf.WriteString(strconv.Quote(s))
_ = buf.WriteByte('\n')
}
return os.WriteFile(path, buf.Bytes(), PermSecure)
return ioutil.WriteFile(path, buf.Bytes(), PermSecure)
}

View File

@@ -10,6 +10,7 @@ import (
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"path"
@@ -302,7 +303,7 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str
// Execute test scenario
scenFile := filepath.Join(b.testDir, "scenario.txt")
scenBuf, err := os.ReadFile(scenFile)
scenBuf, err := ioutil.ReadFile(scenFile)
scenReplacer := b.newReplacer(false)
require.NoError(b.t, err)
b.step = 0
@@ -902,8 +903,8 @@ func (b *bisyncTest) compareResults() int {
// save mangled logs so difference is easier on eyes
goldenFile := filepath.Join(b.logDir, "mangled.golden.log")
resultFile := filepath.Join(b.logDir, "mangled.result.log")
require.NoError(b.t, os.WriteFile(goldenFile, []byte(goldenText), bilib.PermSecure))
require.NoError(b.t, os.WriteFile(resultFile, []byte(resultText), bilib.PermSecure))
require.NoError(b.t, ioutil.WriteFile(goldenFile, []byte(goldenText), bilib.PermSecure))
require.NoError(b.t, ioutil.WriteFile(resultFile, []byte(resultText), bilib.PermSecure))
}
if goldenText == resultText {
@@ -973,7 +974,7 @@ func (b *bisyncTest) storeGolden() {
goldName := b.toGolden(fileName)
goldPath := filepath.Join(b.goldenDir, goldName)
err := os.WriteFile(goldPath, []byte(text), bilib.PermSecure)
err := ioutil.WriteFile(goldPath, []byte(text), bilib.PermSecure)
assert.NoError(b.t, err, "writing golden file %s", goldName)
if goldName != fileName {
@@ -985,7 +986,7 @@ func (b *bisyncTest) storeGolden() {
// mangleResult prepares test logs or listings for comparison
func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
buf, err := os.ReadFile(filepath.Join(dir, file))
buf, err := ioutil.ReadFile(filepath.Join(dir, file))
require.NoError(b.t, err)
text := string(buf)
@@ -1204,7 +1205,7 @@ func (b *bisyncTest) ensureDir(parent, dir string, optional bool) string {
}
func (b *bisyncTest) listDir(dir string) (names []string) {
files, err := os.ReadDir(dir)
files, err := ioutil.ReadDir(dir)
require.NoError(b.t, err)
for _, file := range files {
names = append(names, filepath.Base(file.Name()))

View File

@@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
@@ -115,9 +116,6 @@ var commandDefinition = &cobra.Command{
Use: "bisync remote1:path1 remote2:path2",
Short: shortHelp,
Long: longHelp,
Annotations: map[string]string{
"versionIntroduced": "v1.58",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(2, 2, command, args)
fs1, file1, fs2, file2 := cmd.NewFsSrcDstFiles(args)
@@ -200,7 +198,7 @@ func (opt *Options) applyFilters(ctx context.Context) (context.Context, error) {
_ = f.Close()
hashFile := filtersFile + ".md5"
wantHash, err := os.ReadFile(hashFile)
wantHash, err := ioutil.ReadFile(hashFile)
if err != nil && !opt.Resync {
return ctx, fmt.Errorf("filters file md5 hash not found (must run --resync): %s", filtersFile)
}
@@ -211,7 +209,7 @@ func (opt *Options) applyFilters(ctx context.Context) (context.Context, error) {
if opt.Resync {
fs.Infof(nil, "Storing filters file hash to %s", hashFile)
if err := os.WriteFile(hashFile, []byte(gotHash), bilib.PermSecure); err != nil {
if err := ioutil.WriteFile(hashFile, []byte(gotHash), bilib.PermSecure); err != nil {
return ctx, err
}
}
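
The filters-file handling above is a small safety interlock: bisync keeps an MD5 of the filters file alongside it and refuses to run until --resync has stored a fresh hash, since changed filters can silently change what gets synced. A self-contained sketch of the idea; the file names, the permission bits, and any wording beyond the error shown above are illustrative only:

```go
package main

import (
	"crypto/md5"
	"fmt"
	"os"
)

// checkFiltersHash compares the filters file against a stored .md5 and, when
// resync is set, stores a fresh hash instead (sketch of the logic above).
func checkFiltersHash(filtersFile, hashFile string, resync bool) error {
	data, err := os.ReadFile(filtersFile)
	if err != nil {
		return err
	}
	gotHash := fmt.Sprintf("%x", md5.Sum(data))

	if resync {
		fmt.Println("Storing filters file hash to", hashFile)
		return os.WriteFile(hashFile, []byte(gotHash), 0600)
	}

	wantHash, err := os.ReadFile(hashFile)
	if err != nil {
		return fmt.Errorf("filters file md5 hash not found (must run --resync): %s", filtersFile)
	}
	if string(wantHash) != gotHash {
		return fmt.Errorf("filters file has changed (must run --resync): %s", filtersFile)
	}
	return nil
}

func main() {
	fmt.Println(checkFiltersHash("filters.txt", "filters.txt.md5", false))
}
```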

View File

@@ -7,6 +7,7 @@ import (
"context"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
@@ -80,7 +81,7 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
}
pidStr := []byte(strconv.Itoa(os.Getpid()))
if err = os.WriteFile(lockFile, pidStr, bilib.PermSecure); err != nil {
if err = ioutil.WriteFile(lockFile, pidStr, bilib.PermSecure); err != nil {
return fmt.Errorf("cannot create lock file: %s: %w", lockFile, err)
}
fs.Debugf(nil, "Lock file created: %s", lockFile)

View File

@@ -25,10 +25,6 @@ var commandDefinition = &cobra.Command{
Print cache stats for a remote in JSON format
`,
Hidden: true,
Annotations: map[string]string{
"versionIntroduced": "v1.39",
"status": "Deprecated",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fs.Logf(nil, `"rclone cachestats" is deprecated, use "rclone backend stats %s" instead`, args[0])

View File

@@ -4,6 +4,7 @@ package cat
import (
"context"
"io"
"io/ioutil"
"log"
"os"
"strings"
@@ -57,9 +58,6 @@ the end and |--offset| and |--count| to print a section in the middle.
Note that if offset is negative it will count from the end, so
|--offset -1 --count 1| is equivalent to |--tail 1|.
`, "|", "`"),
Annotations: map[string]string{
"versionIntroduced": "v1.33",
},
Run: func(command *cobra.Command, args []string) {
usedOffset := offset != 0 || count >= 0
usedHead := head > 0
@@ -79,7 +77,7 @@ Note that if offset is negative it will count from the end, so
fsrc := cmd.NewFsSrc(args)
var w io.Writer = os.Stdout
if discard {
w = io.Discard
w = ioutil.Discard
}
cmd.Run(false, false, command, func() error {
return operations.Cat(context.Background(), fsrc, w, offset, count)

View File

@@ -37,9 +37,6 @@ that don't support hashes or if you really want to check all the data.
Note that hash values in the SUM file are treated as case insensitive.
`, "|", "`") + check.FlagsHelp,
Annotations: map[string]string{
"versionIntroduced": "v1.56",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(3, 3, command, args)
var hashType hash.Type

View File

@@ -20,9 +20,6 @@ var commandDefinition = &cobra.Command{
Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes.
`,
Annotations: map[string]string{
"versionIntroduced": "v1.31",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)

View File

@@ -8,7 +8,6 @@ import (
"io"
"os"
"path"
"strings"
"sync"
"sync/atomic"
"time"
@@ -96,11 +95,8 @@ func (fsys *FS) closeHandle(fh uint64) (errc int) {
}
// lookup a Node given a path
func (fsys *FS) lookupNode(path string) (vfs.Node, int) {
func (fsys *FS) lookupNode(path string) (node vfs.Node, errc int) {
node, err := fsys.VFS.Stat(path)
if err == vfs.ENOENT && fsys.VFS.Opt.Links {
node, err = fsys.VFS.Stat(path + fs.LinkSuffix)
}
return node, translateError(err)
}
@@ -121,13 +117,6 @@ func (fsys *FS) lookupDir(path string) (dir *vfs.Dir, errc int) {
func (fsys *FS) lookupParentDir(filePath string) (leaf string, dir *vfs.Dir, errc int) {
parentDir, leaf := path.Split(filePath)
dir, errc = fsys.lookupDir(parentDir)
// Try to get real leaf for symlinks
if fsys.VFS.Opt.Links {
node, e := fsys.lookupNode(filePath)
if e == 0 {
leaf = node.Name()
}
}
return leaf, dir, errc
}
@@ -164,9 +153,15 @@ func (fsys *FS) stat(node vfs.Node, stat *fuse.Stat_t) (errc int) {
Size := uint64(node.Size())
Blocks := (Size + 511) / 512
modTime := node.ModTime()
Mode := node.Mode().Perm()
if node.IsDir() {
Mode |= fuse.S_IFDIR
} else {
Mode |= fuse.S_IFREG
}
//stat.Dev = 1
stat.Ino = node.Inode() // FIXME do we need to set the inode number?
stat.Mode = getMode(node)
stat.Mode = uint32(Mode)
stat.Nlink = 1
stat.Uid = fsys.VFS.Opt.UID
stat.Gid = fsys.VFS.Opt.GID
@@ -257,7 +252,7 @@ func (fsys *FS) Readdir(dirPath string,
fill(".", nil, 0)
fill("..", nil, 0)
for _, node := range nodes {
name, _ := fsys.VFS.TrimSymlink(node.Name())
name := node.Name()
if len(name) > mountlib.MaxLeafSize {
fs.Errorf(dirPath, "Name too long (%d bytes) for FUSE, skipping: %s", len(name), name)
continue
@@ -335,15 +330,13 @@ func (fsys *FS) CreateEx(filePath string, mode uint32, fi *fuse.FileInfo_t) (err
if errc != 0 {
return errc
}
// translate the fuse flags to os flags
osFlags := translateOpenFlags(fi.Flags) | os.O_CREATE
// translate the fuse mode to os mode
//osMode := getFileMode(mode)
file, err := parentDir.Create(leaf, osFlags)
file, err := parentDir.Create(leaf, fi.Flags)
if err != nil {
return translateError(err)
}
handle, err := file.Open(osFlags)
// translate the fuse flags to os flags
flags := translateOpenFlags(fi.Flags) | os.O_CREATE
handle, err := file.Open(flags)
if err != nil {
return translateError(err)
}
@@ -463,18 +456,6 @@ func (fsys *FS) Rmdir(dirPath string) (errc int) {
// Rename renames a file.
func (fsys *FS) Rename(oldPath string, newPath string) (errc int) {
defer log.Trace(oldPath, "newPath=%q", newPath)("errc=%d", &errc)
if fsys.VFS.Opt.Links {
node, e := fsys.lookupNode(oldPath)
if e == 0 {
if strings.HasSuffix(node.Name(), fs.LinkSuffix) {
oldPath += fs.LinkSuffix
newPath += fs.LinkSuffix
}
}
}
return translateError(fsys.VFS.Rename(oldPath, newPath))
}
@@ -524,58 +505,14 @@ func (fsys *FS) Link(oldpath string, newpath string) (errc int) {
// Symlink creates a symbolic link.
func (fsys *FS) Symlink(target string, newpath string) (errc int) {
defer log.Trace(fsys, "Requested to symlink newpath=%q, target=%q", newpath, target)("errc=%d", &errc)
if fsys.VFS.Opt.Links {
// The user must NOT provide .rclonelink suffix
if strings.HasSuffix(newpath, fs.LinkSuffix) {
fs.Errorf(nil, "Invalid name suffix provided: %v", newpath)
return translateError(vfs.EINVAL)
}
newpath += fs.LinkSuffix
} else {
// The user must provide .rclonelink suffix
if !strings.HasSuffix(newpath, fs.LinkSuffix) {
fs.Errorf(nil, "Invalid name suffix provided: %v", newpath)
return translateError(vfs.EINVAL)
}
}
// Add target suffix when linking to a link
if !strings.HasSuffix(target, fs.LinkSuffix) {
vnode, err := fsys.lookupNode(target)
if err == 0 && strings.HasSuffix(vnode.Name(), fs.LinkSuffix) {
target += fs.LinkSuffix
}
}
return translateError(fsys.VFS.Symlink(target, newpath))
defer log.Trace(target, "newpath=%q", newpath)("errc=%d", &errc)
return -fuse.ENOSYS
}
// Readlink reads the target of a symbolic link.
func (fsys *FS) Readlink(path string) (errc int, linkPath string) {
defer log.Trace(fsys, "Requested to read link")("errc=%v, linkPath=%q", &errc, linkPath)
if fsys.VFS.Opt.Links {
// The user must NOT provide .rclonelink suffix
if strings.HasSuffix(path, fs.LinkSuffix) {
fs.Errorf(nil, "Invalid name suffix provided: %v", path)
return translateError(vfs.EINVAL), ""
}
path += fs.LinkSuffix
} else {
// The user must provide .rclonelink suffix
if !strings.HasSuffix(path, fs.LinkSuffix) {
fs.Errorf(nil, "Invalid name suffix provided: %v", path)
return translateError(vfs.EINVAL), ""
}
}
linkPath, err := fsys.VFS.Readlink(path)
linkPath, _ = fsys.VFS.TrimSymlink(linkPath)
return translateError(err), linkPath
defer log.Trace(path, "")("linkPath=%q, errc=%d", &linkPath, &errc)
return -fuse.ENOSYS, ""
}
// Chmod changes the permission bits of a file.
@@ -690,41 +627,6 @@ func translateOpenFlags(inFlags int) (outFlags int) {
return outFlags
}
// get the Mode from a vfs Node
func getMode(node os.FileInfo) uint32 {
vfsMode := node.Mode()
Mode := vfsMode.Perm()
if vfsMode&os.ModeDir != 0 {
Mode |= fuse.S_IFDIR
} else if vfsMode&os.ModeSymlink != 0 {
Mode |= fuse.S_IFLNK
} else if vfsMode&os.ModeNamedPipe != 0 {
Mode |= fuse.S_IFIFO
} else {
Mode |= fuse.S_IFREG
}
return uint32(Mode)
}
// convert fuse mode to os.FileMode
// func getFileMode(mode uint32) os.FileMode {
// osMode := os.FileMode(0)
// if mode&fuse.S_IFDIR != 0 {
// mode ^= fuse.S_IFDIR
// osMode |= os.ModeDir
// } else if mode&fuse.S_IFREG != 0 {
// mode ^= fuse.S_IFREG
// } else if mode&fuse.S_IFLNK != 0 {
// mode ^= fuse.S_IFLNK
// osMode |= os.ModeSymlink
// } else if mode&fuse.S_IFIFO != 0 {
// mode ^= fuse.S_IFIFO
// osMode |= os.ModeNamedPipe
// }
// osMode |= os.FileMode(mode)
// return osMode
// }
// Make sure interfaces are satisfied
var (
_ fuse.FileSystemInterface = (*FS)(nil)
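
The symlink handling in this file's hunks hinges on one convention: with --links (VFS.Opt.Links), a symlink is stored as an ordinary object named with an fs.LinkSuffix (".rclonelink") appended, whose content is the link target, so lookups, renames, Symlink and Readlink all translate between the user-visible path and the suffixed object name. A toy sketch of that translation; the suffix value and error wording follow the code above, everything else is illustrative:

```go
package main

import (
	"fmt"
	"strings"
)

const linkSuffix = ".rclonelink" // stand-in for fs.LinkSuffix

// toStoredName maps a user-visible path to the stored object name. With links
// mode on, the user must not give the suffix (it is added); with links mode
// off, the user must give it explicitly, mirroring Symlink/Readlink above.
func toStoredName(path string, linksMode bool) (string, error) {
	if linksMode {
		if strings.HasSuffix(path, linkSuffix) {
			return "", fmt.Errorf("invalid name suffix provided: %v", path)
		}
		return path + linkSuffix, nil
	}
	if !strings.HasSuffix(path, linkSuffix) {
		return "", fmt.Errorf("invalid name suffix provided: %v", path)
	}
	return path, nil
}

func main() {
	fmt.Println(toStoredName("docs/readme-link", true))
	fmt.Println(toStoredName("docs/readme-link.rclonelink", false))
	fmt.Println(toStoredName("docs/readme-link", false)) // error: suffix required without --links
}
```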

View File

@@ -44,9 +44,6 @@ var configCommand = &cobra.Command{
remotes and manage existing ones. You may also set or remove a
password to protect your configuration.
`,
Annotations: map[string]string{
"versionIntroduced": "v1.39",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 0, command, args)
return config.EditConfig(context.Background())
@@ -57,18 +54,12 @@ var configEditCommand = &cobra.Command{
Use: "edit",
Short: configCommand.Short,
Long: configCommand.Long,
Annotations: map[string]string{
"versionIntroduced": "v1.39",
},
Run: configCommand.Run,
Run: configCommand.Run,
}
var configFileCommand = &cobra.Command{
Use: "file",
Short: `Show path of configuration file in use.`,
Annotations: map[string]string{
"versionIntroduced": "v1.38",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 0, command, args)
config.ShowConfigLocation()
@@ -78,9 +69,6 @@ var configFileCommand = &cobra.Command{
var configTouchCommand = &cobra.Command{
Use: "touch",
Short: `Ensure configuration file exists.`,
Annotations: map[string]string{
"versionIntroduced": "v1.56",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 0, command, args)
config.SaveConfig()
@@ -90,9 +78,6 @@ var configTouchCommand = &cobra.Command{
var configPathsCommand = &cobra.Command{
Use: "paths",
Short: `Show paths used for configuration, cache, temp etc.`,
Annotations: map[string]string{
"versionIntroduced": "v1.57",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 0, command, args)
fmt.Printf("Config file: %s\n", config.GetConfigPath())
@@ -104,9 +89,6 @@ var configPathsCommand = &cobra.Command{
var configShowCommand = &cobra.Command{
Use: "show [<remote>]",
Short: `Print (decrypted) config file, or the config for a single remote.`,
Annotations: map[string]string{
"versionIntroduced": "v1.38",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
if len(args) == 0 {
@@ -121,9 +103,6 @@ var configShowCommand = &cobra.Command{
var configDumpCommand = &cobra.Command{
Use: "dump",
Short: `Dump the config file as JSON.`,
Annotations: map[string]string{
"versionIntroduced": "v1.39",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 0, command, args)
return config.Dump()
@@ -133,9 +112,6 @@ var configDumpCommand = &cobra.Command{
var configProvidersCommand = &cobra.Command{
Use: "providers",
Short: `List in JSON format all the providers and options.`,
Annotations: map[string]string{
"versionIntroduced": "v1.39",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 0, command, args)
return config.JSONListProviders()
@@ -176,7 +152,7 @@ This will look something like (some irrelevant detail removed):
"State": "*oauth-islocal,teamdrive,,",
"Option": {
"Name": "config_is_local",
"Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n",
"Help": "Use auto config?\n * Say Y if not sure\n * Say N if you are working on a remote or headless machine\n",
"Default": true,
"Examples": [
{
@@ -250,9 +226,6 @@ using remote authorization you would do this:
rclone config create mydrive drive config_is_local=false
`, "|", "`") + configPasswordHelp,
Annotations: map[string]string{
"versionIntroduced": "v1.39",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(2, 256, command, args)
in, err := argsToMap(args[2:])
@@ -316,9 +289,6 @@ require this add an extra parameter thus:
rclone config update myremote env_auth=true config_refresh_token=false
`, "|", "`") + configPasswordHelp,
Annotations: map[string]string{
"versionIntroduced": "v1.39",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 256, command, args)
in, err := argsToMap(args[1:])
@@ -334,9 +304,6 @@ require this add an extra parameter thus:
var configDeleteCommand = &cobra.Command{
Use: "delete name",
Short: "Delete an existing remote.",
Annotations: map[string]string{
"versionIntroduced": "v1.39",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
config.DeleteRemote(args[0])
@@ -359,9 +326,6 @@ For example, to set password of a remote of name myremote you would do:
This command is obsolete now that "config update" and "config create"
both support obscuring passwords directly.
`, "|", "`"),
Annotations: map[string]string{
"versionIntroduced": "v1.39",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 256, command, args)
in, err := argsToMap(args[1:])

Some files were not shown because too many files have changed in this diff