Mirror of https://github.com/rclone/rclone.git (synced 2026-02-04 18:53:16 +00:00)

Compare commits: v1.60-stab ... pasnox-sym (177 commits)
Commits in this compare, newest first (the author and date columns of the original table were not captured; only the SHA1 column survives):

19c6081de2, 1891b6848b, 8323727d19, 0801342108, 5c8bd27c7f, 46de095e9c, 3834de5ab1, 44c3aebfc3,
98fa93f6d1, c6c67a29eb, ad5395e953, 1925ceaade, 8aebf12797, ffeefe8a56, 81ce5e4961, 638058ef91,
b1b62f70d3, 823d89af9a, 448fff9a04, 6257a6035c, 54c0f17f2a, d049cbb59e, 00e853144e, 5ac8cfee56,
496ae8adf6, 2001cc0831, a35490bf70, 01877e5a0f, 614d79121a, 3a6f1f5cd7, 4a31961c4f, 7be9855a70,
6f8112ff67, 67fc227684, 7edb4c0162, 5db4493557, a85c0b0cc2, 52443c2444, 4444d2d102, 08a1ca434b,
a9ce86f9a3, 3167292c2f, ec7cc2b3c3, 2a2fcf1012, 6d62267227, dfd8ad2fff, 43506f8086, ec3cee89d3,
a171497a8b, c6ad15e3b8, 9a81885b51, 3d291da0f6, 43bf177ff7, c446651be8, 6c407dbe15, 5a59b49b6b,
8b9f3bbe29, 8e6a469f98, f650a543ef, 683178a1f4, 3937233e1e, c571200812, 04a663829b, 6b4a2c1c4e,
f73be767a4, 4120dffcc1, 53ff5bb205, 397f428c48, c5a2c9b046, b98d7f6634, beea4d5119, 8e507075d1,
be783a1856, 450c366403, 1dbdc48a77, d7cb17848d, f3c8b7a948, 914fbe242c, f746b2fe85, a131da2c35,
60e4cb6f6f, 0a8b1fe5de, b24c83db21, 4f386a1ccd, ab849b3613, 10aee3926a, 4583b61e3d, 483e9e1ee3,
c2dfc3e5b3, a9bd0c8de6, 1628ca0d46, 313493d51b, 6d18f60725, d74662a751, d05fd2a14f, 097be753ab,
50c9678cea, 7672cde4f3, a4c65532ea, 46b080c092, 0edf6478e3, f7cdf318db, 6f3682c12f, e3d593d40c,
83551bb02e, 430bf0d5eb, dd71f5d968, 7db1c506f2, 959cd938bc, 03b07c280c, 705e8f2fe0, 591fc3609a,
b4a3d1b9ed, 84219b95ab, 2c78f56d48, a61d219bcd, 652d3cdee4, bb1fc5b86d, efd3c6449b, 0ac5795f8c,
2f77651f64, 8daacc2b99, 87fa9f8e46, 1392793334, 0e427216db, 0c56c46523, 617c5d5e1b, ec2024b907,
458845ce89, 57bde20acd, b0248e8070, b285efb476, be6f29930b, 653bc23728, 47b04580db, 919e28b8bf,
3a3bc5a1ae, 133c006c37, e455940f71, 65528fd009, 691159fe94, 09858c0c5a, 5fd0abb2b9, 36c37ffec1,
6a5b7664f7, ebac854512, cafce96185, 92ffcf9f86, 64cdbb67b5, 528fc899fb, d452f502c3, 5d6b8141ec,
776e5ea83a, c9acc06a49, a2dca02594, 210331bf61, 5b5fdc6bc5, 0de74864b6, 7042a11875, 028832ce73,
c7c9356af5, 3292c112c5, 126d71b332, df9be72a82, 6aa8f7409a, 10c884552c, 2617610741, 53dd174f3d,
65987f5970, 1fc864fb32, 22abcc9fd2, 178cf821de, f4a571786c, c0a8ffcbef, 76eeca9eae, 8114744bce,
db5d582404
.github/workflows/build.yml (vendored, 32 changes)

@@ -30,7 +30,7 @@ jobs:
       include:
         - job_name: linux
           os: ubuntu-latest
-          go: '1.19.x'
+          go: '1.19'
           gotags: cmount
           build_flags: '-include "^linux/"'
           check: true
@@ -41,14 +41,14 @@ jobs:

         - job_name: linux_386
           os: ubuntu-latest
-          go: '1.19.x'
+          go: '1.19'
           goarch: 386
           gotags: cmount
           quicktest: true

         - job_name: mac_amd64
           os: macos-11
-          go: '1.19.x'
+          go: '1.19'
           gotags: 'cmount'
           build_flags: '-include "^darwin/amd64" -cgo'
           quicktest: true
@@ -57,14 +57,14 @@ jobs:

         - job_name: mac_arm64
           os: macos-11
-          go: '1.19.x'
+          go: '1.19'
           gotags: 'cmount'
           build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
           deploy: true

         - job_name: windows
           os: windows-latest
-          go: '1.19.x'
+          go: '1.19'
           gotags: cmount
           cgo: '0'
           build_flags: '-include "^windows/"'
@@ -74,20 +74,20 @@ jobs:

         - job_name: other_os
           os: ubuntu-latest
-          go: '1.19.x'
+          go: '1.19'
           build_flags: '-exclude "^(windows/|darwin/|linux/)"'
           compile_all: true
           deploy: true

         - job_name: go1.17
           os: ubuntu-latest
-          go: '1.17.x'
+          go: '1.17'
           quicktest: true
           racequicktest: true

         - job_name: go1.18
           os: ubuntu-latest
-          go: '1.18.x'
+          go: '1.18'
           quicktest: true
           racequicktest: true

@@ -104,7 +104,6 @@ jobs:
      - name: Install Go
        uses: actions/setup-go@v3
        with:
-          stable: 'false'
          go-version: ${{ matrix.go }}
          check-latest: true

@@ -234,6 +233,19 @@ jobs:
          # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
          version: latest

+      # Run govulncheck on the latest go version, the one we build binaries with
+      - name: Install Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: 1.19
+          check-latest: true
+
+      - name: Install govulncheck
+        run: go install golang.org/x/vuln/cmd/govulncheck@latest
+
+      - name: Scan for vulnerabilities
+        run: govulncheck ./...
+
  android:
    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
    timeout-minutes: 30
@@ -250,7 +262,7 @@ jobs:
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
-          go-version: 1.19.x
+          go-version: 1.19

      - name: Go module cache
        uses: actions/cache@v3
.gitignore (vendored, 1 change)

@@ -15,3 +15,4 @@ fuzz-build.zip
 *.rej
 Thumbs.db
 __pycache__
+*.kate-swp
MANUAL.html (generated, 2445 changes): file diff suppressed because it is too large.
MANUAL.txt (generated, 2746 changes): file diff suppressed because it is too large.
Makefile (3 changes)

@@ -81,6 +81,9 @@ quicktest:
 racequicktest:
 	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...

+compiletest:
+	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...
+
 # Do source code quality checks
 check: rclone
 	@echo "-- START CODE QUALITY REPORT -------------------------------"
@@ -50,6 +50,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
 * IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
 * Koofr [:page_facing_up:](https://rclone.org/koofr/)
+* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
 * Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
 * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
 * Mega [:page_facing_up:](https://rclone.org/mega/)
@@ -74,8 +74,7 @@ Set vars
 First make the release branch. If this is a second point release then
 this will be done already.

-* git branch ${BASE_TAG} ${BASE_TAG}-stable
-* git co ${BASE_TAG}-stable
+* git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
 * make startstable

 Now

File diff suppressed because it is too large.
@@ -1,5 +1,5 @@
-//go:build !plan9 && !solaris && !js
-// +build !plan9,!solaris,!js
+//go:build !plan9 && !solaris && !js && go1.18
+// +build !plan9,!solaris,!js,go1.18

 package azureblob
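The pair of constraint lines above is Go's dual build-tag convention: the `//go:build` expression (Go 1.17+) and the legacy `// +build` form must stay in agreement, and the `go1.18` term is a release tag satisfied by Go 1.18 and newer, so this backend now compiles only on those toolchains. A minimal self-contained sketch of the pattern, using a hypothetical package (not code from this diff):

```go
// real.go — compiled only on Go >= 1.18 and outside plan9.
// In the legacy form, commas mean AND; the //go:build expression
// writes the same constraint with &&.

//go:build !plan9 && go1.18
// +build !plan9,go1.18

package mybackend

// Supported reports that the real implementation is available.
const Supported = true
```

A companion stub file would carry the negated constraint (`//go:build plan9 || !go1.18`) with the same package clause, so every platform and toolchain sees exactly one buildable copy of the package — which is what the azureblob "unsupported" stub below does.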
@@ -1,12 +1,11 @@
 // Test AzureBlob filesystem interface

-//go:build !plan9 && !solaris && !js
-// +build !plan9,!solaris,!js
+//go:build !plan9 && !solaris && !js && go1.18
+// +build !plan9,!solaris,!js,go1.18

 package azureblob

 import (
-	"context"
 	"testing"

 	"github.com/rclone/rclone/fs"
@@ -17,10 +16,12 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
-		RemoteName:    "TestAzureBlob:",
-		NilObject:     (*Object)(nil),
-		TiersToTest:   []string{"Hot", "Cool"},
-		ChunkedUpload: fstests.ChunkedUploadConfig{},
+		RemoteName:  "TestAzureBlob:",
+		NilObject:   (*Object)(nil),
+		TiersToTest: []string{"Hot", "Cool"},
+		ChunkedUpload: fstests.ChunkedUploadConfig{
+			MinChunkSize: defaultChunkSize,
+		},
 	})
 }
@@ -32,36 +33,6 @@ var (
 	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
 )

-// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
-func TestServicePrincipalFileSuccess(t *testing.T) {
-	ctx := context.TODO()
-	credentials := `
-{
-    "appId": "my application (client) ID",
-    "password": "my secret",
-    "tenant": "my active directory tenant ID"
-}
-`
-	tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
-	if assert.NoError(t, err) {
-		assert.NotNil(t, tokenRefresher)
-	}
-}
-
-// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
-func TestServicePrincipalFileFailure(t *testing.T) {
-	ctx := context.TODO()
-	credentials := `
-{
-    "appId": "my application (client) ID",
-    "tenant": "my active directory tenant ID"
-}
-`
-	_, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
-	assert.Error(t, err)
-	assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
-}
-
 func TestValidateAccessTier(t *testing.T) {
 	tests := map[string]struct {
 		accessTier string
@@ -1,7 +1,7 @@
 // Build for azureblob for unsupported platforms to stop go complaining
 // about "no buildable Go source files "

-//go:build plan9 || solaris || js
-// +build plan9 solaris js
+//go:build plan9 || solaris || js || !go1.18
+// +build plan9 solaris js !go1.18

 package azureblob
@@ -1,137 +0,0 @@ (entire file deleted)
-//go:build !plan9 && !solaris && !js
-// +build !plan9,!solaris,!js
-
-package azureblob
-
-import (
-	"bytes"
-	"context"
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-
-	"github.com/Azure/go-autorest/autorest/adal"
-	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/fs/fshttp"
-)
-
-const (
-	azureResource      = "https://storage.azure.com"
-	imdsAPIVersion     = "2018-02-01"
-	msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
-)
-
-// This custom type is used to add the port the test server has bound to
-// to the request context.
-type testPortKey string
-
-type msiIdentifierType int
-
-const (
-	msiClientID msiIdentifierType = iota
-	msiObjectID
-	msiResourceID
-)
-
-type userMSI struct {
-	Type  msiIdentifierType
-	Value string
-}
-
-type httpError struct {
-	Response *http.Response
-}
-
-func (e httpError) Error() string {
-	return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
-}
-
-// GetMSIToken attempts to obtain an MSI token from the Azure Instance
-// Metadata Service.
-func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
-	// Attempt to get an MSI token; silently continue if unsuccessful.
-	// This code has been lovingly stolen from azcopy's OAuthTokenManager.
-	result := adal.Token{}
-	req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
-	if err != nil {
-		fs.Debugf(nil, "Failed to create request: %v", err)
-		return result, err
-	}
-	params := req.URL.Query()
-	params.Set("resource", azureResource)
-	params.Set("api-version", imdsAPIVersion)
-
-	// Specify user-assigned identity if requested.
-	if identity != nil {
-		switch identity.Type {
-		case msiClientID:
-			params.Set("client_id", identity.Value)
-		case msiObjectID:
-			params.Set("object_id", identity.Value)
-		case msiResourceID:
-			params.Set("mi_res_id", identity.Value)
-		default:
-			// If this happens, the calling function and this one don't agree on
-			// what valid ID types exist.
-			return result, fmt.Errorf("unknown MSI identity type specified")
-		}
-	}
-	req.URL.RawQuery = params.Encode()
-
-	// The Metadata header is required by all calls to IMDS.
-	req.Header.Set("Metadata", "true")
-
-	// If this function is run in a test, query the test server instead of IMDS.
-	testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
-	if isTest {
-		req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
-		req.Host = req.URL.Host
-	}
-
-	// Send request
-	httpClient := fshttp.NewClient(ctx)
-	resp, err := httpClient.Do(req)
-	if err != nil {
-		return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
-	}
-	defer func() { // resp and Body should not be nil
-		_, err = io.Copy(ioutil.Discard, resp.Body)
-		if err != nil {
-			fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
-		}
-		err = resp.Body.Close()
-		if err != nil {
-			fs.Debugf(nil, "Unable to close IMDS response: %v", err)
-		}
-	}()
-	// Check if the status code indicates success
-	// The request returns 200 currently, add 201 and 202 as well for possible extension.
-	switch resp.StatusCode {
-	case 200, 201, 202:
-		break
-	default:
-		body, _ := ioutil.ReadAll(resp.Body)
-		fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
-		return result, httpError{Response: resp}
-	}
-
-	b, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return result, fmt.Errorf("couldn't read IMDS response: %w", err)
-	}
-	// Remove BOM, if any. azcopy does this so I'm following along.
-	b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
-
-	// This would be a good place to persist the token if a large number of rclone
-	// invocations are being made in a short amount of time. If the token is
-	// persisted, the azureblob code will need to check for expiry before every
-	// storage API call.
-	err = json.Unmarshal(b, &result)
-	if err != nil {
-		return result, fmt.Errorf("couldn't unmarshal IMDS response: %w", err)
-	}
-
-	return result, nil
-}
@@ -1,118 +0,0 @@ (entire file deleted)
-//go:build !plan9 && !solaris && !js
-// +build !plan9,!solaris,!js
-
-package azureblob
-
-import (
-	"context"
-	"encoding/json"
-	"net/http"
-	"net/http/httptest"
-	"strconv"
-	"strings"
-	"testing"
-
-	"github.com/Azure/go-autorest/autorest/adal"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
-	return func(w http.ResponseWriter, r *http.Request) {
-		err := r.ParseForm()
-		require.NoError(t, err)
-		parameters := r.URL.Query()
-		(*actual)["path"] = r.URL.Path
-		(*actual)["Metadata"] = r.Header.Get("Metadata")
-		(*actual)["method"] = r.Method
-		for paramName := range parameters {
-			(*actual)[paramName] = parameters.Get(paramName)
-		}
-		// Make response.
-		response := adal.Token{}
-		responseBytes, err := json.Marshal(response)
-		require.NoError(t, err)
-		_, err = w.Write(responseBytes)
-		require.NoError(t, err)
-	}
-}
-
-func TestManagedIdentity(t *testing.T) {
-	// test user-assigned identity specifiers to use
-	testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
-	testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
-	testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
-	tests := []struct {
-		identity              *userMSI
-		identityParameterName string
-		expectedAbsent        []string
-	}{
-		{&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
-		{&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
-		{&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
-		{nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
-	}
-	alwaysExpected := map[string]string{
-		"path":        "/metadata/identity/oauth2/token",
-		"resource":    "https://storage.azure.com",
-		"Metadata":    "true",
-		"api-version": "2018-02-01",
-		"method":      "GET",
-	}
-	for _, test := range tests {
-		actual := make(map[string]string, 10)
-		testServer := httptest.NewServer(handler(t, &actual))
-		defer testServer.Close()
-		testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
-		require.NoError(t, err)
-		ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
-		_, err = GetMSIToken(ctx, test.identity)
-		require.NoError(t, err)
-
-		// Validate expected query parameters present
-		expected := make(map[string]string)
-		for k, v := range alwaysExpected {
-			expected[k] = v
-		}
-		if test.identity != nil {
-			expected[test.identityParameterName] = test.identity.Value
-		}
-
-		for key := range expected {
-			value, exists := actual[key]
-			if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
-				test.identityParameterName, key) {
-				assert.Equalf(t, expected[key], value,
-					"test of %s: parameter %s has incorrect value", test.identityParameterName, key)
-			}
-		}
-
-		// Validate unexpected query parameters absent
-		for _, key := range test.expectedAbsent {
-			_, exists := actual[key]
-			assert.Falsef(t, exists, "query parameter %s was unexpectedly passed")
-		}
-	}
-}
-
-func errorHandler(resultCode int) http.HandlerFunc {
-	return func(w http.ResponseWriter, r *http.Request) {
-		http.Error(w, "Test error generated", resultCode)
-	}
-}
-
-func TestIMDSErrors(t *testing.T) {
-	errorCodes := []int{404, 429, 500}
-	for _, code := range errorCodes {
-		testServer := httptest.NewServer(errorHandler(code))
-		defer testServer.Close()
-		testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
-		require.NoError(t, err)
-		ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
-		_, err = GetMSIToken(ctx, nil)
-		require.Error(t, err)
-		httpErr, ok := err.(httpError)
-		require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
-		assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
-	}
-}
@@ -17,9 +17,9 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
+	"os"
 	"path"
 	"strconv"
 	"strings"
@@ -183,7 +183,7 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
 }

 func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
-	file, err := ioutil.ReadFile(configFile)
+	file, err := os.ReadFile(configFile)
 	if err != nil {
 		return nil, fmt.Errorf("box: failed to read Box config: %w", err)
 	}
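The `io/ioutil` edits in this file and in the backends that follow are one mechanical migration: since Go 1.16 the `ioutil` package is deprecated and each of its helpers has a home in `io` or `os`. A self-contained sketch of the full mapping used across this compare (the file paths are illustrative):

```go
// Deprecated helper -> Go 1.16+ replacement, as applied in these diffs.
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	// ioutil.ReadFile -> os.ReadFile
	if data, err := os.ReadFile("/tmp/example.txt"); err == nil {
		fmt.Println(len(data))
	}

	// ioutil.WriteFile -> os.WriteFile
	_ = os.WriteFile("/tmp/example.txt", []byte("hello"), 0o644)

	// ioutil.ReadAll -> io.ReadAll
	b, _ := io.ReadAll(strings.NewReader("body"))
	fmt.Println(string(b))

	// ioutil.NopCloser -> io.NopCloser; ioutil.Discard -> io.Discard
	rc := io.NopCloser(strings.NewReader("drain me"))
	_, _ = io.Copy(io.Discard, rc)

	// ioutil.TempFile/TempDir -> os.CreateTemp/os.MkdirTemp
	if f, err := os.CreateTemp("", "example-"); err == nil {
		defer os.Remove(f.Name()) // caller now owns cleanup
		_ = f.Close()
	}
}
```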
backend/cache/cache_internal_test.go (vendored, 68 changes)

@@ -11,7 +11,6 @@ import (
 	goflag "flag"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"log"
 	"math/rand"
 	"os"
@@ -102,14 +101,12 @@ func TestMain(m *testing.M) {

 func TestInternalListRootAndInnerRemotes(t *testing.T) {
 	id := fmt.Sprintf("tilrair%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
-	defer runInstance.cleanupFs(t, rootFs, boltDb)
+	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)

 	// Instantiate inner fs
 	innerFolder := "inner"
 	runInstance.mkdir(t, rootFs, innerFolder)
-	rootFs2, boltDb2 := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil, nil)
-	defer runInstance.cleanupFs(t, rootFs2, boltDb2)
+	rootFs2, _ := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil)

 	runInstance.writeObjectString(t, rootFs2, "one", "content")
 	listRoot, err := runInstance.list(t, rootFs, "")
@@ -167,7 +164,7 @@ func TestInternalVfsCache(t *testing.T) {
 	li2 := [2]string{path.Join("test", "one"), path.Join("test", "second")}
 	for _, r := range li2 {
 		var err error
-		ci, err := ioutil.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
+		ci, err := os.ReadDir(path.Join(runInstance.chunkPath, runInstance.encryptRemoteIfNeeded(t, path.Join(id, r))))
 		if err != nil || len(ci) == 0 {
 			log.Printf("========== '%v' not in cache", r)
 		} else {
@@ -226,8 +223,7 @@ func TestInternalVfsCache(t *testing.T) {

 func TestInternalObjWrapFsFound(t *testing.T) {
 	id := fmt.Sprintf("tiowff%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
-	defer runInstance.cleanupFs(t, rootFs, boltDb)
+	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)

 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
@@ -259,8 +255,7 @@ func TestInternalObjWrapFsFound(t *testing.T) {

 func TestInternalObjNotFound(t *testing.T) {
 	id := fmt.Sprintf("tionf%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
-	defer runInstance.cleanupFs(t, rootFs, boltDb)
+	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)

 	obj, err := rootFs.NewObject(context.Background(), "404")
 	require.Error(t, err)
@@ -270,8 +265,7 @@ func TestInternalObjNotFound(t *testing.T) {
 func TestInternalCachedWrittenContentMatches(t *testing.T) {
 	testy.SkipUnreliable(t)
 	id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
-	defer runInstance.cleanupFs(t, rootFs, boltDb)
+	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)

 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
@@ -298,8 +292,7 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
 		t.Skip("Skip test on windows/386")
 	}
 	id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
-	defer runInstance.cleanupFs(t, rootFs, boltDb)
+	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)

 	// write the object
 	runInstance.writeRemoteString(t, rootFs, "one", "one content")
@@ -317,8 +310,7 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
 func TestInternalCachedUpdatedContentMatches(t *testing.T) {
 	testy.SkipUnreliable(t)
 	id := fmt.Sprintf("ticucm%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
-	defer runInstance.cleanupFs(t, rootFs, boltDb)
+	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
 	var err error

 	// create some rand test data
@@ -347,8 +339,7 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
 func TestInternalWrappedWrittenContentMatches(t *testing.T) {
 	id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
 	vfsflags.Opt.DirCacheTime = time.Second
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
-	defer runInstance.cleanupFs(t, rootFs, boltDb)
+	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
 	if runInstance.rootIsCrypt {
 		t.Skip("test skipped with crypt remote")
 	}
@@ -378,8 +369,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
 func TestInternalLargeWrittenContentMatches(t *testing.T) {
 	id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
 	vfsflags.Opt.DirCacheTime = time.Second
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
-	defer runInstance.cleanupFs(t, rootFs, boltDb)
+	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
 	if runInstance.rootIsCrypt {
 		t.Skip("test skipped with crypt remote")
 	}
@@ -405,8 +395,7 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {

 func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 	id := fmt.Sprintf("tiwfcns%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
-	defer runInstance.cleanupFs(t, rootFs, boltDb)
+	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)

 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
@@ -460,8 +449,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {

 func TestInternalMoveWithNotify(t *testing.T) {
 	id := fmt.Sprintf("timwn%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
-	defer runInstance.cleanupFs(t, rootFs, boltDb)
+	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
 	if !runInstance.wrappedIsExternal {
 		t.Skipf("Not external")
 	}
@@ -547,8 +535,7 @@ func TestInternalMoveWithNotify(t *testing.T) {

 func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 	id := fmt.Sprintf("tincep%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
-	defer runInstance.cleanupFs(t, rootFs, boltDb)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
 	if !runInstance.wrappedIsExternal {
 		t.Skipf("Not external")
 	}
@@ -634,8 +621,7 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {

 func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
 	id := fmt.Sprintf("ticsadcf%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
-	defer runInstance.cleanupFs(t, rootFs, boltDb)
+	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)

 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
@@ -667,8 +653,7 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {

 func TestInternalCacheWrites(t *testing.T) {
 	id := "ticw"
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
-	defer runInstance.cleanupFs(t, rootFs, boltDb)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"writes": "true"})

 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
@@ -689,8 +674,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
 		t.Skip("Skip test on windows/386")
 	}
 	id := fmt.Sprintf("timcsr%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
-	defer runInstance.cleanupFs(t, rootFs, boltDb)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"workers": "1"})

 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
@@ -725,8 +709,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
 func TestInternalExpiredEntriesRemoved(t *testing.T) {
 	id := fmt.Sprintf("tieer%v", time.Now().Unix())
 	vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"info_age": "5s"}, nil)
-	defer runInstance.cleanupFs(t, rootFs, boltDb)
+	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)

@@ -763,9 +746,7 @@ func TestInternalBug2117(t *testing.T) {
 	vfsflags.Opt.DirCacheTime = time.Second * 10

 	id := fmt.Sprintf("tib2117%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
-		map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
-	defer runInstance.cleanupFs(t, rootFs, boltDb)
+	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})

 	if runInstance.rootIsCrypt {
 		t.Skipf("skipping crypt")
@@ -841,7 +822,7 @@ func newRun() *run {
 	}

 	if uploadDir == "" {
-		r.tmpUploadDir, err = ioutil.TempDir("", "rclonecache-tmp")
+		r.tmpUploadDir, err = os.MkdirTemp("", "rclonecache-tmp")
 		if err != nil {
 			panic(fmt.Sprintf("Failed to create temp dir: %v", err))
 		}
@@ -866,7 +847,7 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
 	return enc
 }

-func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, cfg map[string]string, flags map[string]string) (fs.Fs, *cache.Persistent) {
+func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
 	fstest.Initialise()
 	remoteExists := false
 	for _, s := range config.FileSections() {
@@ -959,10 +940,15 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 	}
 	err = f.Mkdir(context.Background(), "")
 	require.NoError(t, err)
+
+	t.Cleanup(func() {
+		runInstance.cleanupFs(t, f)
+	})
+
 	return f, boltDb
 }

-func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
+func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
 	err := f.Features().Purge(context.Background(), "")
 	require.NoError(t, err)
 	cfs, err := r.getCacheFs(f)
@@ -984,7 +970,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
 	chunk := int64(1024)
 	cnt := size / chunk
 	left := size % chunk
-	f, err := ioutil.TempFile("", "rclonecache-tempfile")
+	f, err := os.CreateTemp("", "rclonecache-tempfile")
 	require.NoError(t, err)

 	for i := 0; i < int(cnt); i++ {
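The reworked `newCacheFs` above registers teardown with `t.Cleanup` inside the constructor instead of returning state for every caller to `defer`-clean, which is why each test body in this file shrinks by a `defer runInstance.cleanupFs(...)` line. A minimal self-contained sketch of the pattern (the fixture names are hypothetical):

```go
// t.Cleanup moves teardown from call sites into the helper that
// creates the fixture; cleanup functions run in LIFO order after the
// test and its subtests finish.
package example

import "testing"

type thing struct{ closed bool }

func (th *thing) close() { th.closed = true }

// newThing creates the fixture and schedules its own teardown.
func newThing(t *testing.T) *thing {
	t.Helper()
	th := &thing{}
	t.Cleanup(th.close)
	return th
}

func TestThing(t *testing.T) {
	th := newThing(t) // no defer needed at the call site
	if th.closed {
		t.Fatal("should not be closed yet")
	}
}
```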
backend/cache/cache_upload_test.go (vendored, 24 changes)

@@ -21,10 +21,8 @@ import (

 func TestInternalUploadTempDirCreated(t *testing.T) {
 	id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
-		nil,
+	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
-	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
 	require.NoError(t, err)
@@ -63,9 +61,7 @@ func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltD
 func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
 	id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
-		nil,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
-	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
 }
@@ -73,19 +69,15 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
 func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
 	id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
-		nil,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
-	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
 }

 func TestInternalUploadMoveExistingFile(t *testing.T) {
 	id := fmt.Sprintf("tiumef%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
-		nil,
+	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
-	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	err := rootFs.Mkdir(context.Background(), "one")
 	require.NoError(t, err)
@@ -119,10 +111,8 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {

 func TestInternalUploadTempPathCleaned(t *testing.T) {
 	id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
-		nil,
+	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
 		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
-	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	err := rootFs.Mkdir(context.Background(), "one")
 	require.NoError(t, err)
@@ -162,10 +152,8 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {

 func TestInternalUploadQueueMoreFiles(t *testing.T) {
 	id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
-		nil,
+	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
-	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	err := rootFs.Mkdir(context.Background(), "test")
 	require.NoError(t, err)
@@ -213,9 +201,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
 func TestInternalUploadTempFileOperations(t *testing.T) {
 	id := "tiutfo"
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
-		nil,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
-	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	boltDb.PurgeTempUploads()

@@ -343,9 +329,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	id := "tiuufo"
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
-		nil,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
-	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	boltDb.PurgeTempUploads()
backend/cache/plex.go (vendored, 4 changes)

@@ -8,7 +8,7 @@ import (
 	"crypto/tls"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"net/url"
 	"strings"
@@ -167,7 +167,7 @@ func (p *plexConnector) listenWebsocket() {
 			continue
 		}
 		var data []byte
-		data, err = ioutil.ReadAll(resp.Body)
+		data, err = io.ReadAll(resp.Body)
 		if err != nil {
 			continue
 		}
backend/cache/storage_persistent.go (vendored, 5 changes)

@@ -9,7 +9,6 @@ import (
 	"encoding/binary"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path"
 	"strconv"
@@ -473,7 +472,7 @@ func (b *Persistent) GetChunk(cachedObject *Object, offset int64) ([]byte, error
 	var data []byte

 	fp := path.Join(b.dataPath, cachedObject.abs(), strconv.FormatInt(offset, 10))
-	data, err := ioutil.ReadFile(fp)
+	data, err := os.ReadFile(fp)
 	if err != nil {
 		return nil, err
 	}
@@ -486,7 +485,7 @@ func (b *Persistent) AddChunk(fp string, data []byte, offset int64) error {
 	_ = os.MkdirAll(path.Join(b.dataPath, fp), os.ModePerm)

 	filePath := path.Join(b.dataPath, fp, strconv.FormatInt(offset, 10))
-	err := ioutil.WriteFile(filePath, data, os.ModePerm)
+	err := os.WriteFile(filePath, data, os.ModePerm)
 	if err != nil {
 		return err
 	}
@@ -12,7 +12,6 @@ import (
 	"fmt"
 	gohash "hash"
 	"io"
-	"io/ioutil"
 	"math/rand"
 	"path"
 	"regexp"
@@ -1038,7 +1037,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
 	if err != nil {
 		return err
 	}
-	metadata, err := ioutil.ReadAll(reader)
+	metadata, err := io.ReadAll(reader)
 	_ = reader.Close() // ensure file handle is freed on windows
 	if err != nil {
 		return err
@@ -1097,7 +1096,7 @@ func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
 	if err != nil {
 		return "", err
 	}
-	data, err := ioutil.ReadAll(reader)
+	data, err := io.ReadAll(reader)
 	_ = reader.Close() // ensure file handle is freed on windows
 	if err != nil {
 		return "", err
@@ -5,7 +5,7 @@ import (
 	"context"
 	"flag"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"path"
 	"regexp"
 	"strings"
@@ -413,7 +413,7 @@ func testSmallFileInternals(t *testing.T, f *Fs) {
 		if r == nil {
 			return
 		}
-		data, err := ioutil.ReadAll(r)
+		data, err := io.ReadAll(r)
 		assert.NoError(t, err)
 		assert.Equal(t, contents, string(data))
 		_ = r.Close()
@@ -538,7 +538,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
 	assert.NoError(t, err)
 	var chunkContents []byte
 	assert.NotPanics(t, func() {
-		chunkContents, err = ioutil.ReadAll(r)
+		chunkContents, err = io.ReadAll(r)
 		_ = r.Close()
 	})
 	assert.NoError(t, err)
@@ -573,7 +573,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
 	r, err = willyChunk.Open(ctx)
 	assert.NoError(t, err)
 	assert.NotPanics(t, func() {
-		_, err = ioutil.ReadAll(r)
+		_, err = io.ReadAll(r)
 		_ = r.Close()
 	})
 	assert.NoError(t, err)
@@ -672,7 +672,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
 	assert.NoError(t, err, "open "+description)
 	assert.NotNil(t, r, "open stream of "+description)
 	if err == nil && r != nil {
-		data, err := ioutil.ReadAll(r)
+		data, err := io.ReadAll(r)
 		assert.NoError(t, err, "read all of "+description)
 		assert.Equal(t, contents, string(data), description+" contents is ok")
 		_ = r.Close()
@@ -758,8 +758,8 @@ func testFutureProof(t *testing.T, f *Fs) {
 	assert.Error(t, err)

 	// Rcat must fail
-	in := ioutil.NopCloser(bytes.NewBufferString("abc"))
-	robj, err := operations.Rcat(ctx, f, file, in, modTime)
+	in := io.NopCloser(bytes.NewBufferString("abc"))
+	robj, err := operations.Rcat(ctx, f, file, in, modTime, nil)
 	assert.Nil(t, robj)
 	assert.NotNil(t, err)
 	if err != nil {
@@ -854,7 +854,7 @@ func testChunkerServerSideMove(t *testing.T, f *Fs) {
 	r, err := dstFile.Open(ctx)
 	assert.NoError(t, err)
 	assert.NotNil(t, r)
-	data, err := ioutil.ReadAll(r)
+	data, err := io.ReadAll(r)
 	assert.NoError(t, err)
 	assert.Equal(t, contents, string(data))
 	_ = r.Close()
@@ -631,7 +631,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
 	if err != nil {
 		return nil, err
 	}
-	uSrc := operations.NewOverrideRemote(src, uRemote)
+	uSrc := fs.NewOverrideRemote(src, uRemote)
 	var o fs.Object
 	if stream {
 		o, err = u.f.Features().PutStream(ctx, in, uSrc, options...)
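This hunk, and the crypt changes further down, track the move of `OverrideRemote` from the `operations` package into `fs`. A hedged sketch of how the helper is used, inferred only from the call sites visible in this compare (the two wrapper functions are illustrative, not rclone API):

```go
package example

import "github.com/rclone/rclone/fs"

// renameForUpload presents src under a different remote path without
// copying any data, as the combine backend does above.
func renameForUpload(src fs.ObjectInfo, newRemote string) fs.ObjectInfo {
	return fs.NewOverrideRemote(src, newRemote)
}

// underlying recovers the wrapped object, mirroring the type switch
// the crypt backend's Hash method uses below.
func underlying(oi fs.ObjectInfo) fs.Object {
	if or, ok := oi.(*fs.OverrideRemote); ok {
		return or.UnWrap() // original object, if oi was wrapped
	}
	if o, ok := oi.(fs.Object); ok {
		return o
	}
	return nil
}
```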
@@ -13,7 +13,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"regexp"
 	"strings"
@@ -468,7 +467,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
 	}

 	fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file")
-	tempFile, err := ioutil.TempFile("", "rclone-press-")
+	tempFile, err := os.CreateTemp("", "rclone-press-")
 	defer func() {
 		// these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish
 		// to ignore them
@@ -546,8 +545,8 @@ func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, o
 	}

 	// Transfer the data
-	o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx), options)
-	//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), ioutil.NopCloser(wrappedIn), src.ModTime(ctx))
+	o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options)
+	//o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx))
 	if err != nil {
 		if o != nil {
 			removeErr := o.Remove(ctx)
@@ -8,7 +8,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"strings"
 	"testing"

@@ -1073,7 +1072,7 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
 	source := newRandomSource(copySize)
 	encrypted, err := c.newEncrypter(source, nil)
 	assert.NoError(t, err)
-	decrypted, err := c.newDecrypter(ioutil.NopCloser(encrypted))
+	decrypted, err := c.newDecrypter(io.NopCloser(encrypted))
 	assert.NoError(t, err)
 	sink := newRandomSource(copySize)
 	n, err := io.CopyBuffer(sink, decrypted, buf)
@@ -1144,15 +1143,15 @@ func TestEncryptData(t *testing.T) {
 		buf := bytes.NewBuffer(test.in)
 		encrypted, err := c.EncryptData(buf)
 		assert.NoError(t, err)
-		out, err := ioutil.ReadAll(encrypted)
+		out, err := io.ReadAll(encrypted)
 		assert.NoError(t, err)
 		assert.Equal(t, test.expected, out)

 		// Check we can decode the data properly too...
 		buf = bytes.NewBuffer(out)
-		decrypted, err := c.DecryptData(ioutil.NopCloser(buf))
+		decrypted, err := c.DecryptData(io.NopCloser(buf))
 		assert.NoError(t, err)
-		out, err = ioutil.ReadAll(decrypted)
+		out, err = io.ReadAll(decrypted)
 		assert.NoError(t, err)
 		assert.Equal(t, test.in, out)
 	}
@@ -1187,7 +1186,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
 	fh, err := c.newEncrypter(in, nil)
 	assert.NoError(t, err)

-	n, err := io.CopyN(ioutil.Discard, fh, 1e6)
+	n, err := io.CopyN(io.Discard, fh, 1e6)
 	assert.Equal(t, io.ErrUnexpectedEOF, err)
 	assert.Equal(t, int64(32), n)
 }
@@ -1257,12 +1256,12 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {

 	in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
 	in1 := bytes.NewBuffer(file16)
-	in := ioutil.NopCloser(io.MultiReader(in1, in2))
+	in := io.NopCloser(io.MultiReader(in1, in2))

 	fh, err := c.newDecrypter(in)
 	assert.NoError(t, err)

-	n, err := io.CopyN(ioutil.Discard, fh, 1e6)
+	n, err := io.CopyN(io.Discard, fh, 1e6)
 	assert.Equal(t, io.ErrUnexpectedEOF, err)
 	assert.Equal(t, int64(16), n)
 }
@@ -1274,14 +1273,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {

 	// Make random data
 	const dataSize = 150000
-	plaintext, err := ioutil.ReadAll(newRandomSource(dataSize))
+	plaintext, err := io.ReadAll(newRandomSource(dataSize))
 	assert.NoError(t, err)

 	// Encrypt the data
 	buf := bytes.NewBuffer(plaintext)
 	encrypted, err := c.EncryptData(buf)
 	assert.NoError(t, err)
-	ciphertext, err := ioutil.ReadAll(encrypted)
+	ciphertext, err := io.ReadAll(encrypted)
 	assert.NoError(t, err)

 	trials := []int{0, 1, 2, 3, 4, 5, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65,
@@ -1300,7 +1299,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 				end = len(ciphertext)
 			}
 		}
-		reader = ioutil.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
+		reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
 		return reader, nil
 	}
@@ -1490,7 +1489,7 @@ func TestDecrypterRead(t *testing.T) {
 			assert.NoError(t, err, what)
 			continue
 		}
-		_, err = ioutil.ReadAll(fh)
+		_, err = io.ReadAll(fh)
 		var expectedErr error
 		switch {
 		case i == fileHeaderSize:
@@ -1514,7 +1513,7 @@ func TestDecrypterRead(t *testing.T) {
 	cd := newCloseDetector(in)
 	fh, err := c.newDecrypter(cd)
 	assert.NoError(t, err)
-	_, err = ioutil.ReadAll(fh)
+	_, err = io.ReadAll(fh)
 	assert.Error(t, err, "potato")
 	assert.Equal(t, 0, cd.closed)
@@ -1524,13 +1523,13 @@ func TestDecrypterRead(t *testing.T) {
 	copy(file16copy, file16)
 	for i := range file16copy {
 		file16copy[i] ^= 0xFF
-		fh, err := c.newDecrypter(ioutil.NopCloser(bytes.NewBuffer(file16copy)))
+		fh, err := c.newDecrypter(io.NopCloser(bytes.NewBuffer(file16copy)))
 		if i < fileMagicSize {
 			assert.Error(t, err, ErrorEncryptedBadMagic.Error())
 			assert.Nil(t, fh)
 		} else {
 			assert.NoError(t, err)
-			_, err = ioutil.ReadAll(fh)
+			_, err = io.ReadAll(fh)
 			assert.Error(t, err, ErrorEncryptedFileBadHeader.Error())
 		}
 		file16copy[i] ^= 0xFF
@@ -1565,7 +1564,7 @@ func TestDecrypterClose(t *testing.T) {
 	assert.Equal(t, 0, cd.closed)

 	// close after reading
-	out, err := ioutil.ReadAll(fh)
+	out, err := io.ReadAll(fh)
 	assert.NoError(t, err)
 	assert.Equal(t, []byte{1}, out)
 	assert.Equal(t, io.EOF, fh.err)
@@ -396,6 +396,8 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..

 // put implements Put or PutStream
 func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
+	ci := fs.GetConfig(ctx)
+
 	if f.opt.NoDataEncryption {
 		o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
 		if err == nil && o != nil {
@@ -413,6 +415,9 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
 	// Find a hash the destination supports to compute a hash of
 	// the encrypted data
 	ht := f.Fs.Hashes().GetOne()
+	if ci.IgnoreChecksum {
+		ht = hash.None
+	}
 	var hasher *hash.MultiHasher
 	if ht != hash.None {
 		hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
@@ -1047,10 +1052,11 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
 	// Get the underlying object if there is one
 	if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
 		// Prefer direct interface assertion
-	} else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
-		// Otherwise likely is an operations.OverrideRemote
+	} else if do, ok := o.ObjectInfo.(*fs.OverrideRemote); ok {
+		// Unwrap if it is an operations.OverrideRemote
 		srcObj = do.UnWrap()
 	} else {
+		// Otherwise don't unwrap any further
 		return "", nil
 	}
 	// if this is wrapping a local object then we work out the hash
@@ -17,41 +17,28 @@ import (
 	"github.com/stretchr/testify/require"
 )

-type testWrapper struct {
-	fs.ObjectInfo
-}
-
-// UnWrap returns the Object that this Object is wrapping or nil if it
-// isn't wrapping anything
-func (o testWrapper) UnWrap() fs.Object {
-	if o, ok := o.ObjectInfo.(fs.Object); ok {
-		return o
-	}
-	return nil
-}
-
 // Create a temporary local fs to upload things from
-func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
+func makeTempLocalFs(t *testing.T) (localFs fs.Fs) {
 	localFs, err := fs.TemporaryLocalFs(context.Background())
 	require.NoError(t, err)
-	cleanup = func() {
+	t.Cleanup(func() {
 		require.NoError(t, localFs.Rmdir(context.Background(), ""))
-	}
-	return localFs, cleanup
+	})
+	return localFs
 }

 // Upload a file to a remote
-func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
+func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object) {
 	inBuf := bytes.NewBufferString(contents)
 	t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
 	upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
 	obj, err := f.Put(context.Background(), inBuf, upSrc)
 	require.NoError(t, err)
-	cleanup = func() {
+	t.Cleanup(func() {
 		require.NoError(t, obj.Remove(context.Background()))
-	}
-	return obj, cleanup
+	})
+	return obj
 }

 // Test the ObjectInfo
@@ -65,11 +52,9 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
 		path = "_wrap"
 	}

-	localFs, cleanupLocalFs := makeTempLocalFs(t)
-	defer cleanupLocalFs()
+	localFs := makeTempLocalFs(t)

-	obj, cleanupObj := uploadFile(t, localFs, path, contents)
-	defer cleanupObj()
+	obj := uploadFile(t, localFs, path, contents)

 	// encrypt the data
 	inBuf := bytes.NewBufferString(contents)
@@ -83,7 +68,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
 	var oi fs.ObjectInfo = obj
 	if wrap {
 		// wrap the object in an fs.ObjectUnwrapper if required
-		oi = testWrapper{oi}
+		oi = fs.NewOverrideRemote(oi, "new_remote")
 	}

 	// wrap the object in a crypt for upload using the nonce we
@@ -116,16 +101,13 @@ func testComputeHash(t *testing.T, f *Fs) {
 		t.Skipf("%v: does not support hashes", f.Fs)
 	}

-	localFs, cleanupLocalFs := makeTempLocalFs(t)
-	defer cleanupLocalFs()
+	localFs := makeTempLocalFs(t)

 	// Upload a file to localFs as a test object
-	localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
-	defer cleanupLocalObj()
+	localObj := uploadFile(t, localFs, path, contents)

 	// Upload the same data to the remote Fs also
-	remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
-	defer cleanupRemoteObj()
+	remoteObj := uploadFile(t, f, path, contents)

 	// Calculate the expected Hash of the remote object
 	computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)
@@ -14,11 +14,10 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"mime"
 	"net/http"
+	"os"
 	"path"
-	"regexp"
 	"sort"
 	"strconv"
 	"strings"
@@ -1108,7 +1107,7 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm

 	// try loading service account credentials from env variable, then from a file
 	if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
-		loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
+		loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
 		if err != nil {
 			return nil, fmt.Errorf("error opening service account credentials file: %w", err)
 		}
@@ -3431,13 +3430,12 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 	if err != nil {
 		return nil, err
 	}
-	re := regexp.MustCompile(`[^\w_. -]+`)
 	if _, ok := opt["config"]; ok {
 		lines := []string{}
 		upstreams := []string{}
 		names := make(map[string]struct{}, len(drives))
 		for i, drive := range drives {
-			name := re.ReplaceAllString(drive.Name, "_")
+			name := fspath.MakeConfigName(drive.Name)
 			for {
 				if _, found := names[name]; !found {
 					break
@@ -3800,7 +3798,7 @@ func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.
 		data = data[:limit]
 	}

-	return ioutil.NopCloser(bytes.NewReader(data)), nil
+	return io.NopCloser(bytes.NewReader(data)), nil
 }

 func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
@@ -7,7 +7,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"mime"
 	"os"
 	"path"
@@ -78,7 +77,7 @@ var additionalMimeTypes = map[string]string{
 // Load the example export formats into exportFormats for testing
 func TestInternalLoadExampleFormats(t *testing.T) {
 	fetchFormatsOnce.Do(func() {})
-	buf, err := ioutil.ReadFile(filepath.FromSlash("test/about.json"))
+	buf, err := os.ReadFile(filepath.FromSlash("test/about.json"))
 	var about struct {
 		ExportFormats map[string][]string `json:"exportFormats,omitempty"`
 		ImportFormats map[string][]string `json:"importFormats,omitempty"`
@@ -20,7 +20,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"path"
@@ -1186,7 +1185,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 		return nil, errors.New("can't download - no id")
 	}
 	if o.contentType == emptyMimeType {
-		return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
+		return io.NopCloser(bytes.NewReader([]byte{})), nil
 	}
 	fs.FixRangeOption(options, o.size)
 	resp, err := o.fs.rpc(ctx, "getFile", params{
@@ -70,7 +70,7 @@ func init() {
When using implicit FTP over TLS the client connects using TLS
right from the start which breaks compatibility with
non-TLS-aware servers. This is usually served over port 990 rather
-than port 21. Cannot be used in combination with explicit FTP.`,
+than port 21. Cannot be used in combination with explicit FTPS.`,
Default: false,
}, {
Name: "explicit_tls",
@@ -78,7 +78,7 @@ than port 21. Cannot be used in combination with explicit FTP.`,

When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection
-to an encrypted one. Cannot be used in combination with implicit FTP.`,
+to an encrypted one. Cannot be used in combination with implicit FTPS.`,
Default: false,
}, {
Name: "concurrency",
@@ -657,8 +657,7 @@ func (f *Fs) dirFromStandardPath(dir string) string {
// findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
-fullPath := path.Join(f.root, remote)
-if fullPath == "" || fullPath == "." || fullPath == "/" {
+if remote == "" || remote == "." || remote == "/" {
// if root, assume exists and synthesize an entry
return &ftp.Entry{
Name: "",
@@ -666,13 +665,32 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
Time: time.Now(),
}, nil
}
-dir := path.Dir(fullPath)
-base := path.Base(fullPath)

c, err := f.getFtpConnection(ctx)
if err != nil {
return nil, fmt.Errorf("findItem: %w", err)
}

+// returns TRUE if MLST is supported which is required to call GetEntry
+if c.IsTimePreciseInList() {
+entry, err := c.GetEntry(f.opt.Enc.FromStandardPath(remote))
+f.putFtpConnection(&c, err)
+if err != nil {
+err = translateErrorFile(err)
+if err == fs.ErrorObjectNotFound {
+return nil, nil
+}
+return nil, err
+}
+if entry != nil {
+f.entryToStandard(entry)
+}
+return entry, nil
+}
+
+dir := path.Dir(remote)
+base := path.Base(remote)

files, err := c.List(f.dirFromStandardPath(dir))
f.putFtpConnection(&c, err)
if err != nil {
@@ -691,7 +709,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
-entry, err := f.findItem(ctx, remote)
+entry, err := f.findItem(ctx, path.Join(f.root, remote))
if err != nil {
return nil, err
}
@@ -713,7 +731,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err

// dirExists checks the directory pointed to by remote exists or not
func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
-entry, err := f.findItem(ctx, remote)
+entry, err := f.findItem(ctx, path.Join(f.root, remote))
if err != nil {
return false, fmt.Errorf("dirExists: %w", err)
}
@@ -857,32 +875,18 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// getInfo reads the FileInfo for a path
func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) {
// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
-dir := path.Dir(remote)
-base := path.Base(remote)
-
-c, err := f.getFtpConnection(ctx)
+file, err := f.findItem(ctx, remote)
if err != nil {
-return nil, fmt.Errorf("getInfo: %w", err)
-}
-files, err := c.List(f.dirFromStandardPath(dir))
-f.putFtpConnection(&c, err)
-if err != nil {
-return nil, translateErrorFile(err)
-}
-
-for i := range files {
-file := files[i]
-f.entryToStandard(file)
-if file.Name == base {
-info := &FileInfo{
-Name: remote,
-Size: file.Size,
-ModTime: file.Time,
-precise: f.fLstTime,
-IsDir: file.Type == ftp.EntryTypeFolder,
-}
-return info, nil
+return nil, err
+} else if file != nil {
+info := &FileInfo{
+Name: remote,
+Size: file.Size,
+ModTime: file.Time,
+precise: f.fLstTime,
+IsDir: file.Type == ftp.EntryTypeFolder,
+}
+return info, nil
}
return nil, fs.ErrorObjectNotFound
}
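The findItem rewrite above adds a fast path: when the server supports MLST (which is what IsTimePreciseInList reports), a single entry can be fetched directly instead of listing and scanning the parent directory. A rough sketch of the same probe-then-fallback shape using github.com/jlaffaye/ftp, the client library rclone wraps here; the server address, credentials and paths are placeholders, and GetEntry only works against MLST-capable servers:

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/jlaffaye/ftp"
    )

    func main() {
    	// Placeholder server details.
    	conn, err := ftp.Dial("ftp.example.com:21", ftp.DialWithTimeout(5*time.Second))
    	if err != nil {
    		panic(err)
    	}
    	defer conn.Quit()
    	if err := conn.Login("anonymous", "anonymous"); err != nil {
    		panic(err)
    	}

    	// Fast path: one MLST round trip for a single entry...
    	entry, err := conn.GetEntry("pub/file.txt")
    	if err != nil {
    		// ...fall back to listing the parent and scanning for the name,
    		// which is what the backend does when MLST isn't available.
    		entries, err := conn.List("pub")
    		if err != nil {
    			panic(err)
    		}
    		for _, e := range entries {
    			if e.Name == "file.txt" {
    				entry = e
    				break
    			}
    		}
    	}
    	if entry != nil {
    		fmt.Println(entry.Name, entry.Size, entry.Time)
    	}
    }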
@@ -19,8 +19,8 @@ import (
"errors"
"fmt"
"io"
-"io/ioutil"
"net/http"
+"os"
"path"
"strconv"
"strings"
@@ -487,7 +487,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

// try loading service account credentials from env variable, then from a file
if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
-loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
+loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
if err != nil {
return nil, fmt.Errorf("error opening service account credentials file: %w", err)
}
@@ -3,7 +3,7 @@ package googlephotos
import (
"context"
"fmt"
-"io/ioutil"
+"io"
"net/http"
"path"
"testing"
@@ -12,7 +12,6 @@ import (
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
-"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
@@ -56,7 +55,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
-dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
+dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote))
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()
@@ -99,7 +98,7 @@ func TestIntegration(t *testing.T) {
t.Run("ObjectOpen", func(t *testing.T) {
in, err := dstObj.Open(ctx)
require.NoError(t, err)
-buf, err := ioutil.ReadAll(in)
+buf, err := io.ReadAll(in)
require.NoError(t, err)
require.NoError(t, in.Close())
assert.True(t, len(buf) > 1000)
@@ -221,7 +220,7 @@ func TestIntegration(t *testing.T) {
require.NoError(t, err)
in, err := srcObj.Open(ctx)
require.NoError(t, err)
-dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
+dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote))
require.NoError(t, err)
assert.Equal(t, remote, dstObj.Remote())
_ = in.Close()
@@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"io"
-"io/ioutil"
"path"
"time"

@@ -118,7 +117,7 @@ func (o *Object) updateHashes(ctx context.Context) error {
defer func() {
_ = r.Close()
}()
-if _, err = io.Copy(ioutil.Discard, r); err != nil {
+if _, err = io.Copy(io.Discard, r); err != nil {
fs.Infof(o, "update failed (copy): %v", err)
return err
}
@@ -13,7 +13,6 @@ import (
"net/http"
"net/url"
"path"
-"strconv"
"strings"
"sync"
"time"
@@ -305,7 +304,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
fs: f,
remote: remote,
}
-err := o.stat(ctx)
+err := o.head(ctx)
if err != nil {
return nil, err
}
@@ -317,15 +316,6 @@ func (f *Fs) url(remote string) string {
return f.endpointURL + rest.URLPathEscape(remote)
}

-// parse s into an int64, on failure return def
-func parseInt64(s string, def int64) int64 {
-n, e := strconv.ParseInt(s, 10, 64)
-if e != nil {
-return def
-}
-return n
-}
-
// Errors returned by parseName
var (
errURLJoinFailed = errors.New("URLJoin failed")
@@ -500,7 +490,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
fs: f,
remote: remote,
}
-switch err := file.stat(ctx); err {
+switch err := file.head(ctx); err {
case nil:
add(file)
case fs.ErrorNotAFile:
@@ -579,8 +569,8 @@ func (o *Object) url() string {
return o.fs.url(o.remote)
}

-// stat updates the info field in the Object
-func (o *Object) stat(ctx context.Context) error {
+// head sends a HEAD request to update info fields in the Object
+func (o *Object) head(ctx context.Context) error {
if o.fs.opt.NoHead {
o.size = -1
o.modTime = timeUnset
@@ -601,13 +591,19 @@ func (o *Object) stat(ctx context.Context) error {
if err != nil {
return fmt.Errorf("failed to stat: %w", err)
}
+return o.decodeMetadata(ctx, res)
}

+// decodeMetadata updates info fields in the Object according to HTTP response headers
+func (o *Object) decodeMetadata(ctx context.Context, res *http.Response) error {
t, err := http.ParseTime(res.Header.Get("Last-Modified"))
if err != nil {
t = timeUnset
}
-o.size = parseInt64(res.Header.Get("Content-Length"), -1)
o.modTime = t
o.contentType = res.Header.Get("Content-Type")
+o.size = rest.ParseSizeFromHeaders(res.Header)

// If NoSlash is set then check ContentType to see if it is a directory
if o.fs.opt.NoSlash {
mediaType, _, err := mime.ParseMediaType(o.contentType)
@@ -653,6 +649,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil {
return nil, fmt.Errorf("Open failed: %w", err)
}
+if err = o.decodeMetadata(ctx, res); err != nil {
+return nil, fmt.Errorf("decodeMetadata failed: %w", err)
+}
return res.Body, nil
}
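Splitting decodeMetadata out of the old stat method lets both the HEAD request in head and the GET response in Open populate size, modtime and content type; that is what makes the no_head mode testable below, since metadata then arrives with the first read. The size logic delegated to rest.ParseSizeFromHeaders plausibly boils down to preferring the Content-Range total over Content-Length; the helper below illustrates that idea and is not rclone's implementation:

    package main

    import (
    	"fmt"
    	"net/http"
    	"strconv"
    	"strings"
    )

    // parseSize prefers the total length from a Content-Range header
    // ("bytes 0-99/1234") over Content-Length, returning -1 when neither
    // is usable. Illustration only, not rclone's rest.ParseSizeFromHeaders.
    func parseSize(h http.Header) int64 {
    	if cr := h.Get("Content-Range"); cr != "" {
    		if slash := strings.IndexRune(cr, '/'); slash >= 0 {
    			if n, err := strconv.ParseInt(cr[slash+1:], 10, 64); err == nil {
    				return n
    			}
    		}
    	}
    	if cl := h.Get("Content-Length"); cl != "" {
    		if n, err := strconv.ParseInt(cl, 10, 64); err == nil {
    			return n
    		}
    	}
    	return -1
    }

    func main() {
    	h := http.Header{}
    	h.Set("Content-Range", "bytes 0-99/1234")
    	h.Set("Content-Length", "100")
    	fmt.Println(parseSize(h)) // 1234, the full object size, not the range
    	t, _ := http.ParseTime(h.Get("Last-Modified")) // zero time when absent
    	fmt.Println(t.IsZero())
    }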
@@ -3,7 +3,7 @@ package http
import (
"context"
"fmt"
-"io/ioutil"
+"io"
"net/http"
"net/http/httptest"
"net/url"
@@ -33,20 +33,21 @@ var (
lineEndSize = 1
)

-// prepareServer the test server and return a function to tidy it up afterwards
-func prepareServer(t *testing.T) (configmap.Simple, func()) {
+// prepareServer prepares the test server and shuts it down automatically
+// when the test completes.
+func prepareServer(t *testing.T) configmap.Simple {
// file server for test/files
fileServer := http.FileServer(http.Dir(filesPath))

// verify the file path is correct, and also check which line endings
// are used to get sizes right ("\n" except on Windows, but even there
// we may have "\n" or "\r\n" depending on git crlf setting)
-fileList, err := ioutil.ReadDir(filesPath)
+fileList, err := os.ReadDir(filesPath)
require.NoError(t, err)
require.Greater(t, len(fileList), 0)
for _, file := range fileList {
if !file.IsDir() {
-data, _ := ioutil.ReadFile(filepath.Join(filesPath, file.Name()))
+data, _ := os.ReadFile(filepath.Join(filesPath, file.Name()))
if strings.HasSuffix(string(data), "\r\n") {
lineEndSize = 2
}
@@ -78,20 +79,21 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
"url": ts.URL,
"headers": strings.Join(headers, ","),
}
+t.Cleanup(ts.Close)

-// return a function to tidy up
-return m, ts.Close
+return m
}

-// prepare the test server and return a function to tidy it up afterwards
-func prepare(t *testing.T) (fs.Fs, func()) {
-m, tidy := prepareServer(t)
+// prepare prepares the test server and shuts it down automatically
+// when the test completes.
+func prepare(t *testing.T) fs.Fs {
+m := prepareServer(t)

// Instantiate it
f, err := NewFs(context.Background(), remoteName, "", m)
require.NoError(t, err)

-return f, tidy
+return f
}

func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
@@ -134,22 +136,19 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
}

func TestListRoot(t *testing.T) {
-f, tidy := prepare(t)
-defer tidy()
+f := prepare(t)
testListRoot(t, f, false)
}

func TestListRootNoSlash(t *testing.T) {
-f, tidy := prepare(t)
+f := prepare(t)
f.(*Fs).opt.NoSlash = true
-defer tidy()

testListRoot(t, f, true)
}

func TestListSubDir(t *testing.T) {
-f, tidy := prepare(t)
-defer tidy()
+f := prepare(t)

entries, err := f.List(context.Background(), "three")
require.NoError(t, err)
@@ -166,8 +165,7 @@ func TestListSubDir(t *testing.T) {
}

func TestNewObject(t *testing.T) {
-f, tidy := prepare(t)
-defer tidy()
+f := prepare(t)

o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err)
@@ -194,36 +192,69 @@ func TestNewObject(t *testing.T) {
}

func TestOpen(t *testing.T) {
-f, tidy := prepare(t)
-defer tidy()
+m := prepareServer(t)

-o, err := f.NewObject(context.Background(), "four/under four.txt")
-require.NoError(t, err)
+for _, head := range []bool{false, true} {
+if !head {
+m.Set("no_head", "true")
+}
+f, err := NewFs(context.Background(), remoteName, "", m)
+require.NoError(t, err)

-// Test normal read
-fd, err := o.Open(context.Background())
-require.NoError(t, err)
-data, err := ioutil.ReadAll(fd)
-require.NoError(t, err)
-require.NoError(t, fd.Close())
-if lineEndSize == 2 {
-assert.Equal(t, "beetroot\r\n", string(data))
-} else {
-assert.Equal(t, "beetroot\n", string(data))
+for _, rangeRead := range []bool{false, true} {
+o, err := f.NewObject(context.Background(), "four/under four.txt")
+require.NoError(t, err)

+if !head {
+// Test mod time is still indeterminate
+tObj := o.ModTime(context.Background())
+assert.Equal(t, time.Duration(0), time.Unix(0, 0).Sub(tObj))

+// Test file size is still indeterminate
+assert.Equal(t, int64(-1), o.Size())
+}

+var data []byte
+if !rangeRead {
+// Test normal read
+fd, err := o.Open(context.Background())
+require.NoError(t, err)
+data, err = io.ReadAll(fd)
+require.NoError(t, err)
+require.NoError(t, fd.Close())
+if lineEndSize == 2 {
+assert.Equal(t, "beetroot\r\n", string(data))
+} else {
+assert.Equal(t, "beetroot\n", string(data))
+}
+} else {
+// Test with range request
+fd, err := o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
+require.NoError(t, err)
+data, err = io.ReadAll(fd)
+require.NoError(t, err)
+require.NoError(t, fd.Close())
+assert.Equal(t, "eetro", string(data))
+}

+fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
+require.NoError(t, err)
+tFile := fi.ModTime()

+// Test the time is always correct on the object after file open
+tObj := o.ModTime(context.Background())
+fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)

+if !rangeRead {
+// Test the file size
+assert.Equal(t, int64(len(data)), o.Size())
+}
+}
+}
}

-// Test with range request
-fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
-require.NoError(t, err)
-data, err = ioutil.ReadAll(fd)
-require.NoError(t, err)
-require.NoError(t, fd.Close())
-assert.Equal(t, "eetro", string(data))
}

func TestMimeType(t *testing.T) {
-f, tidy := prepare(t)
-defer tidy()
+f := prepare(t)

o, err := f.NewObject(context.Background(), "four/under four.txt")
require.NoError(t, err)
@@ -234,8 +265,7 @@ func TestMimeType(t *testing.T) {
}

func TestIsAFileRoot(t *testing.T) {
-m, tidy := prepareServer(t)
-defer tidy()
+m := prepareServer(t)

f, err := NewFs(context.Background(), remoteName, "one%.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
@@ -244,8 +274,7 @@ func TestIsAFileRoot(t *testing.T) {
}

func TestIsAFileSubDir(t *testing.T) {
-m, tidy := prepareServer(t)
-defer tidy()
+m := prepareServer(t)

f, err := NewFs(context.Background(), remoteName, "three/underthree.txt", m)
assert.Equal(t, err, fs.ErrorIsFile)
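The prepare/prepareServer refactor leans on t.Cleanup (available since Go 1.14), which lets a helper register its own teardown instead of returning a tidy function that every caller must remember to defer. A minimal sketch of the pattern:

    package example

    import (
    	"net/http"
    	"net/http/httptest"
    	"testing"
    )

    // newTestServer registers its own teardown, so callers no longer
    // receive (and can no longer forget to call) a tidy function.
    func newTestServer(t *testing.T) *httptest.Server {
    	t.Helper()
    	ts := httptest.NewServer(http.NotFoundHandler())
    	t.Cleanup(ts.Close) // runs when the test and its subtests finish
    	return ts
    }

    func TestSomething(t *testing.T) {
    	ts := newTestServer(t)
    	resp, err := http.Get(ts.URL)
    	if err != nil {
    		t.Fatal(err)
    	}
    	defer resp.Body.Close()
    	if resp.StatusCode != http.StatusNotFound {
    		t.Fatalf("unexpected status %d", resp.StatusCode)
    	}
    }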
@@ -12,7 +12,6 @@ import (
"errors"
"fmt"
"io"
-"io/ioutil"
"math/rand"
"net/http"
"net/url"
@@ -822,7 +821,7 @@ func (f *Fs) allocatePathRaw(file string, absolute bool) string {
func grantTypeFilter(req *http.Request) {
if legacyTokenURL == req.URL.String() {
// read the entire body
-refreshBody, err := ioutil.ReadAll(req.Body)
+refreshBody, err := io.ReadAll(req.Body)
if err != nil {
return
}
@@ -832,7 +831,7 @@ func grantTypeFilter(req *http.Request) {
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))

// set the new ReadCloser (with a dummy Close())
-req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
+req.Body = io.NopCloser(bytes.NewReader(refreshBody))
}
}

@@ -1789,7 +1788,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
var tempFile *os.File

// create the cache file
-tempFile, err = ioutil.TempFile("", cachePrefix)
+tempFile, err = os.CreateTemp("", cachePrefix)
if err != nil {
return
}
@@ -1817,7 +1816,7 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
} else {
// that's a small file, just read it into memory
var inData []byte
-inData, err = ioutil.ReadAll(teeReader)
+inData, err = io.ReadAll(teeReader)
if err != nil {
return
}
@@ -1914,7 +1913,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// copy the already uploaded bytes into the trash :)
var result api.UploadResponse
-_, err = io.CopyN(ioutil.Discard, in, response.ResumePos)
+_, err = io.CopyN(io.Discard, in, response.ResumePos)
if err != nil {
return err
}
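readMD5, touched above, hashes the stream while deciding where to buffer it: payloads up to a threshold stay in memory, larger ones spool to a temporary file so they can be replayed for the actual upload. A stripped-down sketch of that shape (the names and threshold are invented for illustration):

    package main

    import (
    	"bytes"
    	"crypto/md5"
    	"encoding/hex"
    	"fmt"
    	"io"
    	"os"
    	"strings"
    )

    // hashAndBuffer returns the MD5 of in plus a replayable reader,
    // spooling to a temporary file when size exceeds threshold.
    func hashAndBuffer(in io.Reader, size, threshold int64) (string, io.Reader, func(), error) {
    	hasher := md5.New()
    	tee := io.TeeReader(in, hasher)
    	cleanup := func() {}
    	var out io.Reader
    	if size > threshold {
    		tmp, err := os.CreateTemp("", "spool")
    		if err != nil {
    			return "", nil, cleanup, err
    		}
    		cleanup = func() { tmp.Close(); os.Remove(tmp.Name()) }
    		if _, err := io.Copy(tmp, tee); err != nil {
    			return "", nil, cleanup, err
    		}
    		if _, err := tmp.Seek(0, io.SeekStart); err != nil {
    			return "", nil, cleanup, err
    		}
    		out = tmp
    	} else {
    		data, err := io.ReadAll(tee)
    		if err != nil {
    			return "", nil, cleanup, err
    		}
    		out = bytes.NewReader(data)
    	}
    	return hex.EncodeToString(hasher.Sum(nil)), out, cleanup, nil
    }

    func main() {
    	sum, r, cleanup, err := hashAndBuffer(strings.NewReader("hello"), 5, 1<<20)
    	defer cleanup()
    	if err != nil {
    		panic(err)
    	}
    	data, _ := io.ReadAll(r)
    	fmt.Println(sum, string(data))
    }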
@@ -7,7 +7,6 @@ import (
"errors"
"fmt"
"io"
-"io/ioutil"
"os"
"path"
"path/filepath"
@@ -33,7 +32,6 @@ import (

// Constants
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
-const linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
const useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly

// Register with Fs
@@ -73,7 +71,7 @@ supported by all file systems) under the "user.*" prefix.
Advanced: true,
}, {
Name: "links",
-Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
+Help: "Translate symlinks to/from regular files with a '" + fs.LinkSuffix + "' extension.",
Default: false,
NoPrefix: true,
ShortOpt: "l",
@@ -124,8 +122,8 @@ routine so this flag shouldn't normally be used.`,
Help: `Don't check to see if the files change during upload.

Normally rclone checks the size and modification time of files as they
-are being uploaded and aborts with a message which starts "can't copy
-- source file is being updated" if the file changes during upload.
+are being uploaded and aborts with a message which starts "can't copy -
+source file is being updated" if the file changes during upload.

However on some file systems this modification time check may fail (e.g.
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
@@ -376,8 +374,8 @@ func (f *Fs) caseInsensitive() bool {
//
// for regular files, localPath is returned unchanged
func translateLink(remote, localPath string) (newLocalPath string, isTranslatedLink bool) {
-isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
-newLocalPath = strings.TrimSuffix(localPath, linkSuffix)
+isTranslatedLink = strings.HasSuffix(remote, fs.LinkSuffix)
+newLocalPath = strings.TrimSuffix(localPath, fs.LinkSuffix)
return newLocalPath, isTranslatedLink
}

@@ -504,7 +502,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue
}
}
-err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
+err = fmt.Errorf("failed to read directory %q: %w", namepath, fierr)
fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue
@@ -552,7 +550,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
} else {
// Check whether this link should be translated
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
-newRemote += linkSuffix
+newRemote += fs.LinkSuffix
}
fso, err := f.newObjectWithInfo(newRemote, fi)
if err != nil {
@@ -646,7 +644,7 @@ func (f *Fs) readPrecision() (precision time.Duration) {
precision = time.Second

// Create temporary file and test it
-fd, err := ioutil.TempFile("", "rclone")
+fd, err := os.CreateTemp("", "rclone")
if err != nil {
// If failed return 1s
// fmt.Println("Failed to create temp file", err)
@@ -1073,7 +1071,7 @@ func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err
if err != nil {
return nil, err
}
-return readers.NewLimitedReadCloser(ioutil.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
+return readers.NewLimitedReadCloser(io.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
}

// Open an object for read
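The local backend now takes the link suffix from the fs package as fs.LinkSuffix, so other code, like the netstorage ".rclonelink" handling later in this diff, can share one constant. The translation itself is plain suffix bookkeeping, as this self-contained sketch shows:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // LinkSuffix marks a regular file that actually holds a symlink target,
    // mirroring rclone's ".rclonelink" convention.
    const LinkSuffix = ".rclonelink"

    // translateLink reports whether remote names a translated symlink and
    // strips the suffix from the local path if so.
    func translateLink(remote, localPath string) (string, bool) {
    	isLink := strings.HasSuffix(remote, LinkSuffix)
    	return strings.TrimSuffix(localPath, LinkSuffix), isLink
    }

    func main() {
    	p, ok := translateLink("docs/readme.txt"+LinkSuffix, "/mnt/docs/readme.txt"+LinkSuffix)
    	fmt.Println(p, ok) // /mnt/docs/readme.txt true
    }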
@@ -4,7 +4,7 @@ import (
"bytes"
"context"
"fmt"
-"io/ioutil"
+"io"
"os"
"path"
"path/filepath"
@@ -33,7 +33,6 @@ func TestMain(m *testing.M) {
// Test copy with source file that's updating
func TestUpdatingCheck(t *testing.T) {
r := fstest.NewRun(t)
-defer r.Finalise()
filePath := "sub dir/local test"
r.WriteFile(filePath, "content", time.Now())

@@ -78,7 +77,6 @@ func TestUpdatingCheck(t *testing.T) {
func TestSymlink(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
-defer r.Finalise()
f := r.Flocal.(*Fs)
dir := f.root

@@ -93,7 +91,7 @@ func TestSymlink(t *testing.T) {
require.NoError(t, lChtimes(symlinkPath, modTime2, modTime2))

// Object viewed as symlink
-file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
+file2 := fstest.NewItem("symlink.txt"+fs.LinkSuffix, "file.txt", modTime2)

// Object viewed as destination
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
@@ -122,7 +120,7 @@ func TestSymlink(t *testing.T) {

// Create a symlink
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
-file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
+file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+fs.LinkSuffix, "file.txt", modTime3, false)
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
r.CheckLocalItems(t, file1, file2, file3)
@@ -138,9 +136,9 @@ func TestSymlink(t *testing.T) {
assert.Equal(t, "file.txt", linkText)

// Check that NewObject gets the correct object
-o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
+o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+fs.LinkSuffix)
require.NoError(t, err)
-assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
+assert.Equal(t, "symlink2.txt"+fs.LinkSuffix, o.Remote())
assert.Equal(t, int64(8), o.Size())

// Check that NewObject doesn't see the non suffixed version
@@ -150,7 +148,7 @@ func TestSymlink(t *testing.T) {
// Check reading the object
in, err := o.Open(ctx)
require.NoError(t, err)
-contents, err := ioutil.ReadAll(in)
+contents, err := io.ReadAll(in)
require.NoError(t, err)
require.Equal(t, "file.txt", string(contents))
require.NoError(t, in.Close())
@@ -158,7 +156,7 @@ func TestSymlink(t *testing.T) {
// Check reading the object with range
in, err = o.Open(ctx, &fs.RangeOption{Start: 2, End: 5})
require.NoError(t, err)
-contents, err = ioutil.ReadAll(in)
+contents, err = io.ReadAll(in)
require.NoError(t, err)
require.Equal(t, "file.txt"[2:5+1], string(contents))
require.NoError(t, in.Close())
@@ -177,7 +175,6 @@ func TestSymlinkError(t *testing.T) {
func TestHashOnUpdate(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
-defer r.Finalise()
const filePath = "file.txt"
when := time.Now()
r.WriteFile(filePath, "content", when)
@@ -208,7 +205,6 @@ func TestHashOnUpdate(t *testing.T) {
func TestHashOnDelete(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
-defer r.Finalise()
const filePath = "file.txt"
when := time.Now()
r.WriteFile(filePath, "content", when)
@@ -237,7 +233,6 @@ func TestMetadata(t *testing.T) {
func TestMetadata(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
-defer r.Finalise()
const filePath = "metafile.txt"
when := time.Now()
const dayLength = len("2001-01-01")
@@ -372,7 +367,6 @@ func TestMetadata(t *testing.T) {
func TestFilter(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
-defer r.Finalise()
when := time.Now()
r.WriteFile("included", "included file", when)
r.WriteFile("excluded", "excluded file", when)
@@ -1,7 +1,6 @@
package local

import (
-"io/ioutil"
"os"
"sync"
"testing"
@@ -13,7 +12,7 @@ import (

// Check we can remove an open file
func TestRemove(t *testing.T) {
-fd, err := ioutil.TempFile("", "rclone-remove-test")
+fd, err := os.CreateTemp("", "rclone-remove-test")
require.NoError(t, err)
name := fd.Name()
defer func() {
@@ -18,7 +18,6 @@ import (

"encoding/hex"
"encoding/json"
-"io/ioutil"
"net/http"
"net/url"

@@ -1660,7 +1659,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

// Attempt to put by calculating hash in memory
if trySpeedup && size <= int64(o.fs.opt.SpeedupMaxMem) {
-fileBuf, err = ioutil.ReadAll(in)
+fileBuf, err = io.ReadAll(in)
if err != nil {
return err
}
@@ -1703,7 +1702,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if size <= mrhash.Size {
// Optimize upload: skip extra request if data fits in the hash buffer.
if fileBuf == nil {
-fileBuf, err = ioutil.ReadAll(wrapIn)
+fileBuf, err = io.ReadAll(wrapIn)
}
if fileHash == nil && err == nil {
fileHash = mrhash.Sum(fileBuf)
@@ -2214,7 +2213,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
fs.Debugf(o, "Server returned full content instead of range")
if start > 0 {
// Discard the beginning of the data
-_, err = io.CopyN(ioutil.Discard, wrapStream, start)
+_, err = io.CopyN(io.Discard, wrapStream, start)
if err != nil {
closeBody(res)
return nil, err
@@ -8,7 +8,6 @@ import (
"encoding/hex"
"fmt"
"io"
-"io/ioutil"
"path"
"strings"
"sync"
@@ -575,7 +574,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
data = data[:limit]
}
-return ioutil.NopCloser(bytes.NewBuffer(data)), nil
+return io.NopCloser(bytes.NewBuffer(data)), nil
}

// Update the object with the contents of the io.Reader, modTime and size
@@ -583,7 +582,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
bucket, bucketPath := o.split()
-data, err := ioutil.ReadAll(in)
+data, err := io.ReadAll(in)
if err != nil {
return fmt.Errorf("failed to update memory object: %w", err)
}
@@ -12,7 +12,6 @@ import (
"fmt"
gohash "hash"
"io"
-"io/ioutil"
"math/rand"
"net/http"
"net/url"
@@ -972,7 +971,7 @@ func (o *Object) netStorageUploadRequest(ctx context.Context, in io.Reader, src
URL = o.fs.url(src.Remote())
}
if strings.HasSuffix(URL, ".rclonelink") {
-bits, err := ioutil.ReadAll(in)
+bits, err := io.ReadAll(in)
if err != nil {
return err
}
@@ -1058,7 +1057,7 @@ func (o *Object) netStorageDownloadRequest(ctx context.Context, options []fs.Ope
if strings.HasSuffix(URL, ".rclonelink") && o.target != "" {
fs.Infof(nil, "Converting a symlink to the rclonelink file on download %q", URL)
reader := strings.NewReader(o.target)
-readcloser := ioutil.NopCloser(reader)
+readcloser := io.NopCloser(reader)
return readcloser, nil
}
backend/s3/s3.go (161 lines changed)

@@ -15,7 +15,6 @@ import (
"errors"
"fmt"
"io"
-"io/ioutil"
"net/http"
"net/url"
"path"
@@ -26,6 +25,8 @@ import (
"sync"
"time"

+"github.com/aws/aws-sdk-go/service/s3/s3manager"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/corehandlers"
@@ -65,7 +66,7 @@ import (
func init() {
fs.Register(&fs.RegInfo{
Name: "s3",
-Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi",
+Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi",
NewFs: NewFs,
CommandHelp: commandHelp,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
@@ -104,7 +105,7 @@ func init() {
Help: "Arvan Cloud Object Storage (AOS)",
}, {
Value: "DigitalOcean",
-Help: "Digital Ocean Spaces",
+Help: "DigitalOcean Spaces",
}, {
Value: "Dreamhost",
Help: "Dreamhost DreamObjects",
@@ -123,6 +124,9 @@ func init() {
}, {
Value: "LyveCloud",
Help: "Seagate Lyve Cloud",
+}, {
+Value: "Liara",
+Help: "Liara Object Storage",
}, {
Value: "Minio",
Help: "Minio Object Storage",
@@ -436,7 +440,7 @@ func init() {
}, {
Name: "region",
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
-Provider: "!AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive",
+Provider: "!AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive",
Examples: []fs.OptionExample{{
Value: "",
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
@@ -761,6 +765,15 @@ func init() {
Value: "s3-eu-south-2.ionoscloud.com",
Help: "Logrono, Spain",
}},
+}, {
+// Liara endpoints: https://liara.ir/landing/object-storage
+Name: "endpoint",
+Help: "Endpoint for Liara Object Storage API.",
+Provider: "Liara",
+Examples: []fs.OptionExample{{
+Value: "storage.iran.liara.space",
+Help: "The default endpoint\nIran",
+}},
}, {
// oss endpoints: https://help.aliyun.com/document_detail/31837.html
Name: "endpoint",
@@ -923,17 +936,11 @@ func init() {
}},
}, {
Name: "endpoint",
-Help: "Endpoint of the Shared Gateway.",
+Help: "Endpoint for Storj Gateway.",
Provider: "Storj",
Examples: []fs.OptionExample{{
-Value: "gateway.eu1.storjshare.io",
-Help: "EU1 Shared Gateway",
-}, {
-Value: "gateway.us1.storjshare.io",
-Help: "US1 Shared Gateway",
-}, {
-Value: "gateway.ap1.storjshare.io",
-Help: "Asia-Pacific Shared Gateway",
+Value: "gateway.storjshare.io",
+Help: "Global Hosted Gateway",
}},
}, {
// cos endpoints: https://intl.cloud.tencent.com/document/product/436/6224
@@ -1091,22 +1098,34 @@ func init() {
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
-Provider: "!AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu",
+Provider: "!AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,Liara,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint",
Provider: "Dreamhost",
}, {
+Value: "syd1.digitaloceanspaces.com",
+Help: "DigitalOcean Spaces Sydney 1",
+Provider: "DigitalOcean",
+}, {
+Value: "sfo3.digitaloceanspaces.com",
+Help: "DigitalOcean Spaces San Francisco 3",
+Provider: "DigitalOcean",
+}, {
+Value: "fra1.digitaloceanspaces.com",
+Help: "DigitalOcean Spaces Frankfurt 1",
+Provider: "DigitalOcean",
+}, {
Value: "nyc3.digitaloceanspaces.com",
-Help: "Digital Ocean Spaces New York 3",
+Help: "DigitalOcean Spaces New York 3",
Provider: "DigitalOcean",
}, {
Value: "ams3.digitaloceanspaces.com",
-Help: "Digital Ocean Spaces Amsterdam 3",
+Help: "DigitalOcean Spaces Amsterdam 3",
Provider: "DigitalOcean",
}, {
Value: "sgp1.digitaloceanspaces.com",
-Help: "Digital Ocean Spaces Singapore 1",
+Help: "DigitalOcean Spaces Singapore 1",
Provider: "DigitalOcean",
}, {
Value: "localhost:8333",
@@ -1176,6 +1195,10 @@ func init() {
Value: "s3.ap-southeast-2.wasabisys.com",
Help: "Wasabi AP Southeast 2 (Sydney)",
Provider: "Wasabi",
+}, {
+Value: "storage.iran.liara.space",
+Help: "Liara Iran endpoint",
+Provider: "Liara",
}, {
Value: "s3.ir-thr-at1.arvanstorage.com",
Help: "ArvanCloud Tehran Iran (Asiatech) endpoint",
@@ -1559,7 +1582,7 @@ func init() {
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
-Provider: "!AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
+Provider: "!AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
}, {
Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -1569,7 +1592,11 @@ This ACL is used for creating objects and if bucket_acl isn't set, for creating
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl

Note that this ACL is applied when server-side copying objects as S3
-doesn't copy the ACL from the source but rather writes a fresh one.`,
+doesn't copy the ACL from the source but rather writes a fresh one.
+
+If the acl is an empty string then no X-Amz-Acl: header is added and
+the default (private) will be used.
+`,
Provider: "!Storj,Cloudflare",
Examples: []fs.OptionExample{{
Value: "default",
@@ -1623,7 +1650,11 @@ doesn't copy the ACL from the source but rather writes a fresh one.`,
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl

Note that this ACL is applied when only when creating buckets. If it
-isn't set then "acl" is used instead.`,
+isn't set then "acl" is used instead.
+
+If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl:
+header is added and the default (private) will be used.
+`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "private",
@@ -1784,6 +1815,15 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
Value: "STANDARD_IA",
Help: "Infrequent access storage mode",
}},
+}, {
+// Mapping from here: https://liara.ir/landing/object-storage
+Name: "storage_class",
+Help: "The storage class to use when storing new objects in Liara",
+Provider: "Liara",
+Examples: []fs.OptionExample{{
+Value: "STANDARD",
+Help: "Standard storage class",
+}},
}, {
// Mapping from here: https://www.arvancloud.com/en/products/cloud-storage
Name: "storage_class",
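These help-text additions describe a real behaviour change: combined with the removal of the hard acl = "private" default in NewFs below, an explicitly empty ACL now means rclone sends no X-Amz-Acl header at all, which matters for providers that reject canned ACLs. A hypothetical config illustrating the distinction (the remote name and endpoint are placeholders):

    # rclone.conf - "nocannedacl" is a made-up remote name
    [nocannedacl]
    type = s3
    provider = Other
    endpoint = s3.example.com
    acl =            # empty: rclone omits the X-Amz-Acl header entirely
    bucket_acl =     # empty: bucket creation also omits the header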
@@ -2755,6 +2795,10 @@ func setQuirks(opt *Options) {
// listObjectsV2 supported - https://api.ionos.com/docs/s3/#Basic-Operations-get-Bucket-list-type-2
virtualHostStyle = false
urlEncodeListings = false
+case "Liara":
+virtualHostStyle = false
+urlEncodeListings = false
+useMultipartEtag = false
case "LyveCloud":
useMultipartEtag = false // LyveCloud seems to calculate multipart Etags differently from AWS
case "Minio":
@@ -2846,6 +2890,14 @@ func (f *Fs) setRoot(root string) {
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}

+// return a pointer to the string if non empty or nil if it is empty
+func stringPointerOrNil(s string) *string {
+if s == "" {
+return nil
+}
+return &s
+}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
@@ -2865,9 +2917,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.Versions && opt.VersionAt.IsSet() {
return nil, errors.New("s3: cant use --s3-versions and --s3-version-at at the same time")
}
-if opt.ACL == "" {
-opt.ACL = "private"
-}
if opt.BucketACL == "" {
opt.BucketACL = opt.ACL
}
@@ -3013,6 +3062,17 @@ func (f *Fs) getMetaDataListing(ctx context.Context, wantRemote string) (info *s
return info, versionID, nil
}

+// stringClonePointer clones the string pointed to by sp into new
+// memory. This is useful to stop us keeping references to small
+// strings carved out of large XML responses.
+func stringClonePointer(sp *string) *string {
+if sp == nil {
+return nil
+}
+var s = *sp
+return &s
+}

// Return an Object from a path
//
// If it can't be found it returns the error ErrorObjectNotFound.
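stringClonePointer, added above, works around how Go strings share memory: a short field decoded from a multi-megabyte XML listing response keeps the whole buffer reachable for as long as the object holds the substring. Copying the few bytes releases it. A sketch of the effect (the sizes are illustrative):

    package main

    import "strings"

    // keepRef returns a substring that shares memory with the huge input,
    // so the garbage collector must retain the entire buffer.
    func keepRef(huge string) string {
    	return huge[:8]
    }

    // keepClone copies the bytes it needs; the huge buffer can be freed.
    func keepClone(huge string) string {
    	var b strings.Builder
    	b.WriteString(huge[:8]) // force a fresh allocation of just 8 bytes
    	return b.String()
    }

    func main() {
    	response := strings.Repeat("x", 8<<20) // stand-in for a big XML body
    	a := keepRef(response)                 // pins ~8 MiB
    	b := keepClone(response)               // pins 8 bytes
    	_, _ = a, b
    }

Since Go 1.18 strings.Clone performs the same copy; the s3 code keeps a pointer-aware helper because the AWS SDK traffics in *string.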
@@ -3038,8 +3098,8 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Obje
}
o.setMD5FromEtag(aws.StringValue(info.ETag))
o.bytes = aws.Int64Value(info.Size)
-o.storageClass = info.StorageClass
-o.versionID = versionID
+o.storageClass = stringClonePointer(info.StorageClass)
+o.versionID = stringClonePointer(versionID)
} else if !o.fs.opt.NoHeadObject {
err := o.readMetaData(ctx) // reads info and meta, returning an error
if err != nil {
@@ -3057,19 +3117,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {

// Gets the bucket location
func (f *Fs) getBucketLocation(ctx context.Context, bucket string) (string, error) {
-req := s3.GetBucketLocationInput{
-Bucket: &bucket,
-}
-var resp *s3.GetBucketLocationOutput
-var err error
-err = f.pacer.Call(func() (bool, error) {
-resp, err = f.c.GetBucketLocation(&req)
-return f.shouldRetry(ctx, err)
+region, err := s3manager.GetBucketRegion(ctx, f.ses, bucket, "", func(r *request.Request) {
+r.Config.S3ForcePathStyle = aws.Bool(f.opt.ForcePathStyle)
})
if err != nil {
return "", err
}
-return s3.NormalizeBucketLocation(aws.StringValue(resp.LocationConstraint)), nil
+return region, nil
}

// Updates the region for the bucket by reading the region from the
@@ -3186,6 +3240,9 @@ func (ls *v2List) List(ctx context.Context) (resp *s3.ListObjectsV2Output, versi
if err != nil {
return nil, nil, err
}
+if aws.BoolValue(resp.IsTruncated) && (resp.NextContinuationToken == nil || *resp.NextContinuationToken == "") {
+return nil, nil, errors.New("s3 protocol error: received listing v2 with IsTruncated set and no NextContinuationToken. Should you be using `--s3-list-version 1`?")
+}
ls.req.ContinuationToken = resp.NextContinuationToken
return resp, nil, nil
}
@@ -3744,7 +3801,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
return f.cache.Create(bucket, func() error {
req := s3.CreateBucketInput{
Bucket: &bucket,
-ACL: &f.opt.BucketACL,
+ACL: stringPointerOrNil(f.opt.BucketACL),
}
if f.opt.LocationConstraint != "" {
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
@@ -3809,7 +3866,7 @@ func pathEscape(s string) string {
// method
func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, src *Object) error {
req.Bucket = &dstBucket
-req.ACL = &f.opt.ACL
+req.ACL = stringPointerOrNil(f.opt.ACL)
req.Key = &dstPath
source := pathEscape(path.Join(srcBucket, srcPath))
if src.versionID != nil {
@@ -4787,23 +4844,12 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options
return nil, err
}

-contentLength := &resp.ContentLength
-if resp.Header.Get("Content-Range") != "" {
-var contentRange = resp.Header.Get("Content-Range")
-slash := strings.IndexRune(contentRange, '/')
-if slash >= 0 {
-i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
-if err == nil {
-contentLength = &i
-} else {
-fs.Debugf(o, "Failed to find parse integer from in %q: %v", contentRange, err)
-}
-} else {
-fs.Debugf(o, "Failed to find length in %q", contentRange)
-}
+contentLength := rest.ParseSizeFromHeaders(resp.Header)
+if contentLength < 0 {
+fs.Debugf(o, "Failed to parse file size from headers")
}

-lastModified, err := time.Parse(time.RFC1123, resp.Header.Get("Last-Modified"))
+lastModified, err := http.ParseTime(resp.Header.Get("Last-Modified"))
if err != nil {
fs.Debugf(o, "Failed to parse last modified from string %s, %v", resp.Header.Get("Last-Modified"), err)
}
@@ -4827,7 +4873,7 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options

var head = s3.HeadObjectOutput{
ETag: header("Etag"),
-ContentLength: contentLength,
+ContentLength: &contentLength,
LastModified: &lastModified,
Metadata: metaData,
CacheControl: header("Cache-Control"),
@@ -5173,7 +5219,7 @@ func (o *Object) uploadSinglepartPutObject(ctx context.Context, req *s3.PutObjec
// Can't upload zero length files like this for some reason
r.Body = bytes.NewReader([]byte{})
} else {
-r.SetStreamingBody(ioutil.NopCloser(in))
+r.SetStreamingBody(io.NopCloser(in))
}
r.SetContext(ctx)
r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
@@ -5287,7 +5333,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op

req := s3.PutObjectInput{
Bucket: &bucket,
-ACL: &o.fs.opt.ACL,
+ACL: stringPointerOrNil(o.fs.opt.ACL),
Key: &bucketPath,
}

@@ -5455,7 +5501,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
if err != nil {
return err
}
-o.versionID = versionID
+// Only record versionID if we are using --s3-versions or --s3-version-at
+if o.fs.opt.Versions || o.fs.opt.VersionAt.IsSet() {
+o.versionID = versionID
+} else {
+o.versionID = nil
+}

// User requested we don't HEAD the object after uploading it
// so make up the object as best we can assuming it got
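With the change above, per-object version IDs are only retained when versioning flags are in play, which avoids holding an extra string per object on ordinary syncs. The two flags involved are the s3 backend's --s3-versions and --s3-version-at; the bucket name and timestamp below are placeholders:

    # Include old versions of objects in listings
    rclone ls --s3-versions remote:bucket

    # Show the bucket as it was at a point in time
    rclone ls --s3-version-at "2022-01-01 00:00:00" remote:bucket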
@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
-"io/ioutil"
"net/http"
"net/url"
"path"
@@ -633,7 +632,7 @@ func (f *Fs) download(ctx context.Context, url string, size int64, options ...fs
})
if start > 0 {
// We need to read and discard the beginning of the data...
-_, err = io.CopyN(ioutil.Discard, resp.Body, start)
+_, err = io.CopyN(io.Discard, resp.Body, start)
if err != nil {
return nil, err
}
@@ -10,7 +10,6 @@ import (
"errors"
"fmt"
"io"
-"io/ioutil"
"os"
"path"
"regexp"
@@ -123,7 +122,10 @@ This enables the use of the following insecure ciphers and key exchange methods:
- diffie-hellman-group-exchange-sha256
- diffie-hellman-group-exchange-sha1

-Those algorithms are insecure and may allow plaintext data to be recovered by an attacker.`,
+Those algorithms are insecure and may allow plaintext data to be recovered by an attacker.
+
+This must be false if you use either ciphers or key_exchange advanced options.
+`,
Default: false,
Examples: []fs.OptionExample{
{
@@ -325,6 +327,46 @@ and pass variables with spaces in in quotes, eg

"VAR3=value with space" "VAR4=value with space" VAR5=nospacehere

`,
Advanced: true,
+}, {
+Name: "ciphers",
+Default: fs.SpaceSepList{},
+Help: `Space separated list of ciphers to be used for session encryption, ordered by preference.
+
+At least one must match with server configuration. This can be checked for example using ssh -Q cipher.
+
+This must not be set if use_insecure_cipher is true.
+
+Example:
+
+    aes128-ctr aes192-ctr aes256-ctr aes128-gcm@openssh.com aes256-gcm@openssh.com
+`,
+Advanced: true,
+}, {
+Name: "key_exchange",
+Default: fs.SpaceSepList{},
+Help: `Space separated list of key exchange algorithms, ordered by preference.
+
+At least one must match with server configuration. This can be checked for example using ssh -Q kex.
+
+This must not be set if use_insecure_cipher is true.
+
+Example:
+
+    sntrup761x25519-sha512@openssh.com curve25519-sha256 curve25519-sha256@libssh.org ecdh-sha2-nistp256
+`,
+Advanced: true,
+}, {
+Name: "macs",
+Default: fs.SpaceSepList{},
+Help: `Space separated list of MACs (message authentication code) algorithms, ordered by preference.
+
+At least one must match with server configuration. This can be checked for example using ssh -Q mac.
+
+Example:
+
+    umac-64-etm@openssh.com umac-128-etm@openssh.com hmac-sha2-256-etm@openssh.com
+`,
+Advanced: true,
}},
@@ -362,6 +404,9 @@ type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
Concurrency int `config:"concurrency"`
SetEnv fs.SpaceSepList `config:"set_env"`
+Ciphers fs.SpaceSepList `config:"ciphers"`
+KeyExchange fs.SpaceSepList `config:"key_exchange"`
+MACs fs.SpaceSepList `config:"macs"`
}
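Since backend options surface both as config keys and as --sftp-* flags, the new algorithm lists can be pinned per remote. A hypothetical remote (host and algorithm choices are placeholders; as the NewFs check later in this diff enforces, these options cannot be combined with use_insecure_cipher = true):

    # rclone.conf - "myserver" is a made-up remote name
    [myserver]
    type = sftp
    host = sftp.example.com
    user = demo
    ciphers = aes128-ctr aes256-ctr aes128-gcm@openssh.com
    key_exchange = curve25519-sha256 ecdh-sha2-nistp256
    macs = hmac-sha2-256-etm@openssh.com

    # or one-off on the command line
    rclone lsd myserver: --sftp-ciphers "aes128-ctr aes256-ctr"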
// Fs stores the interface to the remote SFTP files
@@ -702,10 +747,25 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
sshConfig.HostKeyCallback = hostcallback
}

+if opt.UseInsecureCipher && (opt.Ciphers != nil || opt.KeyExchange != nil) {
+return nil, fmt.Errorf("use_insecure_cipher must be false if ciphers or key_exchange are set in advanced configuration")
+}
+
+sshConfig.Config.SetDefaults()
if opt.UseInsecureCipher {
-sshConfig.Config.SetDefaults()
sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc", "aes192-cbc", "aes256-cbc", "3des-cbc")
sshConfig.Config.KeyExchanges = append(sshConfig.Config.KeyExchanges, "diffie-hellman-group-exchange-sha1", "diffie-hellman-group-exchange-sha256")
+} else {
+if opt.Ciphers != nil {
+sshConfig.Config.Ciphers = opt.Ciphers
+}
+if opt.KeyExchange != nil {
+sshConfig.Config.KeyExchanges = opt.KeyExchange
+}
}

+if opt.MACs != nil {
+sshConfig.Config.MACs = opt.MACs
+}

keyFile := env.ShellExpand(opt.KeyFile)
@@ -722,7 +782,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, fmt.Errorf("couldn't read ssh agent signers: %w", err)
}
if keyFile != "" {
-pubBytes, err := ioutil.ReadFile(keyFile + ".pub")
+pubBytes, err := os.ReadFile(keyFile + ".pub")
if err != nil {
return nil, fmt.Errorf("failed to read public key file: %w", err)
}
@@ -751,7 +811,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if keyFile != "" || opt.KeyPem != "" {
var key []byte
if opt.KeyPem == "" {
-key, err = ioutil.ReadFile(keyFile)
+key, err = os.ReadFile(keyFile)
if err != nil {
return nil, fmt.Errorf("failed to read private key file: %w", err)
}
@@ -782,7 +842,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e

// If a public key has been specified then use that
if pubkeyFile != "" {
-certfile, err := ioutil.ReadFile(pubkeyFile)
+certfile, err := os.ReadFile(pubkeyFile)
if err != nil {
return nil, fmt.Errorf("unable to read cert file: %w", err)
}
@@ -915,20 +975,24 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
fs.Debugf(f, "Running shell type detection remote command: %s", shellCmd)
err = session.Run(shellCmd)
_ = session.Close()
+f.shellType = defaultShellType
if err != nil {
-f.shellType = defaultShellType
fs.Debugf(f, "Remote command failed: %v (stdout=%v) (stderr=%v)", err, bytes.TrimSpace(stdout.Bytes()), bytes.TrimSpace(stderr.Bytes()))
} else {
outBytes := stdout.Bytes()
fs.Debugf(f, "Remote command result: %s", outBytes)
outString := string(bytes.TrimSpace(stdout.Bytes()))
-if strings.HasPrefix(outString, "Microsoft.PowerShell") { // If PowerShell: "Microsoft.PowerShell%ComSpec%"
-f.shellType = "powershell"
-} else if !strings.HasSuffix(outString, "%ComSpec%") { // If Command Prompt: "${ShellId}C:\WINDOWS\system32\cmd.exe"
-f.shellType = "cmd"
-} else { // If Unix: "%ComSpec%"
-f.shellType = "unix"
-}
+if outString != "" {
+if strings.HasPrefix(outString, "Microsoft.PowerShell") { // PowerShell: "Microsoft.PowerShell%ComSpec%"
+f.shellType = "powershell"
+} else if !strings.HasSuffix(outString, "%ComSpec%") { // Command Prompt: "${ShellId}C:\WINDOWS\system32\cmd.exe"
+// Additional positive test, to avoid misdetection on unpredicted Unix shell variants
+s := strings.ToLower(outString)
+if strings.Contains(s, ".exe") || strings.Contains(s, ".com") {
+f.shellType = "cmd"
+}
+} // POSIX-based Unix shell: "%ComSpec%"
+} // fish Unix shell: ""
}
}
// Save permanently in config to avoid the extra work next time
@@ -1711,11 +1775,14 @@ func (o *Object) setMetadata(info os.FileInfo) {

// statRemote stats the file or directory at the remote given
func (f *Fs) stat(ctx context.Context, remote string) (info os.FileInfo, err error) {
+absPath := remote
+if !strings.HasPrefix(remote, "/") {
+absPath = path.Join(f.absRoot, remote)
+}
c, err := f.getSftpConnection(ctx)
if err != nil {
return nil, fmt.Errorf("stat: %w", err)
}
-absPath := path.Join(f.absRoot, remote)
info, err = c.sftpClient.Stat(absPath)
f.putSftpConnection(&c, err)
return info, err
@@ -77,7 +77,6 @@ import (
"errors"
"fmt"
"io"
-"io/ioutil"
"net/http"
"net/url"
"path"
@@ -479,7 +478,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("failed to open timezone db: %w", err)
}
-tzdata, err := ioutil.ReadAll(timezone)
+tzdata, err := io.ReadAll(timezone)
if err != nil {
return nil, fmt.Errorf("failed to read timezone: %w", err)
}
@@ -10,7 +10,6 @@ import (
"compress/gzip"
"fmt"
"io"
-"io/ioutil"
"net/http"
"os"
pathpkg "path"
@@ -119,7 +118,7 @@ func (f *vfsgen۰CompressedFile) Read(p []byte) (n int, err error) {
}
if f.grPos < f.seekPos {
// Fast-forward.
-_, err = io.CopyN(ioutil.Discard, f.gr, f.seekPos-f.grPos)
+_, err = io.CopyN(io.Discard, f.gr, f.seekPos-f.grPos)
if err != nil {
return 0, err
}
@@ -478,11 +478,15 @@ func (f *Fs) makeEntryRelative(share, _path, relative string, stat os.FileInfo)
}

func (f *Fs) ensureDirectory(ctx context.Context, share, _path string) error {
+   dir := path.Dir(_path)
+   if dir == "." {
+       return nil
+   }
    cn, err := f.getConnection(ctx, share)
    if err != nil {
        return err
    }
-   err = cn.smbShare.MkdirAll(f.toSambaPath(path.Dir(_path)), 0o755)
+   err = cn.smbShare.MkdirAll(f.toSambaPath(dir), 0o755)
    f.putConnection(&cn)
    return err
}

@@ -160,6 +160,7 @@ var (
    _ fs.ListRer     = &Fs{}
    _ fs.PutStreamer = &Fs{}
    _ fs.Mover       = &Fs{}
+   _ fs.Copier      = &Fs{}
)

// NewFs creates a filesystem backed by Storj.
@@ -720,3 +721,43 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
    // Read the new object
    return f.NewObject(ctx, remote)
}
+
+// Copy src to this remote using server-side copy operations.
+//
+// This is stored with the remote path given.
+//
+// It returns the destination Object and a possible error.
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantCopy
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+   srcObj, ok := src.(*Object)
+   if !ok {
+       fs.Debugf(src, "Can't copy - not same remote type")
+       return nil, fs.ErrorCantCopy
+   }
+
+   // Copy parameters
+   srcBucket, srcKey := bucket.Split(srcObj.absolute)
+   dstBucket, dstKey := f.absolute(remote)
+   options := uplink.CopyObjectOptions{}
+
+   // Do the copy
+   newObject, err := f.project.CopyObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
+   if err != nil {
+       // Make sure destination bucket exists
+       _, err := f.project.EnsureBucket(ctx, dstBucket)
+       if err != nil {
+           return nil, fmt.Errorf("copy object failed to create destination bucket: %w", err)
+       }
+       // And try again
+       newObject, err = f.project.CopyObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
+       if err != nil {
+           return nil, fmt.Errorf("copy object failed: %w", err)
+       }
+   }
+
+   // Return the new object
+   return newObjectFromUplink(f, remote, newObject), nil
+}
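The create-bucket-and-retry shape above is a common pattern when a server-side operation can fail because the destination container does not exist yet. A generic hedged sketch of the same control flow; the copyFn/ensureFn callbacks are placeholders, not the uplink API:

package main

import (
    "errors"
    "fmt"
)

// copyWithRetry tries the operation once, lazily creates the missing
// destination container on failure, then retries exactly once -- the
// same shape as the Copy method above.
func copyWithRetry(copyFn func() error, ensureFn func() error) error {
    if err := copyFn(); err == nil {
        return nil
    }
    if err := ensureFn(); err != nil {
        return fmt.Errorf("failed to create destination: %w", err)
    }
    if err := copyFn(); err != nil {
        return fmt.Errorf("copy failed: %w", err)
    }
    return nil
}

func main() {
    calls := 0
    copyFn := func() error {
        calls++
        if calls == 1 {
            return errors.New("bucket missing")
        }
        return nil
    }
    fmt.Println(copyWithRetry(copyFn, func() error { return nil })) // <nil>
}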
@@ -2,7 +2,7 @@ package sugarsync

import (
    "bytes"
-   "io/ioutil"
+   "io"
    "net/http"
    "testing"

@@ -48,7 +48,7 @@ func TestErrorHandler(t *testing.T) {
    } {
        t.Run(test.name, func(t *testing.T) {
            resp := http.Response{
-               Body:       ioutil.NopCloser(bytes.NewBufferString(test.body)),
+               Body:       io.NopCloser(bytes.NewBufferString(test.body)),
                StatusCode: test.code,
                Status:     test.status,
            }

@@ -6,7 +6,6 @@ import (
    "context"
    "errors"
    "io"
-   "io/ioutil"
    "testing"

    "github.com/ncw/swift/v2"
@@ -136,7 +135,7 @@ func (f *Fs) testWithChunkFail(t *testing.T) {
    buf := bytes.NewBufferString(contents[:errPosition])
    errMessage := "potato"
    er := &readers.ErrorReader{Err: errors.New(errMessage)}
-   in := ioutil.NopCloser(io.MultiReader(buf, er))
+   in := io.NopCloser(io.MultiReader(buf, er))

    file.Size = contentSize
    obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil)

@@ -5,7 +5,6 @@ import (
    "errors"
    "fmt"
    "io"
-   "io/ioutil"
    "sync"
    "time"

@@ -87,7 +86,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
            errs[i] = fmt.Errorf("%s: %w", o.UpstreamFs().Name(), err)
            if len(entries) > 1 {
                // Drain the input buffer to allow other uploads to continue
-               _, _ = io.Copy(ioutil.Discard, readers[i])
+               _, _ = io.Copy(io.Discard, readers[i])
            }
        }
    } else {

@@ -7,7 +7,6 @@ import (
    "errors"
    "fmt"
    "io"
-   "io/ioutil"
    "path"
    "path/filepath"
    "strings"
@@ -501,7 +500,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
            errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
            if len(upstreams) > 1 {
                // Drain the input buffer to allow other uploads to continue
-               _, _ = io.Copy(ioutil.Discard, readers[i])
+               _, _ = io.Copy(io.Discard, readers[i])
            }
            return
        }

@@ -7,7 +7,6 @@ import (
    "errors"
    "fmt"
    "io"
-   "io/ioutil"
    "net/http"
    "net/url"
    "path"
@@ -239,7 +238,7 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe
func (f *Fs) decodeError(resp *http.Response, response interface{}) (err error) {
    defer fs.CheckClose(resp.Body, &err)

-   body, err := ioutil.ReadAll(resp.Body)
+   body, err := io.ReadAll(resp.Body)
    if err != nil {
        return err
    }

@@ -7,7 +7,6 @@ import (
    "errors"
    "fmt"
    "io"
-   "io/ioutil"
    "net/http"
    "net/url"
    "path"
@@ -1219,7 +1218,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
    if partialContent && resp.StatusCode == 200 {
        if start > 0 {
            // We need to read and discard the beginning of the data...
-           _, err = io.CopyN(ioutil.Discard, resp.Body, start)
+           _, err = io.CopyN(io.Discard, resp.Body, start)
            if err != nil {
                if resp != nil {
                    _ = resp.Body.Close()

bin/backend-versions.sh (new executable file, 17 lines)
@@ -0,0 +1,17 @@
+#!/bin/bash
+# This adds the version each backend was released to its docs page
+set -e
+for backend in $( find backend -maxdepth 1 -type d ); do
+   backend=$(basename $backend)
+   if [[ "$backend" == "backend" || "$backend" == "vfs" || "$backend" == "all" || "$backend" == "azurefile" ]]; then
+       continue
+   fi
+
+   commit=$(git log --oneline -- $backend | tail -1 | cut -d' ' -f1)
+   if [ "$commit" == "" ]; then
+       commit=$(git log --oneline -- backend/$backend | tail -1 | cut -d' ' -f1)
+   fi
+   version=$(git tag --contains $commit | grep ^v | sort -n | head -1)
+   echo $backend $version
+   sed -i~ "4i versionIntroduced: \"$version\"" docs/content/${backend}.md
+done
@@ -9,7 +9,6 @@ import (
    "encoding/json"
    "flag"
    "fmt"
-   "io/ioutil"
    "log"
    "os"
    "os/exec"
@@ -240,7 +239,7 @@ func buildWindowsResourceSyso(goarch string, versionTag string) string {
        log.Printf("Failed to resolve path: %v", err)
        return ""
    }
-   err = ioutil.WriteFile(jsonPath, bs, 0644)
+   err = os.WriteFile(jsonPath, bs, 0644)
    if err != nil {
        log.Printf("Failed to write %s: %v", jsonPath, err)
        return ""
@@ -476,7 +475,7 @@ func main() {
        run("mkdir", "build")
    }
    chdir("build")
-   err := ioutil.WriteFile("version.txt", []byte(fmt.Sprintf("rclone %s\n", version)), 0666)
+   err := os.WriteFile("version.txt", []byte(fmt.Sprintf("rclone %s\n", version)), 0666)
    if err != nil {
        log.Fatalf("Couldn't write version.txt: %v", err)
    }

@@ -16,7 +16,6 @@ import (
    "flag"
    "fmt"
    "io"
-   "io/ioutil"
    "log"
    "net/http"
    "net/url"
@@ -168,7 +167,7 @@ func defaultBinDir() string {

// read the body or an error message
func readBody(in io.Reader) string {
-   data, err := ioutil.ReadAll(in)
+   data, err := io.ReadAll(in)
    if err != nil {
        return fmt.Sprintf("Error reading body: %v", err.Error())
    }

@@ -5,7 +5,6 @@ import (
    "bytes"
    "flag"
    "fmt"
-   "io/ioutil"
    "log"
    "os"
    "os/exec"
@@ -56,7 +55,7 @@ func main() {
        log.Fatalf("Syntax: %s", os.Args[0])
    }
    // v1.54.0
-   versionBytes, err := ioutil.ReadFile("VERSION")
+   versionBytes, err := os.ReadFile("VERSION")
    if err != nil {
        log.Fatalf("Failed to read version: %v", err)
    }

@@ -93,6 +93,9 @@ provided by a backend. Where the value is unlimited it is omitted.
Some backends do not support the ` + "`rclone about`" + ` command at all,
see the complete list in the [documentation](https://rclone.org/overview/#optional-features).
`,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.41",
+   },
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 1, command, args)
        f := cmd.NewFsSrc(args)

@@ -30,6 +30,9 @@ rclone config.

Use the --auth-no-open-browser flag to prevent rclone from opening the
auth link in the default browser automatically.`,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.27",
+   },
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(1, 3, command, args)
        return config.Authorize(context.Background(), args, noAutoBrowser)

@@ -58,6 +58,9 @@ Pass arguments to the backend by placing them on the end of the line
Note: to run these commands on a running backend, see
[backend/command](/rc/#backend-command) in the rc docs.
`,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.52",
+   },
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(2, 1e6, command, args)
        name, remote := args[0], args[1]

@@ -5,7 +5,6 @@ package bilib
import (
    "fmt"
    "io"
-   "io/ioutil"
    "os"
    "path/filepath"
    "regexp"
@@ -106,7 +105,7 @@ func CopyDir(src string, dst string) (err error) {
        return
    }

-   entries, err := ioutil.ReadDir(src)
+   entries, err := os.ReadDir(src)
    if err != nil {
        return
    }
@@ -122,7 +121,7 @@ func CopyDir(src string, dst string) (err error) {
        }
    } else {
        // Skip symlinks.
-       if entry.Mode()&os.ModeSymlink != 0 {
+       if entry.Type()&os.ModeSymlink != 0 {
            continue
        }

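The Mode() to Type() change is forced by the API difference: ioutil.ReadDir returned []os.FileInfo, while os.ReadDir returns []fs.DirEntry, whose Type() carries only the mode's type bits. A small sketch of the distinction (the directory path is illustrative):

package main

import (
    "fmt"
    "os"
)

func main() {
    // os.ReadDir returns []fs.DirEntry: cheaper than ioutil.ReadDir
    // because it does not stat every file up front.
    entries, err := os.ReadDir(".")
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        return
    }
    for _, entry := range entries {
        // Type() exposes only the type bits (dir, symlink, pipe...),
        // which is all the symlink check above needs.
        if entry.Type()&os.ModeSymlink != 0 {
            fmt.Println("symlink:", entry.Name())
            continue
        }
        // Full permissions still require an Info() call (a stat).
        info, err := entry.Info()
        if err != nil {
            continue
        }
        fmt.Printf("%s %s\n", info.Mode(), entry.Name())
    }
}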
@@ -2,7 +2,7 @@ package bilib

import (
    "bytes"
-   "io/ioutil"
+   "os"
    "sort"
    "strconv"
)
@@ -57,5 +57,5 @@ func SaveList(list []string, path string) error {
        _, _ = buf.WriteString(strconv.Quote(s))
        _ = buf.WriteByte('\n')
    }
-   return ioutil.WriteFile(path, buf.Bytes(), PermSecure)
+   return os.WriteFile(path, buf.Bytes(), PermSecure)
}

@@ -10,7 +10,6 @@ import (
    "errors"
    "flag"
    "fmt"
-   "io/ioutil"
    "log"
    "os"
    "path"
@@ -303,7 +302,7 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str

    // Execute test scenario
    scenFile := filepath.Join(b.testDir, "scenario.txt")
-   scenBuf, err := ioutil.ReadFile(scenFile)
+   scenBuf, err := os.ReadFile(scenFile)
    scenReplacer := b.newReplacer(false)
    require.NoError(b.t, err)
    b.step = 0
@@ -903,8 +902,8 @@ func (b *bisyncTest) compareResults() int {
        // save mangled logs so difference is easier on eyes
        goldenFile := filepath.Join(b.logDir, "mangled.golden.log")
        resultFile := filepath.Join(b.logDir, "mangled.result.log")
-       require.NoError(b.t, ioutil.WriteFile(goldenFile, []byte(goldenText), bilib.PermSecure))
-       require.NoError(b.t, ioutil.WriteFile(resultFile, []byte(resultText), bilib.PermSecure))
+       require.NoError(b.t, os.WriteFile(goldenFile, []byte(goldenText), bilib.PermSecure))
+       require.NoError(b.t, os.WriteFile(resultFile, []byte(resultText), bilib.PermSecure))
    }

    if goldenText == resultText {
@@ -974,7 +973,7 @@ func (b *bisyncTest) storeGolden() {

        goldName := b.toGolden(fileName)
        goldPath := filepath.Join(b.goldenDir, goldName)
-       err := ioutil.WriteFile(goldPath, []byte(text), bilib.PermSecure)
+       err := os.WriteFile(goldPath, []byte(text), bilib.PermSecure)
        assert.NoError(b.t, err, "writing golden file %s", goldName)

        if goldName != fileName {
@@ -986,7 +985,7 @@ func (b *bisyncTest) storeGolden() {

// mangleResult prepares test logs or listings for comparison
func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
-   buf, err := ioutil.ReadFile(filepath.Join(dir, file))
+   buf, err := os.ReadFile(filepath.Join(dir, file))
    require.NoError(b.t, err)
    text := string(buf)

@@ -1205,7 +1204,7 @@ func (b *bisyncTest) ensureDir(parent, dir string, optional bool) string {
}

func (b *bisyncTest) listDir(dir string) (names []string) {
-   files, err := ioutil.ReadDir(dir)
+   files, err := os.ReadDir(dir)
    require.NoError(b.t, err)
    for _, file := range files {
        names = append(names, filepath.Base(file.Name()))

@@ -9,7 +9,6 @@ import (
    "errors"
    "fmt"
    "io"
-   "io/ioutil"
    "os"
    "path/filepath"
    "strings"
@@ -116,6 +115,9 @@ var commandDefinition = &cobra.Command{
    Use:   "bisync remote1:path1 remote2:path2",
    Short: shortHelp,
    Long:  longHelp,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.58",
+   },
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(2, 2, command, args)
        fs1, file1, fs2, file2 := cmd.NewFsSrcDstFiles(args)
@@ -198,7 +200,7 @@ func (opt *Options) applyFilters(ctx context.Context) (context.Context, error) {
    _ = f.Close()

    hashFile := filtersFile + ".md5"
-   wantHash, err := ioutil.ReadFile(hashFile)
+   wantHash, err := os.ReadFile(hashFile)
    if err != nil && !opt.Resync {
        return ctx, fmt.Errorf("filters file md5 hash not found (must run --resync): %s", filtersFile)
    }
@@ -209,7 +211,7 @@ func (opt *Options) applyFilters(ctx context.Context) (context.Context, error) {

    if opt.Resync {
        fs.Infof(nil, "Storing filters file hash to %s", hashFile)
-       if err := ioutil.WriteFile(hashFile, []byte(gotHash), bilib.PermSecure); err != nil {
+       if err := os.WriteFile(hashFile, []byte(gotHash), bilib.PermSecure); err != nil {
            return ctx, err
        }
    }

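bisync stores an md5 of the filters file next to it and refuses to run when the stored and current hashes diverge, forcing a --resync after filter changes. A hedged, standalone sketch of that guard; the file names and error wording are illustrative, not bisync's exact behaviour:

package main

import (
    "crypto/md5"
    "encoding/hex"
    "errors"
    "fmt"
    "os"
)

// checkFiltersHash compares the md5 of filtersFile against the hash
// stored alongside it, mirroring the applyFilters guard above.
func checkFiltersHash(filtersFile string) error {
    data, err := os.ReadFile(filtersFile)
    if err != nil {
        return err
    }
    sum := md5.Sum(data)
    gotHash := hex.EncodeToString(sum[:])

    wantHash, err := os.ReadFile(filtersFile + ".md5")
    if err != nil {
        return errors.New("filters file md5 hash not found (must run --resync)")
    }
    if gotHash != string(wantHash) {
        return errors.New("filters file has changed (must run --resync)")
    }
    return nil
}

func main() {
    if err := checkFiltersHash("filters.txt"); err != nil {
        fmt.Println(err)
    }
}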
@@ -7,7 +7,6 @@ import (
    "context"
    "errors"
    "fmt"
-   "io/ioutil"
    "os"
    "path/filepath"
    "strconv"
@@ -81,7 +80,7 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
    }

    pidStr := []byte(strconv.Itoa(os.Getpid()))
-   if err = ioutil.WriteFile(lockFile, pidStr, bilib.PermSecure); err != nil {
+   if err = os.WriteFile(lockFile, pidStr, bilib.PermSecure); err != nil {
        return fmt.Errorf("cannot create lock file: %s: %w", lockFile, err)
    }
    fs.Debugf(nil, "Lock file created: %s", lockFile)

@@ -25,6 +25,10 @@ var commandDefinition = &cobra.Command{
Print cache stats for a remote in JSON format
`,
    Hidden: true,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.39",
+       "status":            "Deprecated",
+   },
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 1, command, args)
        fs.Logf(nil, `"rclone cachestats" is deprecated, use "rclone backend stats %s" instead`, args[0])

@@ -4,7 +4,6 @@ package cat
import (
    "context"
    "io"
-   "io/ioutil"
    "log"
    "os"
    "strings"
@@ -58,6 +57,9 @@ the end and |--offset| and |--count| to print a section in the middle.
Note that if offset is negative it will count from the end, so
|--offset -1 --count 1| is equivalent to |--tail 1|.
`, "|", "`"),
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.33",
+   },
    Run: func(command *cobra.Command, args []string) {
        usedOffset := offset != 0 || count >= 0
        usedHead := head > 0
@@ -77,7 +79,7 @@ Note that if offset is negative it will count from the end, so
        fsrc := cmd.NewFsSrc(args)
        var w io.Writer = os.Stdout
        if discard {
-           w = ioutil.Discard
+           w = io.Discard
        }
        cmd.Run(false, false, command, func() error {
            return operations.Cat(context.Background(), fsrc, w, offset, count)

@@ -37,6 +37,9 @@ that don't support hashes or if you really want to check all the data.

Note that hash values in the SUM file are treated as case insensitive.
`, "|", "`") + check.FlagsHelp,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.56",
+   },
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(3, 3, command, args)
        var hashType hash.Type

@@ -20,6 +20,9 @@ var commandDefinition = &cobra.Command{
Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes.
`,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.31",
+   },
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 1, command, args)
        fsrc := cmd.NewFsSrc(args)

cmd/cmount/fs.go (132 changed lines)
@@ -8,6 +8,7 @@ import (
    "io"
    "os"
+   "path"
    "strings"
    "sync"
    "sync/atomic"
    "time"
@@ -95,8 +96,11 @@ func (fsys *FS) closeHandle(fh uint64) (errc int) {
}

// lookup a Node given a path
-func (fsys *FS) lookupNode(path string) (node vfs.Node, errc int) {
+func (fsys *FS) lookupNode(path string) (vfs.Node, int) {
    node, err := fsys.VFS.Stat(path)
+   if err == vfs.ENOENT && fsys.VFS.Opt.Links {
+       node, err = fsys.VFS.Stat(path + fs.LinkSuffix)
+   }
    return node, translateError(err)
}

@@ -117,6 +121,13 @@ func (fsys *FS) lookupDir(path string) (dir *vfs.Dir, errc int) {
func (fsys *FS) lookupParentDir(filePath string) (leaf string, dir *vfs.Dir, errc int) {
    parentDir, leaf := path.Split(filePath)
    dir, errc = fsys.lookupDir(parentDir)
+   // Try to get real leaf for symlinks
+   if fsys.VFS.Opt.Links {
+       node, e := fsys.lookupNode(filePath)
+       if e == 0 {
+           leaf = node.Name()
+       }
+   }
    return leaf, dir, errc
}

@@ -153,15 +164,9 @@ func (fsys *FS) stat(node vfs.Node, stat *fuse.Stat_t) (errc int) {
    Size := uint64(node.Size())
    Blocks := (Size + 511) / 512
    modTime := node.ModTime()
-   Mode := node.Mode().Perm()
-   if node.IsDir() {
-       Mode |= fuse.S_IFDIR
-   } else {
-       Mode |= fuse.S_IFREG
-   }
    //stat.Dev = 1
    stat.Ino = node.Inode() // FIXME do we need to set the inode number?
-   stat.Mode = uint32(Mode)
+   stat.Mode = getMode(node)
    stat.Nlink = 1
    stat.Uid = fsys.VFS.Opt.UID
    stat.Gid = fsys.VFS.Opt.GID
@@ -252,7 +257,7 @@ func (fsys *FS) Readdir(dirPath string,
    fill(".", nil, 0)
    fill("..", nil, 0)
    for _, node := range nodes {
-       name := node.Name()
+       name, _ := fsys.VFS.TrimSymlink(node.Name())
        if len(name) > mountlib.MaxLeafSize {
            fs.Errorf(dirPath, "Name too long (%d bytes) for FUSE, skipping: %s", len(name), name)
            continue
@@ -330,13 +335,15 @@ func (fsys *FS) CreateEx(filePath string, mode uint32, fi *fuse.FileInfo_t) (err
    if errc != 0 {
        return errc
    }
-   file, err := parentDir.Create(leaf, fi.Flags)
+   // translate the fuse flags to os flags
+   osFlags := translateOpenFlags(fi.Flags) | os.O_CREATE
+   // translate the fuse mode to os mode
+   //osMode := getFileMode(mode)
+   file, err := parentDir.Create(leaf, osFlags)
    if err != nil {
        return translateError(err)
    }
-   // translate the fuse flags to os flags
-   flags := translateOpenFlags(fi.Flags) | os.O_CREATE
-   handle, err := file.Open(flags)
+   handle, err := file.Open(osFlags)
    if err != nil {
        return translateError(err)
    }
@@ -456,6 +463,18 @@ func (fsys *FS) Rmdir(dirPath string) (errc int) {
// Rename renames a file.
func (fsys *FS) Rename(oldPath string, newPath string) (errc int) {
    defer log.Trace(oldPath, "newPath=%q", newPath)("errc=%d", &errc)
+
+   if fsys.VFS.Opt.Links {
+       node, e := fsys.lookupNode(oldPath)
+
+       if e == 0 {
+           if strings.HasSuffix(node.Name(), fs.LinkSuffix) {
+               oldPath += fs.LinkSuffix
+               newPath += fs.LinkSuffix
+           }
+       }
+   }
+
    return translateError(fsys.VFS.Rename(oldPath, newPath))
}

@@ -505,14 +524,58 @@ func (fsys *FS) Link(oldpath string, newpath string) (errc int) {

// Symlink creates a symbolic link.
func (fsys *FS) Symlink(target string, newpath string) (errc int) {
-   defer log.Trace(target, "newpath=%q", newpath)("errc=%d", &errc)
-   return -fuse.ENOSYS
+   defer log.Trace(fsys, "Requested to symlink newpath=%q, target=%q", newpath, target)("errc=%d", &errc)
+
+   if fsys.VFS.Opt.Links {
+       // The user must NOT provide .rclonelink suffix
+       if strings.HasSuffix(newpath, fs.LinkSuffix) {
+           fs.Errorf(nil, "Invalid name suffix provided: %v", newpath)
+           return translateError(vfs.EINVAL)
+       }
+
+       newpath += fs.LinkSuffix
+   } else {
+       // The user must provide .rclonelink suffix
+       if !strings.HasSuffix(newpath, fs.LinkSuffix) {
+           fs.Errorf(nil, "Invalid name suffix provided: %v", newpath)
+           return translateError(vfs.EINVAL)
+       }
+   }
+
+   // Add target suffix when linking to a link
+   if !strings.HasSuffix(target, fs.LinkSuffix) {
+       vnode, err := fsys.lookupNode(target)
+       if err == 0 && strings.HasSuffix(vnode.Name(), fs.LinkSuffix) {
+           target += fs.LinkSuffix
+       }
+   }
+
+   return translateError(fsys.VFS.Symlink(target, newpath))
}

// Readlink reads the target of a symbolic link.
func (fsys *FS) Readlink(path string) (errc int, linkPath string) {
-   defer log.Trace(path, "")("linkPath=%q, errc=%d", &linkPath, &errc)
-   return -fuse.ENOSYS, ""
+   defer log.Trace(fsys, "Requested to read link")("errc=%v, linkPath=%q", &errc, linkPath)
+
+   if fsys.VFS.Opt.Links {
+       // The user must NOT provide .rclonelink suffix
+       if strings.HasSuffix(path, fs.LinkSuffix) {
+           fs.Errorf(nil, "Invalid name suffix provided: %v", path)
+           return translateError(vfs.EINVAL), ""
+       }
+
+       path += fs.LinkSuffix
+   } else {
+       // The user must provide .rclonelink suffix
+       if !strings.HasSuffix(path, fs.LinkSuffix) {
+           fs.Errorf(nil, "Invalid name suffix provided: %v", path)
+           return translateError(vfs.EINVAL), ""
+       }
+   }
+
+   linkPath, err := fsys.VFS.Readlink(path)
+   linkPath, _ = fsys.VFS.TrimSymlink(linkPath)
+   return translateError(err), linkPath
}

// Chmod changes the permission bits of a file.
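All three mount frontends touched by this branch share the same convention: with --links enabled the user-visible name never carries the .rclonelink suffix and rclone appends it internally; without --links the user must spell the suffix out. A hedged sketch of that normalization rule, standalone rather than the VFS API (the linkSuffix constant mirrors fs.LinkSuffix):

package main

import (
    "errors"
    "fmt"
    "strings"
)

const linkSuffix = ".rclonelink" // rclone's marker for symlink files

// normalizeLinkName applies the branch's naming rule: with --links the
// caller-facing name must not end in .rclonelink (rclone adds it);
// without --links the caller must supply the suffix explicitly.
func normalizeLinkName(name string, links bool) (string, error) {
    if links {
        if strings.HasSuffix(name, linkSuffix) {
            return "", errors.New("invalid name suffix provided")
        }
        return name + linkSuffix, nil
    }
    if !strings.HasSuffix(name, linkSuffix) {
        return "", errors.New("invalid name suffix provided")
    }
    return name, nil
}

func main() {
    fmt.Println(normalizeLinkName("note", true))             // note.rclonelink <nil>
    fmt.Println(normalizeLinkName("note.rclonelink", false)) // note.rclonelink <nil>
    fmt.Println(normalizeLinkName("note.rclonelink", true))  // error
}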
@@ -627,6 +690,41 @@ func translateOpenFlags(inFlags int) (outFlags int) {
    return outFlags
}

+// get the Mode from a vfs Node
+func getMode(node os.FileInfo) uint32 {
+   vfsMode := node.Mode()
+   Mode := vfsMode.Perm()
+   if vfsMode&os.ModeDir != 0 {
+       Mode |= fuse.S_IFDIR
+   } else if vfsMode&os.ModeSymlink != 0 {
+       Mode |= fuse.S_IFLNK
+   } else if vfsMode&os.ModeNamedPipe != 0 {
+       Mode |= fuse.S_IFIFO
+   } else {
+       Mode |= fuse.S_IFREG
+   }
+   return uint32(Mode)
+}
+
+// convert fuse mode to os.FileMode
+// func getFileMode(mode uint32) os.FileMode {
+//     osMode := os.FileMode(0)
+//     if mode&fuse.S_IFDIR != 0 {
+//         mode ^= fuse.S_IFDIR
+//         osMode |= os.ModeDir
+//     } else if mode&fuse.S_IFREG != 0 {
+//         mode ^= fuse.S_IFREG
+//     } else if mode&fuse.S_IFLNK != 0 {
+//         mode ^= fuse.S_IFLNK
+//         osMode |= os.ModeSymlink
+//     } else if mode&fuse.S_IFIFO != 0 {
+//         mode ^= fuse.S_IFIFO
+//         osMode |= os.ModeNamedPipe
+//     }
+//     osMode |= os.FileMode(mode)
+//     return osMode
+// }
+
// Make sure interfaces are satisfied
var (
    _ fuse.FileSystemInterface = (*FS)(nil)

@@ -44,6 +44,9 @@ var configCommand = &cobra.Command{
remotes and manage existing ones. You may also set or remove a
password to protect your configuration.
`,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.39",
+   },
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(0, 0, command, args)
        return config.EditConfig(context.Background())
@@ -54,12 +57,18 @@ var configEditCommand = &cobra.Command{
    Use:   "edit",
    Short: configCommand.Short,
    Long:  configCommand.Long,
-   Run:   configCommand.Run,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.39",
+   },
+   Run: configCommand.Run,
}

var configFileCommand = &cobra.Command{
    Use:   "file",
    Short: `Show path of configuration file in use.`,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.38",
+   },
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(0, 0, command, args)
        config.ShowConfigLocation()
@@ -69,6 +78,9 @@ var configFileCommand = &cobra.Command{
var configTouchCommand = &cobra.Command{
    Use:   "touch",
    Short: `Ensure configuration file exists.`,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.56",
+   },
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(0, 0, command, args)
        config.SaveConfig()
@@ -78,6 +90,9 @@ var configTouchCommand = &cobra.Command{
var configPathsCommand = &cobra.Command{
    Use:   "paths",
    Short: `Show paths used for configuration, cache, temp etc.`,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.57",
+   },
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(0, 0, command, args)
        fmt.Printf("Config file: %s\n", config.GetConfigPath())
@@ -89,6 +104,9 @@ var configPathsCommand = &cobra.Command{
var configShowCommand = &cobra.Command{
    Use:   "show [<remote>]",
    Short: `Print (decrypted) config file, or the config for a single remote.`,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.38",
+   },
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(0, 1, command, args)
        if len(args) == 0 {
@@ -103,6 +121,9 @@ var configShowCommand = &cobra.Command{
var configDumpCommand = &cobra.Command{
    Use:   "dump",
    Short: `Dump the config file as JSON.`,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.39",
+   },
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(0, 0, command, args)
        return config.Dump()
@@ -112,6 +133,9 @@ var configDumpCommand = &cobra.Command{
var configProvidersCommand = &cobra.Command{
    Use:   "providers",
    Short: `List in JSON format all the providers and options.`,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.39",
+   },
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(0, 0, command, args)
        return config.JSONListProviders()
@@ -152,7 +176,7 @@ This will look something like (some irrelevant detail removed):
"State": "*oauth-islocal,teamdrive,,",
"Option": {
    "Name": "config_is_local",
-   "Help": "Use auto config?\n * Say Y if not sure\n * Say N if you are working on a remote or headless machine\n",
+   "Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n",
    "Default": true,
    "Examples": [
        {
@@ -226,6 +250,9 @@ using remote authorization you would do this:

    rclone config create mydrive drive config_is_local=false
`, "|", "`") + configPasswordHelp,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.39",
+   },
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(2, 256, command, args)
        in, err := argsToMap(args[2:])
@@ -289,6 +316,9 @@ require this add an extra parameter thus:

    rclone config update myremote env_auth=true config_refresh_token=false
`, "|", "`") + configPasswordHelp,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.39",
+   },
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(1, 256, command, args)
        in, err := argsToMap(args[1:])
@@ -304,6 +334,9 @@ require this add an extra parameter thus:
var configDeleteCommand = &cobra.Command{
    Use:   "delete name",
    Short: "Delete an existing remote.",
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.39",
+   },
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 1, command, args)
        config.DeleteRemote(args[0])
@@ -326,6 +359,9 @@ For example, to set password of a remote of name myremote you would do:

This command is obsolete now that "config update" and "config create"
both support obscuring passwords directly.
`, "|", "`"),
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.39",
+   },
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(1, 256, command, args)
        in, err := argsToMap(args[1:])

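The versionIntroduced strings added throughout these commands ride on cobra's Annotations field, an arbitrary string map that cobra itself never interprets; tooling such as the gendocs changes below reads it back. A minimal hedged sketch (the command here is invented):

package main

import (
    "fmt"

    "github.com/spf13/cobra"
)

func main() {
    // Annotations are free-form metadata on a command; cobra ignores
    // them, so docs generation can carry them into page frontmatter.
    cmd := &cobra.Command{
        Use:   "example",
        Short: "Demonstrate annotations",
        Annotations: map[string]string{
            "versionIntroduced": "v1.39",
        },
        Run: func(cmd *cobra.Command, args []string) {},
    }
    fmt.Println(cmd.Annotations["versionIntroduced"]) // v1.39
}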
@@ -46,6 +46,9 @@ the destination.

**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics
`,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.35",
+   },
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(2, 2, command, args)
        fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args)

@@ -51,6 +51,9 @@ destination if there is one with the same name.

Setting ` + "`--stdout`" + ` or making the output file name ` + "`-`" + `
will cause the output to be written to standard output.
`,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.43",
+   },
    RunE: func(command *cobra.Command, args []string) (err error) {
        cmd.CheckArgs(1, 2, command, args)

@@ -47,6 +47,9 @@ the files in remote:path.

After it has run it will log the status of the encryptedremote:.
` + check.FlagsHelp,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.36",
+   },
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(2, 2, command, args)
        fsrc, fdst := cmd.NewFsSrcDst(args)

@@ -41,6 +41,9 @@ use it like this

Another way to accomplish this is by using the ` + "`rclone backend encode` (or `decode`)" + ` command.
See the documentation on the [crypt](/crypt/) overlay for more info.
`,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.38",
+   },
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(2, 11, command, args)
        cmd.Run(false, false, command, func() error {

@@ -135,6 +135,9 @@ Or

    rclone dedupe rename "drive:Google Photos"
`,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.27",
+   },
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 2, command, args)
        if len(args) > 1 {

@@ -53,6 +53,9 @@ delete all files bigger than 100 MiB.

**Important**: Since this can cause data loss, test first with the
|--dry-run| or the |--interactive|/|-i| flag.
`, "|", "`"),
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.27",
+   },
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 1, command, args)
        fsrc := cmd.NewFsSrc(args)

@@ -22,6 +22,9 @@ Remove a single file from remote. Unlike ` + "`" + `delete` + "`" + ` it cannot
remove a directory and it doesn't obey include/exclude filters - if the specified file exists,
it will always be removed.
`,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.42",
+   },
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 1, command, args)
        fs, fileName := cmd.NewFsFile(args[0])

@@ -17,4 +17,7 @@ var completionDefinition = &cobra.Command{
Generates a shell completion script for rclone.
Run with ` + "`--help`" + ` to list the supported shells.
`,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.33",
+   },
}

@@ -1,7 +1,6 @@
package genautocomplete

import (
-   "io/ioutil"
    "os"
    "testing"

@@ -9,7 +8,7 @@ import (
)

func TestCompletionBash(t *testing.T) {
-   tempFile, err := ioutil.TempFile("", "completion_bash")
+   tempFile, err := os.CreateTemp("", "completion_bash")
    assert.NoError(t, err)
    defer func() {
        _ = tempFile.Close()
@@ -18,14 +17,14 @@ func TestCompletionBash(t *testing.T) {

    bashCommandDefinition.Run(bashCommandDefinition, []string{tempFile.Name()})

-   bs, err := ioutil.ReadFile(tempFile.Name())
+   bs, err := os.ReadFile(tempFile.Name())
    assert.NoError(t, err)
    assert.NotEmpty(t, string(bs))
}

func TestCompletionBashStdout(t *testing.T) {
    originalStdout := os.Stdout
-   tempFile, err := ioutil.TempFile("", "completion_zsh")
+   tempFile, err := os.CreateTemp("", "completion_zsh")
    assert.NoError(t, err)
    defer func() {
        _ = tempFile.Close()
@@ -37,13 +36,13 @@ func TestCompletionBashStdout(t *testing.T) {

    bashCommandDefinition.Run(bashCommandDefinition, []string{"-"})

-   output, err := ioutil.ReadFile(tempFile.Name())
+   output, err := os.ReadFile(tempFile.Name())
    assert.NoError(t, err)
    assert.NotEmpty(t, string(output))
}

func TestCompletionZsh(t *testing.T) {
-   tempFile, err := ioutil.TempFile("", "completion_zsh")
+   tempFile, err := os.CreateTemp("", "completion_zsh")
    assert.NoError(t, err)
    defer func() {
        _ = tempFile.Close()
@@ -52,14 +51,14 @@ func TestCompletionZsh(t *testing.T) {

    zshCommandDefinition.Run(zshCommandDefinition, []string{tempFile.Name()})

-   bs, err := ioutil.ReadFile(tempFile.Name())
+   bs, err := os.ReadFile(tempFile.Name())
    assert.NoError(t, err)
    assert.NotEmpty(t, string(bs))
}

func TestCompletionZshStdout(t *testing.T) {
    originalStdout := os.Stdout
-   tempFile, err := ioutil.TempFile("", "completion_zsh")
+   tempFile, err := os.CreateTemp("", "completion_zsh")
    assert.NoError(t, err)
    defer func() {
        _ = tempFile.Close()
@@ -70,13 +69,13 @@ func TestCompletionZshStdout(t *testing.T) {
    defer func() { os.Stdout = originalStdout }()

    zshCommandDefinition.Run(zshCommandDefinition, []string{"-"})
-   output, err := ioutil.ReadFile(tempFile.Name())
+   output, err := os.ReadFile(tempFile.Name())
    assert.NoError(t, err)
    assert.NotEmpty(t, string(output))
}

func TestCompletionFish(t *testing.T) {
-   tempFile, err := ioutil.TempFile("", "completion_fish")
+   tempFile, err := os.CreateTemp("", "completion_fish")
    assert.NoError(t, err)
    defer func() {
        _ = tempFile.Close()
@@ -85,14 +84,14 @@ func TestCompletionFish(t *testing.T) {

    fishCommandDefinition.Run(fishCommandDefinition, []string{tempFile.Name()})

-   bs, err := ioutil.ReadFile(tempFile.Name())
+   bs, err := os.ReadFile(tempFile.Name())
    assert.NoError(t, err)
    assert.NotEmpty(t, string(bs))
}

func TestCompletionFishStdout(t *testing.T) {
    originalStdout := os.Stdout
-   tempFile, err := ioutil.TempFile("", "completion_zsh")
+   tempFile, err := os.CreateTemp("", "completion_zsh")
    assert.NoError(t, err)
    defer func() {
        _ = tempFile.Close()
@@ -104,7 +103,7 @@ func TestCompletionFishStdout(t *testing.T) {

    fishCommandDefinition.Run(fishCommandDefinition, []string{"-"})

-   output, err := ioutil.ReadFile(tempFile.Name())
+   output, err := os.ReadFile(tempFile.Name())
    assert.NoError(t, err)
    assert.NotEmpty(t, string(output))
}

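os.CreateTemp is a drop-in replacement for ioutil.TempFile: same arguments, same random-name substitution in the pattern, same *os.File return. A minimal hedged example of the pattern the tests above use:

package main

import (
    "fmt"
    "os"
)

func main() {
    // An empty dir argument means the OS default temp directory; a
    // random string is appended to (or substituted into) the pattern,
    // exactly as with ioutil.TempFile.
    tempFile, err := os.CreateTemp("", "completion_test")
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        return
    }
    defer func() {
        _ = tempFile.Close()
        _ = os.Remove(tempFile.Name()) // tests should clean up after themselves
    }()
    fmt.Println("created:", tempFile.Name())
}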
@@ -3,7 +3,6 @@ package gendocs

import (
    "bytes"
-   "io/ioutil"
    "log"
    "os"
    "path"
@@ -32,6 +31,7 @@ type frontmatter struct {
    Slug        string
    URL         string
    Source      string
+   Annotations map[string]string
}

var frontmatterTemplate = template.Must(template.New("frontmatter").Parse(`---
@@ -39,6 +39,9 @@ title: "{{ .Title }}"
description: "{{ .Description }}"
slug: {{ .Slug }}
url: {{ .URL }}
+{{- range $key, $value := .Annotations }}
+{{ $key }}: {{ $value }}
+{{- end }}
# autogenerated - DO NOT EDIT, instead edit the source code in {{ .Source }} and as part of making a release run "make commanddocs"
---
`))
@@ -50,6 +53,9 @@ var commandDefinition = &cobra.Command{
This produces markdown docs for the rclone commands to the directory
supplied. These are in a format suitable for hugo to render into the
rclone.org website.`,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.33",
+   },
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(1, 1, command, args)
        now := time.Now().Format(time.RFC3339)
@@ -71,22 +77,29 @@ rclone.org website.`,
        if err != nil {
            return err
        }
-       err = ioutil.WriteFile(filepath.Join(root, "flags.md"), buf.Bytes(), 0777)
+       err = os.WriteFile(filepath.Join(root, "flags.md"), buf.Bytes(), 0777)
        if err != nil {
            return err
        }

-       // Look up name => description for prepender
-       var description = map[string]string{}
-       var addDescription func(root *cobra.Command)
-       addDescription = func(root *cobra.Command) {
+       // Look up name => details for prepender
+       type commandDetails struct {
+           Short       string
+           Annotations map[string]string
+       }
+       var commands = map[string]commandDetails{}
+       var addCommandDetails func(root *cobra.Command)
+       addCommandDetails = func(root *cobra.Command) {
            name := strings.ReplaceAll(root.CommandPath(), " ", "_") + ".md"
-           description[name] = root.Short
+           commands[name] = commandDetails{
+               Short:       root.Short,
+               Annotations: root.Annotations,
+           }
            for _, c := range root.Commands() {
-               addDescription(c)
+               addCommandDetails(c)
            }
        }
-       addDescription(cmd.Root)
+       addCommandDetails(cmd.Root)

        // markup for the docs files
        prepender := func(filename string) string {
@@ -95,10 +108,11 @@ rclone.org website.`,
            data := frontmatter{
                Date:        now,
                Title:       strings.ReplaceAll(base, "_", " "),
-               Description: description[name],
+               Description: commands[name].Short,
                Slug:        base,
                URL:         "/commands/" + strings.ToLower(base) + "/",
                Source:      strings.ReplaceAll(strings.ReplaceAll(base, "rclone", "cmd"), "_", "/") + "/",
+               Annotations: commands[name].Annotations,
            }
            var buf bytes.Buffer
            err := frontmatterTemplate.Execute(&buf, data)
@@ -129,7 +143,7 @@ rclone.org website.`,
            return err
        }
        if !info.IsDir() {
-           b, err := ioutil.ReadFile(path)
+           b, err := os.ReadFile(path)
            if err != nil {
                return err
            }
@@ -140,7 +154,7 @@ See the [global flags page](/flags/) for global options not listed here.
### SEE ALSO`, 1)
            // outdent all the titles by one
            doc = outdentTitle.ReplaceAllString(doc, `$1`)
-           err = ioutil.WriteFile(path, []byte(doc), 0777)
+           err = os.WriteFile(path, []byte(doc), 0777)
            if err != nil {
                return err
            }

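With Annotations threaded into the frontmatter struct, each annotation becomes a YAML key in the generated page header. A hedged, cut-down reproduction of the template logic above, keeping only Title and Annotations:

package main

import (
    "os"
    "text/template"
)

// A trimmed copy of the frontmatter template, enough to show how each
// Annotations entry becomes a YAML key in the page header.
var tmpl = template.Must(template.New("frontmatter").Parse(`---
title: "{{ .Title }}"
{{- range $key, $value := .Annotations }}
{{ $key }}: {{ $value }}
{{- end }}
---
`))

func main() {
    data := struct {
        Title       string
        Annotations map[string]string
    }{
        Title:       "rclone about",
        Annotations: map[string]string{"versionIntroduced": "v1.41"},
    }
    // Prints:
    // ---
    // title: "rclone about"
    // versionIntroduced: v1.41
    // ---
    _ = tmpl.Execute(os.Stdout, data)
}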
@@ -112,6 +112,9 @@ Then

Note that hash names are case insensitive and values are output in lower case.
`,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.41",
+   },
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(0, 2, command, args)
        if len(args) == 0 {

@@ -49,6 +49,9 @@ link. Exact capabilities depend on the remote, but the link will
always by default be created with the least constraints – e.g. no
expiry, no password protection, accessible without account.
`,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.41",
+   },
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 1, command, args)
        fsrc, remote := cmd.NewFsFile(args[0])

@@ -30,6 +30,9 @@ rclone listremotes lists all the available remotes from the config file.

When used with the ` + "`--long`" + ` flag it lists the types too.
`,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.34",
+   },
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(0, 0, command, args)
        remotes := config.FileSections()

@@ -142,6 +142,9 @@ those only (without traversing the whole directory structure):

    rclone copy --files-from-raw new_files /path/to/local remote:path

` + lshelp.Help,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.40",
+   },
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 1, command, args)
        fsrc := cmd.NewFsSrc(args)

@@ -112,6 +112,9 @@ will be shown ("2017-05-31T16:15:57+01:00").

The whole output can be processed as a JSON blob, or alternatively it
can be processed line by line as each item is written on its own line.
` + lshelp.Help,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.37",
+   },
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(1, 1, command, args)
        var fsrc fs.Fs

@@ -31,6 +31,9 @@ Eg

    37600 2016-06-25 18:55:40.814629136 fubuwic

` + lshelp.Help,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.02",
+   },
    Run: func(command *cobra.Command, args []string) {
        cmd.CheckArgs(1, 1, command, args)
        fsrc := cmd.NewFsSrc(args)

@@ -38,6 +38,9 @@ by not passing a remote:path, or by passing a hyphen as remote:path
when there is data to read (if not, the hyphen will be treated literally,
as a relative path).
`,
+   Annotations: map[string]string{
+       "versionIntroduced": "v1.02",
+   },
    RunE: func(command *cobra.Command, args []string) error {
        cmd.CheckArgs(0, 1, command, args)
        if found, err := hashsum.CreateFromStdinArg(hash.MD5, args, 0); found {

cmd/mount/dir.go (103 changed lines)
@@ -8,6 +8,8 @@ import (
    "fmt"
    "io"
    "os"
+   "path"
+   "strings"
    "syscall"
    "time"

@@ -28,13 +30,21 @@ type Dir struct {
// Check interface satisfied
var _ fusefs.Node = (*Dir)(nil)

+func fallbackStat(dir *vfs.Dir, leaf string) (node vfs.Node, err error) {
+   node, err = dir.Stat(leaf)
+   if err == vfs.ENOENT && dir.VFS().Opt.Links {
+       node, err = dir.Stat(leaf + fs.LinkSuffix)
+   }
+   return node, err
+}
+
// Attr updates the attributes of a directory
func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) (err error) {
    defer log.Trace(d, "")("attr=%+v, err=%v", a, &err)
    a.Valid = d.fsys.opt.AttrTimeout
    a.Gid = d.VFS().Opt.GID
    a.Uid = d.VFS().Opt.UID
-   a.Mode = os.ModeDir | d.VFS().Opt.DirPerms
+   a.Mode = d.Mode()
    modTime := d.ModTime()
    a.Atime = modTime
    a.Mtime = modTime
@@ -74,7 +84,7 @@ var _ fusefs.NodeRequestLookuper = (*Dir)(nil)
// Lookup need not to handle the names "." and "..".
func (d *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fusefs.Node, err error) {
    defer log.Trace(d, "name=%q", req.Name)("node=%+v, err=%v", &node, &err)
-   mnode, err := d.Dir.Stat(req.Name)
+   mnode, err := fallbackStat(d.Dir, req.Name)
    if err != nil {
        return nil, translateError(err)
    }
@@ -117,7 +127,7 @@ func (d *Dir) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error)
        Name: "..",
    })
    for _, node := range items {
-       name := node.Name()
+       name, isLink := d.VFS().TrimSymlink(node.Name())
        if len(name) > mountlib.MaxLeafSize {
            fs.Errorf(d, "Name too long (%d bytes) for FUSE, skipping: %s", len(name), name)
            continue
@@ -127,7 +137,9 @@ func (d *Dir) ReadDirAll(ctx context.Context) (dirents []fuse.Dirent, err error)
            Type: fuse.DT_File,
            Name: name,
        }
-       if node.IsDir() {
+       if isLink {
+           dirent.Type = fuse.DT_Link
+       } else if node.IsDir() {
            dirent.Type = fuse.DT_Dir
        }
        dirents = append(dirents, dirent)
@@ -141,11 +153,13 @@ var _ fusefs.NodeCreater = (*Dir)(nil)
// Create makes a new file
func (d *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (node fusefs.Node, handle fusefs.Handle, err error) {
    defer log.Trace(d, "name=%q", req.Name)("node=%v, handle=%v, err=%v", &node, &handle, &err)
-   file, err := d.Dir.Create(req.Name, int(req.Flags))
+   // translate the fuse flags to os flags
+   osFlags := int(req.Flags) | os.O_CREATE
+   file, err := d.Dir.Create(req.Name, osFlags)
    if err != nil {
        return nil, nil, translateError(err)
    }
-   fh, err := file.Open(int(req.Flags) | os.O_CREATE)
+   fh, err := file.Open(osFlags)
    if err != nil {
        return nil, nil, translateError(err)
    }
@@ -175,7 +189,18 @@ var _ fusefs.NodeRemover = (*Dir)(nil)
// may correspond to a file (unlink) or to a directory (rmdir).
func (d *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) (err error) {
    defer log.Trace(d, "name=%q", req.Name)("err=%v", &err)
-   err = d.Dir.RemoveName(req.Name)
+
+   name := req.Name
+
+   if d.VFS().Opt.Links {
+       node, err := fallbackStat(d.Dir, name)
+
+       if err == nil {
+           name = node.Name()
+       }
+   }
+
+   err = d.Dir.RemoveName(name)
    if err != nil {
        return translateError(err)
    }
@@ -202,7 +227,22 @@ func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs
        return fmt.Errorf("unknown Dir type %T", newDir)
    }

-   err = d.Dir.Rename(req.OldName, req.NewName, destDir.Dir)
+   oldName := req.OldName
+   newName := req.NewName
+
+   if d.VFS().Opt.Links {
+       node, err := fallbackStat(d.Dir, oldName)
+
+       if err == nil {
+           oldName = node.Name()
+
+           if strings.HasSuffix(oldName, fs.LinkSuffix) {
+               newName += fs.LinkSuffix
+           }
+       }
+   }
+
+   err = d.Dir.Rename(oldName, newName, destDir.Dir)
    if err != nil {
        return translateError(err)
    }
@@ -240,6 +280,53 @@ func (d *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fusefs.Node)
    return nil, syscall.ENOSYS
}

+var _ fusefs.NodeSymlinker = (*Dir)(nil)
+
+// Symlink creates a symbolic link.
+func (d *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (node fusefs.Node, err error) {
+   defer log.Trace(d, "Requested to symlink newname=%v, target=%v", req.NewName, req.Target)("node=%v, err=%v", &node, &err)
+
+   newName := path.Join(d.Path(), req.NewName)
+   target := req.Target
+
+   if d.VFS().Opt.Links {
+       // The user must NOT provide .rclonelink suffix
+       if strings.HasSuffix(newName, fs.LinkSuffix) {
+           fs.Errorf(nil, "Invalid name suffix provided: %v", newName)
+           return nil, vfs.EINVAL
+       }
+
+       newName += fs.LinkSuffix
+   } else {
+       // The user must provide .rclonelink suffix
+       if !strings.HasSuffix(newName, fs.LinkSuffix) {
+           fs.Errorf(nil, "Invalid name suffix provided: %v", newName)
+           return nil, vfs.EINVAL
+       }
+   }
+
+   // Add target suffix when linking to a link
+   if !strings.HasSuffix(target, fs.LinkSuffix) {
+       vnode, err := fallbackStat(d.Dir, target)
+       if err == nil && strings.HasSuffix(vnode.Name(), fs.LinkSuffix) {
+           target += fs.LinkSuffix
+       }
+   }
+
+   err = d.VFS().Symlink(target, newName)
+   if err != nil {
+       return nil, err
+   }
+
+   n, err := d.Stat(path.Base(newName))
+   if err != nil {
+       return nil, err
+   }
+
+   node = &File{n.(*vfs.File), d.fsys}
+   return node, nil
+}
+
// Check interface satisfied
var _ fusefs.NodeMknoder = (*Dir)(nil)

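As in the cmount frontend, renaming has to keep the stored .rclonelink suffix attached, or the renamed entry would stop being recognised as a link. A small hedged sketch of just that naming rule (the helper name is invented):

package main

import (
    "fmt"
    "strings"
)

const linkSuffix = ".rclonelink"

// renameNames mirrors the Rename logic above: if the real stored name
// of the source carries the symlink suffix, the destination must too.
func renameNames(storedOldName, requestedNewName string) (string, string) {
    if strings.HasSuffix(storedOldName, linkSuffix) {
        requestedNewName += linkSuffix
    }
    return storedOldName, requestedNewName
}

func main() {
    fmt.Println(renameNames("note.rclonelink", "renamed"))
    // note.rclonelink renamed.rclonelink
}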
@@ -5,11 +5,14 @@ package mount

import (
    "context"
+   "os"
+   "strings"
    "syscall"
    "time"

    "bazil.org/fuse"
    fusefs "bazil.org/fuse/fs"
+   "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/log"
    "github.com/rclone/rclone/vfs"
)
@@ -32,7 +35,7 @@ func (f *File) Attr(ctx context.Context, a *fuse.Attr) (err error) {
    Blocks := (Size + 511) / 512
    a.Gid = f.VFS().Opt.GID
    a.Uid = f.VFS().Opt.UID
-   a.Mode = f.VFS().Opt.FilePerms
+   a.Mode = f.File.Mode() &^ os.ModeAppend
    a.Size = Size
    a.Atime = modTime
    a.Mtime = modTime
@@ -126,3 +129,32 @@ func (f *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) er
}

var _ fusefs.NodeRemovexattrer = (*File)(nil)

+var _ fusefs.NodeReadlinker = (*File)(nil)
+
+// Readlink reads a symbolic link target.
+func (f *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (ret string, err error) {
+   defer log.Trace(f, "Requested to read link")("ret=%v, err=%v", &ret, &err)
+
+   path := f.Path()
+
+   if f.VFS().Opt.Links {
+       // The user must NOT provide .rclonelink suffix
+       // if strings.HasSuffix(path, fs.LinkSuffix) {
+       //     fs.Errorf(nil, "Invalid name suffix provided: %v", path)
+       //     return "", vfs.EINVAL
+       // }
+
+       // path += fs.LinkSuffix
+   } else {
+       // The user must provide .rclonelink suffix
+       if !strings.HasSuffix(path, fs.LinkSuffix) {
+           fs.Errorf(nil, "Invalid name suffix provided: %v", path)
+           return "", vfs.EINVAL
+       }
+   }
+
+   ret, err = f.VFS().Readlink(path)
+   ret, _ = f.VFS().TrimSymlink(ret)
+   return ret, err
+}

@@ -8,7 +8,6 @@ import (
    "bytes"
    "flag"
    "io"
-   "io/ioutil"
    "log"
    "math/rand"
    "os"
@@ -60,11 +59,11 @@ func randomSeekTest(size int64, in1, in2 *os.File, file1, file2 string) {

    if !bytes.Equal(buf1, buf2) {
        log.Printf("Dumping different blocks")
-       err = ioutil.WriteFile("/tmp/z1", buf1, 0777)
+       err = os.WriteFile("/tmp/z1", buf1, 0777)
        if err != nil {
            log.Fatalf("Failed to write /tmp/z1: %v", err)
        }
-       err = ioutil.WriteFile("/tmp/z2", buf2, 0777)
+       err = os.WriteFile("/tmp/z2", buf2, 0777)
        if err != nil {
            log.Fatalf("Failed to write /tmp/z2: %v", err)
        }

@@ -51,15 +51,39 @@ func (f *FS) SetDebug(debug bool) {

// get the Mode from a vfs Node
func getMode(node os.FileInfo) uint32 {
-   Mode := node.Mode().Perm()
-   if node.IsDir() {
+   vfsMode := node.Mode()
+   Mode := vfsMode.Perm()
+   if vfsMode&os.ModeDir != 0 {
        Mode |= fuse.S_IFDIR
+   } else if vfsMode&os.ModeSymlink != 0 {
+       Mode |= fuse.S_IFLNK
+   } else if vfsMode&os.ModeNamedPipe != 0 {
+       Mode |= fuse.S_IFIFO
    } else {
        Mode |= fuse.S_IFREG
    }
    return uint32(Mode)
}

+// convert fuse mode to os.FileMode
+// func getFileMode(mode uint32) os.FileMode {
+//     osMode := os.FileMode(0)
+//     if mode&fuse.S_IFDIR != 0 {
+//         mode ^= fuse.S_IFDIR
+//         osMode |= os.ModeDir
+//     } else if mode&fuse.S_IFREG != 0 {
+//         mode ^= fuse.S_IFREG
+//     } else if mode&fuse.S_IFLNK != 0 {
+//         mode ^= fuse.S_IFLNK
+//         osMode |= os.ModeSymlink
+//     } else if mode&fuse.S_IFIFO != 0 {
+//         mode ^= fuse.S_IFIFO
+//         osMode |= os.ModeNamedPipe
+//     }
+//     osMode |= os.FileMode(mode)
+//     return osMode
+// }
+
// fill in attr from node
func setAttr(node vfs.Node, attr *fuse.Attr) {
    Size := uint64(node.Size())

@@ -7,6 +7,7 @@ import (
    "context"
    "os"
    "path"
+   "strings"
    "syscall"

    fusefs "github.com/hanwen/go-fuse/v2/fs"
@@ -56,6 +57,9 @@ func (n *Node) lookupVfsNodeInDir(leaf string) (vfsNode vfs.Node, errno syscall.
        return nil, syscall.ENOTDIR
    }
    vfsNode, err := dir.Stat(leaf)
+   if err == vfs.ENOENT && dir.VFS().Opt.Links {
+       vfsNode, err = dir.Stat(leaf + fs.LinkSuffix)
+   }
    return vfsNode, translateError(err)
}

@@ -219,6 +223,7 @@ func (n *Node) Opendir(ctx context.Context) syscall.Errno {
|
||||
var _ = (fusefs.NodeOpendirer)((*Node)(nil))
|
||||
|
||||
type dirStream struct {
|
||||
fsys *FS
|
||||
nodes []os.FileInfo
|
||||
i int
|
||||
}
|
||||
@@ -226,7 +231,7 @@ type dirStream struct {
|
||||
// HasNext indicates if there are further entries. HasNext
|
||||
// might be called on already closed streams.
|
||||
func (ds *dirStream) HasNext() bool {
|
||||
return ds.i < len(ds.nodes)
|
||||
return ds.i < len(ds.nodes)+2
|
||||
}
|
||||
|
||||
// Next retrieves the next entry. It is only called if HasNext
|
||||
@@ -234,14 +239,30 @@
 // indicate I/O errors
 func (ds *dirStream) Next() (de fuse.DirEntry, errno syscall.Errno) {
 	// defer log.Trace(nil, "")("de=%+v, errno=%v", &de, &errno)
-	fi := ds.nodes[ds.i]
+	if ds.i == 0 {
+		ds.i++
+		return fuse.DirEntry{
+			Mode: fuse.S_IFDIR,
+			Name: ".",
+			Ino:  0, // FIXME
+		}, 0
+	} else if ds.i == 1 {
+		ds.i++
+		return fuse.DirEntry{
+			Mode: fuse.S_IFDIR,
+			Name: "..",
+			Ino:  0, // FIXME
+		}, 0
+	}
+	fi := ds.nodes[ds.i-2]
+	name, _ := ds.fsys.VFS.TrimSymlink(path.Base(fi.Name()))
 	de = fuse.DirEntry{
 		// Mode is the file's mode. Only the high bits (e.g. S_IFDIR)
 		// are considered.
 		Mode: getMode(fi),

 		// Name is the basename of the file in the directory.
-		Name: path.Base(fi.Name()),
+		Name: name,

 		// Ino is the inode number.
 		Ino: 0, // FIXME
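The two dirStream changes work as a pair: HasNext advertises len(nodes)+2 entries, and Next serves synthetic "." and ".." entries for indices 0 and 1 before reading the real listing at ds.i-2. A stripped-down sketch of that offset-by-two iterator pattern, with hypothetical names and no FUSE types:

```go
package main

import "fmt"

// dotStream yields ".", "..", then the real names, mirroring the
// offset-by-two scheme in dirStream.HasNext/Next above.
type dotStream struct {
	names []string
	i     int
}

func (ds *dotStream) HasNext() bool { return ds.i < len(ds.names)+2 }

func (ds *dotStream) Next() string {
	defer func() { ds.i++ }()
	switch ds.i {
	case 0:
		return "."
	case 1:
		return ".."
	default:
		return ds.names[ds.i-2]
	}
}

func main() {
	ds := &dotStream{names: []string{"a.txt", "b.txt"}}
	for ds.HasNext() {
		fmt.Println(ds.Next()) // ".", "..", "a.txt", "b.txt"
	}
}
```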
@@ -289,6 +310,7 @@ func (n *Node) Readdir(ctx context.Context) (ds fusefs.DirStream, errno syscall.
 	}
 	return &dirStream{
 		nodes: items,
+		fsys:  n.fsys,
 	}, 0
 }
@@ -326,6 +348,8 @@ func (n *Node) Create(ctx context.Context, name string, flags uint32, mode uint3
 	}
 	// translate the fuse flags to os flags
 	osFlags := int(flags) | os.O_CREATE
+	// translate the fuse mode to os mode
+	//osMode := getFileMode(mode)
 	file, err := dir.Create(name, osFlags)
 	if err != nil {
 		return nil, nil, 0, translateError(err)
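Create forwards the kernel's open flags to the VFS unchanged apart from forcing O_CREATE; the mode translation stays parked in the commented-out getFileMode above. A toy illustration of that flag OR (flag values are platform-specific, taken here from the os package):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// A FUSE Create must always set O_CREATE, whatever flags the kernel
	// sent, mirroring `osFlags := int(flags) | os.O_CREATE` above.
	kernelFlags := os.O_WRONLY | os.O_TRUNC // example flags from the kernel
	osFlags := kernelFlags | os.O_CREATE
	fmt.Printf("%#o\n", osFlags)
}
```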
@@ -401,7 +425,101 @@ func (n *Node) Rename(ctx context.Context, oldName string, newParent fusefs.Inod
 	if !ok {
 		return syscall.ENOTDIR
 	}
+	if oldDir.VFS().Opt.Links {
+		node, err := n.lookupVfsNodeInDir(oldName)
+
+		if err == 0 {
+			oldName = node.Name()
+
+			if strings.HasSuffix(oldName, fs.LinkSuffix) {
+				newName += fs.LinkSuffix
+			}
+		}
+	}
 	return translateError(oldDir.Rename(oldName, newName, newDir))
 }

 var _ = (fusefs.NodeRenamer)((*Node)(nil))
+
+var _ fusefs.NodeReadlinker = (*Node)(nil)
+
+// Readlink read symbolic link target.
+func (n *Node) Readlink(ctx context.Context) (ret []byte, err syscall.Errno) {
+	defer log.Trace(n, "Requested to read link")("ret=%v, err=%v", &ret, &err)
+
+	path := n.node.Path()
+
+	if n.node.VFS().Opt.Links {
+		// The user must NOT provide .rclonelink suffix
+		// if strings.HasSuffix(path, fs.LinkSuffix) {
+		// 	fs.Errorf(nil, "Invalid name suffix provided: %v", path)
+		// 	return nil, translateError(vfs.EINVAL)
+		// }
+
+		// path += fs.LinkSuffix
+	} else {
+		// The user must provide .rclonelink suffix
+		if !strings.HasSuffix(path, fs.LinkSuffix) {
+			fs.Errorf(nil, "Invalid name suffix provided: %v", path)
+			return nil, translateError(vfs.EINVAL)
+		}
+	}
+
+	s, serr := n.node.VFS().Readlink(path)
+	if serr != nil {
+		return nil, translateError(serr)
+	}
+	s, _ = n.node.VFS().TrimSymlink(s)
+	return []byte(s), 0
+}
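Readlink enforces the suffix convention in both directions: under --links the caller must not name the .rclonelink file (the VFS resolves links transparently), while without --links only the raw .rclonelink file may be read, and TrimSymlink strips the suffix from whatever the VFS returns. A simplified stand-in for that trimming step (the real TrimSymlink is a VFS method; its second return value is assumed here to report whether trimming happened):

```go
package main

import (
	"fmt"
	"strings"
)

const linkSuffix = ".rclonelink" // matches fs.LinkSuffix

// trimSymlink is an assumed, simplified stand-in for VFS.TrimSymlink:
// drop the suffix if present and report whether it was there.
func trimSymlink(name string) (string, bool) {
	if strings.HasSuffix(name, linkSuffix) {
		return strings.TrimSuffix(name, linkSuffix), true
	}
	return name, false
}

func main() {
	fmt.Println(trimSymlink("target.txt.rclonelink")) // target.txt true
	fmt.Println(trimSymlink("target.txt"))            // target.txt false
}
```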
+
+var _ fusefs.NodeSymlinker = (*Node)(nil)
+
+// Symlink create symbolic link.
+func (n *Node) Symlink(ctx context.Context, target, name string, out *fuse.EntryOut) (node *fusefs.Inode, err syscall.Errno) {
+	defer log.Trace(n, "Requested to symlink name=%v, target=%v", name, target)("node=%v, err=%v", &node, &err)
+
+	name = path.Join(n.node.Path(), name)
+
+	if n.node.VFS().Opt.Links {
+		// The user must NOT provide .rclonelink suffix
+		if strings.HasSuffix(name, fs.LinkSuffix) {
+			fs.Errorf(nil, "Invalid name suffix provided: %v", name)
+			return nil, translateError(vfs.EINVAL)
+		}
+
+		name += fs.LinkSuffix
+	} else {
+		// The user must provide .rclonelink suffix
+		if !strings.HasSuffix(name, fs.LinkSuffix) {
+			fs.Errorf(nil, "Invalid name suffix provided: %v", name)
+			return nil, translateError(vfs.EINVAL)
+		}
+	}
+
+	// Add target suffix when linking to a link
+	if !strings.HasSuffix(target, fs.LinkSuffix) {
+		vnode, err := n.lookupVfsNodeInDir(target)
+		if err == 0 && strings.HasSuffix(vnode.Name(), fs.LinkSuffix) {
+			target += fs.LinkSuffix
+		}
+	}
+
+	serr := n.node.VFS().Symlink(target, name)
+	if serr != nil {
+		return nil, translateError(serr)
+	}
+
+	// Find the created node
+	vfsNode, err := n.lookupVfsNodeInDir(path.Base(name))
+	if err != 0 {
+		return nil, err
+	}
+
+	n.fsys.setEntryOut(vfsNode, out)
+	newNode := newNode(n.fsys, vfsNode)
+	fs.Debugf(nil, "attr=%#v", out.Attr)
+	newInode := n.NewInode(ctx, newNode, fusefs.StableAttr{Mode: out.Attr.Mode})
+
+	return newInode, 0
+}
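Symlink applies the mirror-image rule to the new link's own name (append the suffix under --links, require it otherwise) and also suffixes the target when it points at another stored link, so links-to-links keep resolving. A condensed sketch of just that name normalization as a pure function (hypothetical helper, not rclone API):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

const linkSuffix = ".rclonelink"

// normalizeLinkName sketches the naming rule above: under --links the
// caller must not supply the suffix (it is appended for them);
// otherwise they must supply it themselves.
func normalizeLinkName(name string, linksEnabled bool) (string, error) {
	hasSuffix := strings.HasSuffix(name, linkSuffix)
	if linksEnabled {
		if hasSuffix {
			return "", errors.New("invalid name suffix provided")
		}
		return name + linkSuffix, nil
	}
	if !hasSuffix {
		return "", errors.New("invalid name suffix provided")
	}
	return name, nil
}

func main() {
	fmt.Println(normalizeLinkName("mylink", true))             // mylink.rclonelink <nil>
	fmt.Println(normalizeLinkName("mylink.rclonelink", false)) // mylink.rclonelink <nil>
	fmt.Println(normalizeLinkName("mylink.rclonelink", true))  // "" error
}
```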
@@ -157,6 +157,9 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
 		Hidden: hidden,
 		Short:  `Mount the remote as file system on a mountpoint.`,
 		Long:   strings.ReplaceAll(strings.ReplaceAll(mountHelp, "|", "`"), "@", commandName) + vfs.Help,
+		Annotations: map[string]string{
+			"versionIntroduced": "v1.33",
+		},
 		Run: func(command *cobra.Command, args []string) {
 			cmd.CheckArgs(2, 2, command, args)
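Annotations is cobra's free-form map[string]string metadata on a command; the versionIntroduced key added to these commands is presumably read back by rclone's docs generator. A minimal sketch of setting and reading such an annotation with the real cobra API (the command itself is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "example",
		// Annotations is a plain map[string]string that cobra carries
		// around for tools such as doc generators to read back.
		Annotations: map[string]string{
			"versionIntroduced": "v1.33",
		},
		Run: func(cmd *cobra.Command, args []string) {},
	}
	fmt.Println(cmd.Annotations["versionIntroduced"]) // v1.33
}
```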
@@ -2,7 +2,6 @@ package mountlib_test

 import (
 	"context"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -36,7 +35,7 @@ func TestRc(t *testing.T) {
 	assert.NotNil(t, getMountTypes)

 	localDir := t.TempDir()
-	err := ioutil.WriteFile(filepath.Join(localDir, "file.txt"), []byte("hello"), 0666)
+	err := os.WriteFile(filepath.Join(localDir, "file.txt"), []byte("hello"), 0666)
 	require.NoError(t, err)

 	mountPoint := t.TempDir()
@@ -60,6 +60,9 @@ can speed transfers up greatly.

 **Note**: Use the |-P|/|--progress| flag to view real-time transfer statistics.
 `, "|", "`"),
+	Annotations: map[string]string{
+		"versionIntroduced": "v1.19",
+	},
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(2, 2, command, args)
 		fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
@@ -49,6 +49,9 @@ successful transfer.

 **Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics.
 `,
+	Annotations: map[string]string{
+		"versionIntroduced": "v1.35",
+	},
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(2, 2, command, args)
 		fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args)