Mirror of https://github.com/rclone/rclone.git (synced 2025-12-06 00:03:32 +00:00)

Compare commits: fix-vfs-la...v1.61-stab (102 commits)
Commits in this comparison (by SHA1):

486e713337  46e96918dc  639b61de95  b03ee4e9e7  176af2b217  6be0644178
0ce5e57c30  bc214291d5  d3e09d86e0  5a9706ab61  cce4340d48  577693e501
2001cc0831  a35490bf70  01877e5a0f  614d79121a  3a6f1f5cd7  4a31961c4f
7be9855a70  6f8112ff67  67fc227684  7edb4c0162  5db4493557  a85c0b0cc2
52443c2444  4444d2d102  08a1ca434b  a9ce86f9a3  3167292c2f  ec7cc2b3c3
2a2fcf1012  6d62267227  dfd8ad2fff  43506f8086  ec3cee89d3  a171497a8b
c6ad15e3b8  9a81885b51  3d291da0f6  43bf177ff7  c446651be8  6c407dbe15
5a59b49b6b  8b9f3bbe29  8e6a469f98  f650a543ef  683178a1f4  3937233e1e
c571200812  04a663829b  6b4a2c1c4e  f73be767a4  4120dffcc1  53ff5bb205
397f428c48  c5a2c9b046  b98d7f6634  beea4d5119  8e507075d1  be783a1856
450c366403  1dbdc48a77  d7cb17848d  f3c8b7a948  914fbe242c  f746b2fe85
a131da2c35  60e4cb6f6f  0a8b1fe5de  b24c83db21  4f386a1ccd  ab849b3613
10aee3926a  4583b61e3d  483e9e1ee3  c2dfc3e5b3  a9bd0c8de6  1628ca0d46
313493d51b  6d18f60725  d74662a751  d05fd2a14f  097be753ab  50c9678cea
7672cde4f3  a4c65532ea  46b080c092  0edf6478e3  f7cdf318db  6f3682c12f
e3d593d40c  83551bb02e  430bf0d5eb  dd71f5d968  7db1c506f2  959cd938bc
03b07c280c  705e8f2fe0  591fc3609a  b4a3d1b9ed  84219b95ab  2c78f56d48
MANUAL.html (generated) — 2610 changed lines. File diff suppressed because it is too large.
MANUAL.txt (generated) — 2907 changed lines. File diff suppressed because it is too large.
Makefile — 3 changed lines.
@@ -81,6 +81,9 @@ quicktest:

racequicktest:
	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...

compiletest:
	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...

# Do source code quality checks
check: rclone
	@echo "-- START CODE QUALITY REPORT -------------------------------"
@@ -50,6 +50,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and

* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
* Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
* Mega [:page_facing_up:](https://rclone.org/mega/)

File diff suppressed because it is too large.
@@ -1,5 +1,5 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
//go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js,go1.18

package azureblob
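This hunk (and the matching ones below) tightens the azureblob backend's build constraints from "not plan9/solaris/js" to additionally require Go 1.18, keeping the modern `//go:build` form and the legacy `// +build` comment in sync (since Go 1.17, `gofmt` keeps the two lines consistent). A minimal sketch of how such a gated file is laid out — not taken from the rclone source, just an illustration of the syntax used above:

```go
// Sketch: this file is compiled only with Go 1.18+ and never on
// plan9, solaris or js, mirroring the constraint in the hunk above.
//go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js,go1.18

// Note the blank line before the package clause: it keeps the build
// constraint from being read as the package documentation comment.
package azureblob
```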
@@ -1,12 +1,11 @@
// Test AzureBlob filesystem interface

//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js
//go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js,go1.18

package azureblob

import (
	"context"
	"testing"

	"github.com/rclone/rclone/fs"

@@ -17,10 +16,12 @@ import (

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName:    "TestAzureBlob:",
		NilObject:     (*Object)(nil),
		TiersToTest:   []string{"Hot", "Cool"},
		ChunkedUpload: fstests.ChunkedUploadConfig{},
		RemoteName:  "TestAzureBlob:",
		NilObject:   (*Object)(nil),
		TiersToTest: []string{"Hot", "Cool"},
		ChunkedUpload: fstests.ChunkedUploadConfig{
			MinChunkSize: defaultChunkSize,
		},
	})
}

@@ -32,36 +33,6 @@ var (
	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
)

// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
func TestServicePrincipalFileSuccess(t *testing.T) {
	ctx := context.TODO()
	credentials := `
{
"appId": "my application (client) ID",
"password": "my secret",
"tenant": "my active directory tenant ID"
}
`
	tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
	if assert.NoError(t, err) {
		assert.NotNil(t, tokenRefresher)
	}
}

// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
func TestServicePrincipalFileFailure(t *testing.T) {
	ctx := context.TODO()
	credentials := `
{
"appId": "my application (client) ID",
"tenant": "my active directory tenant ID"
}
`
	_, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
	assert.Error(t, err)
	assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
}

func TestValidateAccessTier(t *testing.T) {
	tests := map[string]struct {
		accessTier string
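Filling in `ChunkedUploadConfig` with a `MinChunkSize` is what lets the shared `fstests` suite exercise uploads across chunk boundaries for this backend, and the `_ fstests.SetUploadChunkSizer = (*Fs)(nil)` assertion documents the interface the suite drives to vary the chunk size. A hedged sketch of what an implementation of that interface tends to look like — the method name comes from the assertion above, but the field names and validation are illustrative only, not rclone's actual code:

```go
// Sketch: let the test suite change the upload chunk size at runtime and
// return the previous value so it can be restored afterwards.
// f.opt.ChunkSize and defaultChunkSize are assumed names here.
func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
	old := f.opt.ChunkSize
	if cs < defaultChunkSize {
		return old, fmt.Errorf("chunk size %v is below the minimum %v", cs, defaultChunkSize)
	}
	f.opt.ChunkSize = cs
	return old, nil
}
```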
@@ -1,7 +1,7 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "

//go:build plan9 || solaris || js
// +build plan9 solaris js
//go:build plan9 || solaris || js || !go1.18
// +build plan9 solaris js !go1.18

package azureblob
@@ -1,136 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package azureblob

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"

	"github.com/Azure/go-autorest/autorest/adal"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fshttp"
)

const (
	azureResource      = "https://storage.azure.com"
	imdsAPIVersion     = "2018-02-01"
	msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
)

// This custom type is used to add the port the test server has bound to
// to the request context.
type testPortKey string

type msiIdentifierType int

const (
	msiClientID msiIdentifierType = iota
	msiObjectID
	msiResourceID
)

type userMSI struct {
	Type  msiIdentifierType
	Value string
}

type httpError struct {
	Response *http.Response
}

func (e httpError) Error() string {
	return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
}

// GetMSIToken attempts to obtain an MSI token from the Azure Instance
// Metadata Service.
func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
	// Attempt to get an MSI token; silently continue if unsuccessful.
	// This code has been lovingly stolen from azcopy's OAuthTokenManager.
	result := adal.Token{}
	req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
	if err != nil {
		fs.Debugf(nil, "Failed to create request: %v", err)
		return result, err
	}
	params := req.URL.Query()
	params.Set("resource", azureResource)
	params.Set("api-version", imdsAPIVersion)

	// Specify user-assigned identity if requested.
	if identity != nil {
		switch identity.Type {
		case msiClientID:
			params.Set("client_id", identity.Value)
		case msiObjectID:
			params.Set("object_id", identity.Value)
		case msiResourceID:
			params.Set("mi_res_id", identity.Value)
		default:
			// If this happens, the calling function and this one don't agree on
			// what valid ID types exist.
			return result, fmt.Errorf("unknown MSI identity type specified")
		}
	}
	req.URL.RawQuery = params.Encode()

	// The Metadata header is required by all calls to IMDS.
	req.Header.Set("Metadata", "true")

	// If this function is run in a test, query the test server instead of IMDS.
	testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
	if isTest {
		req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
		req.Host = req.URL.Host
	}

	// Send request
	httpClient := fshttp.NewClient(ctx)
	resp, err := httpClient.Do(req)
	if err != nil {
		return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
	}
	defer func() { // resp and Body should not be nil
		_, err = io.Copy(io.Discard, resp.Body)
		if err != nil {
			fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
		}
		err = resp.Body.Close()
		if err != nil {
			fs.Debugf(nil, "Unable to close IMDS response: %v", err)
		}
	}()
	// Check if the status code indicates success
	// The request returns 200 currently, add 201 and 202 as well for possible extension.
	switch resp.StatusCode {
	case 200, 201, 202:
		break
	default:
		body, _ := io.ReadAll(resp.Body)
		fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
		return result, httpError{Response: resp}
	}

	b, err := io.ReadAll(resp.Body)
	if err != nil {
		return result, fmt.Errorf("couldn't read IMDS response: %w", err)
	}
	// Remove BOM, if any. azcopy does this so I'm following along.
	b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))

	// This would be a good place to persist the token if a large number of rclone
	// invocations are being made in a short amount of time. If the token is
	// persisted, the azureblob code will need to check for expiry before every
	// storage API call.
	err = json.Unmarshal(b, &result)
	if err != nil {
		return result, fmt.Errorf("couldn't unmarshal IMDS response: %w", err)
	}

	return result, nil
}
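Deleting this hand-rolled IMDS client suggests the backend now obtains managed-identity tokens some other way — the new go1.18 gate above points at the newer Azure SDK. For orientation, a minimal sketch of fetching the same storage-scoped token with `azidentity`; the assumption that this is the replacement path, and the exact option names, are illustrative and not copied from rclone:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	ctx := context.Background()
	// nil options = system-assigned identity; pass
	// &azidentity.ManagedIdentityCredentialOptions{ID: azidentity.ClientID("...")}
	// to select a user-assigned identity instead.
	cred, err := azidentity.NewManagedIdentityCredential(nil)
	if err != nil {
		panic(err)
	}
	tok, err := cred.GetToken(ctx, policy.TokenRequestOptions{
		Scopes: []string{"https://storage.azure.com/.default"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("token expires:", tok.ExpiresOn)
}
```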
@@ -1,118 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package azureblob

import (
	"context"
	"encoding/json"
	"net/http"
	"net/http/httptest"
	"strconv"
	"strings"
	"testing"

	"github.com/Azure/go-autorest/autorest/adal"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		err := r.ParseForm()
		require.NoError(t, err)
		parameters := r.URL.Query()
		(*actual)["path"] = r.URL.Path
		(*actual)["Metadata"] = r.Header.Get("Metadata")
		(*actual)["method"] = r.Method
		for paramName := range parameters {
			(*actual)[paramName] = parameters.Get(paramName)
		}
		// Make response.
		response := adal.Token{}
		responseBytes, err := json.Marshal(response)
		require.NoError(t, err)
		_, err = w.Write(responseBytes)
		require.NoError(t, err)
	}
}

func TestManagedIdentity(t *testing.T) {
	// test user-assigned identity specifiers to use
	testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
	testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
	testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
	tests := []struct {
		identity              *userMSI
		identityParameterName string
		expectedAbsent        []string
	}{
		{&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
		{&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
		{&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
		{nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
	}
	alwaysExpected := map[string]string{
		"path":        "/metadata/identity/oauth2/token",
		"resource":    "https://storage.azure.com",
		"Metadata":    "true",
		"api-version": "2018-02-01",
		"method":      "GET",
	}
	for _, test := range tests {
		actual := make(map[string]string, 10)
		testServer := httptest.NewServer(handler(t, &actual))
		defer testServer.Close()
		testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
		require.NoError(t, err)
		ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
		_, err = GetMSIToken(ctx, test.identity)
		require.NoError(t, err)

		// Validate expected query parameters present
		expected := make(map[string]string)
		for k, v := range alwaysExpected {
			expected[k] = v
		}
		if test.identity != nil {
			expected[test.identityParameterName] = test.identity.Value
		}

		for key := range expected {
			value, exists := actual[key]
			if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
				test.identityParameterName, key) {
				assert.Equalf(t, expected[key], value,
					"test of %s: parameter %s has incorrect value", test.identityParameterName, key)
			}
		}

		// Validate unexpected query parameters absent
		for _, key := range test.expectedAbsent {
			_, exists := actual[key]
			assert.Falsef(t, exists, "query parameter %s was unexpectedly passed")
		}
	}
}

func errorHandler(resultCode int) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "Test error generated", resultCode)
	}
}

func TestIMDSErrors(t *testing.T) {
	errorCodes := []int{404, 429, 500}
	for _, code := range errorCodes {
		testServer := httptest.NewServer(errorHandler(code))
		defer testServer.Close()
		testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
		require.NoError(t, err)
		ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
		_, err = GetMSIToken(ctx, nil)
		require.Error(t, err)
		httpErr, ok := err.(httpError)
		require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
		assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
	}
}
backend/cache/cache_internal_test.go (vendored) — 61 changed lines.
@@ -101,14 +101,12 @@ func TestMain(m *testing.M) {
|
||||
|
||||
func TestInternalListRootAndInnerRemotes(t *testing.T) {
|
||||
id := fmt.Sprintf("tilrair%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
|
||||
|
||||
// Instantiate inner fs
|
||||
innerFolder := "inner"
|
||||
runInstance.mkdir(t, rootFs, innerFolder)
|
||||
rootFs2, boltDb2 := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs2, boltDb2)
|
||||
rootFs2, _ := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil)
|
||||
|
||||
runInstance.writeObjectString(t, rootFs2, "one", "content")
|
||||
listRoot, err := runInstance.list(t, rootFs, "")
|
||||
@@ -225,8 +223,7 @@ func TestInternalVfsCache(t *testing.T) {
|
||||
|
||||
func TestInternalObjWrapFsFound(t *testing.T) {
|
||||
id := fmt.Sprintf("tiowff%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
|
||||
|
||||
cfs, err := runInstance.getCacheFs(rootFs)
|
||||
require.NoError(t, err)
|
||||
@@ -258,8 +255,7 @@ func TestInternalObjWrapFsFound(t *testing.T) {
|
||||
|
||||
func TestInternalObjNotFound(t *testing.T) {
|
||||
id := fmt.Sprintf("tionf%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
|
||||
|
||||
obj, err := rootFs.NewObject(context.Background(), "404")
|
||||
require.Error(t, err)
|
||||
@@ -269,8 +265,7 @@ func TestInternalObjNotFound(t *testing.T) {
|
||||
func TestInternalCachedWrittenContentMatches(t *testing.T) {
|
||||
testy.SkipUnreliable(t)
|
||||
id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
|
||||
|
||||
cfs, err := runInstance.getCacheFs(rootFs)
|
||||
require.NoError(t, err)
|
||||
@@ -297,8 +292,7 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
|
||||
t.Skip("Skip test on windows/386")
|
||||
}
|
||||
id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
|
||||
|
||||
// write the object
|
||||
runInstance.writeRemoteString(t, rootFs, "one", "one content")
|
||||
@@ -316,8 +310,7 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
|
||||
func TestInternalCachedUpdatedContentMatches(t *testing.T) {
|
||||
testy.SkipUnreliable(t)
|
||||
id := fmt.Sprintf("ticucm%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
|
||||
var err error
|
||||
|
||||
// create some rand test data
|
||||
@@ -346,8 +339,7 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
|
||||
func TestInternalWrappedWrittenContentMatches(t *testing.T) {
|
||||
id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
|
||||
vfsflags.Opt.DirCacheTime = time.Second
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
|
||||
if runInstance.rootIsCrypt {
|
||||
t.Skip("test skipped with crypt remote")
|
||||
}
|
||||
@@ -377,8 +369,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
|
||||
func TestInternalLargeWrittenContentMatches(t *testing.T) {
|
||||
id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
|
||||
vfsflags.Opt.DirCacheTime = time.Second
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
|
||||
if runInstance.rootIsCrypt {
|
||||
t.Skip("test skipped with crypt remote")
|
||||
}
|
||||
@@ -404,8 +395,7 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {
|
||||
|
||||
func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
|
||||
id := fmt.Sprintf("tiwfcns%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
|
||||
|
||||
cfs, err := runInstance.getCacheFs(rootFs)
|
||||
require.NoError(t, err)
|
||||
@@ -459,8 +449,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
|
||||
|
||||
func TestInternalMoveWithNotify(t *testing.T) {
|
||||
id := fmt.Sprintf("timwn%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
|
||||
if !runInstance.wrappedIsExternal {
|
||||
t.Skipf("Not external")
|
||||
}
|
||||
@@ -546,8 +535,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
|
||||
|
||||
func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
|
||||
id := fmt.Sprintf("tincep%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
|
||||
if !runInstance.wrappedIsExternal {
|
||||
t.Skipf("Not external")
|
||||
}
|
||||
@@ -633,8 +621,7 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
|
||||
|
||||
func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
|
||||
id := fmt.Sprintf("ticsadcf%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
|
||||
|
||||
cfs, err := runInstance.getCacheFs(rootFs)
|
||||
require.NoError(t, err)
|
||||
@@ -666,8 +653,7 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
|
||||
|
||||
func TestInternalCacheWrites(t *testing.T) {
|
||||
id := "ticw"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"writes": "true"})
|
||||
|
||||
cfs, err := runInstance.getCacheFs(rootFs)
|
||||
require.NoError(t, err)
|
||||
@@ -688,8 +674,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
|
||||
t.Skip("Skip test on windows/386")
|
||||
}
|
||||
id := fmt.Sprintf("timcsr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"workers": "1"})
|
||||
|
||||
cfs, err := runInstance.getCacheFs(rootFs)
|
||||
require.NoError(t, err)
|
||||
@@ -724,8 +709,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
|
||||
func TestInternalExpiredEntriesRemoved(t *testing.T) {
|
||||
id := fmt.Sprintf("tieer%v", time.Now().Unix())
|
||||
vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"info_age": "5s"}, nil)
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
|
||||
cfs, err := runInstance.getCacheFs(rootFs)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -762,9 +746,7 @@ func TestInternalBug2117(t *testing.T) {
|
||||
vfsflags.Opt.DirCacheTime = time.Second * 10
|
||||
|
||||
id := fmt.Sprintf("tib2117%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
|
||||
map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
|
||||
|
||||
if runInstance.rootIsCrypt {
|
||||
t.Skipf("skipping crypt")
|
||||
@@ -865,7 +847,7 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
|
||||
return enc
|
||||
}
|
||||
|
||||
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, cfg map[string]string, flags map[string]string) (fs.Fs, *cache.Persistent) {
|
||||
func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
|
||||
fstest.Initialise()
|
||||
remoteExists := false
|
||||
for _, s := range config.FileSections() {
|
||||
@@ -958,10 +940,15 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
||||
}
|
||||
err = f.Mkdir(context.Background(), "")
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Cleanup(func() {
|
||||
runInstance.cleanupFs(t, f)
|
||||
})
|
||||
|
||||
return f, boltDb
|
||||
}
|
||||
|
||||
func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
|
||||
func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
|
||||
err := f.Features().Purge(context.Background(), "")
|
||||
require.NoError(t, err)
|
||||
cfs, err := r.getCacheFs(f)
|
||||
|
||||
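The recurring change in this file is that `newCacheFs` drops its `cfg` parameter and every caller drops its paired `defer runInstance.cleanupFs(t, rootFs, boltDb)`: as the hunk near the end shows, the helper now registers its own teardown with `t.Cleanup`, and `cleanupFs` no longer needs the bolt database handle. A reduced sketch of that pattern, reusing the names from the test file (the setup body is a placeholder):

```go
// Sketch: a setup helper that owns its teardown, so call sites shrink to a
// single line and cannot forget the cleanup.
func (r *run) newCacheFs(t *testing.T /* ... */) (fs.Fs, *cache.Persistent) {
	f, boltDb := createCacheFsSomehow(t) // placeholder for the real setup
	t.Cleanup(func() {
		// Runs after the test (and its subtests) finish, replacing the
		// old "defer runInstance.cleanupFs(...)" at every call site.
		runInstance.cleanupFs(t, f)
	})
	return f, boltDb
}
```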
backend/cache/cache_upload_test.go (vendored) — 24 changed lines.
@@ -21,10 +21,8 @@ import (
|
||||
|
||||
func TestInternalUploadTempDirCreated(t *testing.T) {
|
||||
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
|
||||
nil,
|
||||
runInstance.newCacheFs(t, remoteName, id, false, true,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
|
||||
require.NoError(t, err)
|
||||
@@ -63,9 +61,7 @@ func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltD
|
||||
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
||||
}
|
||||
@@ -73,19 +69,15 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
|
||||
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
||||
}
|
||||
|
||||
func TestInternalUploadMoveExistingFile(t *testing.T) {
|
||||
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir(context.Background(), "one")
|
||||
require.NoError(t, err)
|
||||
@@ -119,10 +111,8 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
|
||||
|
||||
func TestInternalUploadTempPathCleaned(t *testing.T) {
|
||||
id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir(context.Background(), "one")
|
||||
require.NoError(t, err)
|
||||
@@ -162,10 +152,8 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
|
||||
|
||||
func TestInternalUploadQueueMoreFiles(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir(context.Background(), "test")
|
||||
require.NoError(t, err)
|
||||
@@ -213,9 +201,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
|
||||
func TestInternalUploadTempFileOperations(t *testing.T) {
|
||||
id := "tiutfo"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
boltDb.PurgeTempUploads()
|
||||
|
||||
@@ -343,9 +329,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
|
||||
func TestInternalUploadUploadingFileOperations(t *testing.T) {
|
||||
id := "tiuufo"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
boltDb.PurgeTempUploads()
|
||||
|
||||
|
||||
@@ -631,7 +631,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
	if err != nil {
		return nil, err
	}
	uSrc := operations.NewOverrideRemote(src, uRemote)
	uSrc := fs.NewOverrideRemote(src, uRemote)
	var o fs.Object
	if stream {
		o, err = u.f.Features().PutStream(ctx, in, uSrc, options...)

@@ -1047,10 +1047,11 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
	// Get the underlying object if there is one
	if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
		// Prefer direct interface assertion
	} else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
		// Otherwise likely is an operations.OverrideRemote
	} else if do, ok := o.ObjectInfo.(*fs.OverrideRemote); ok {
		// Unwrap if it is an operations.OverrideRemote
		srcObj = do.UnWrap()
	} else {
		// Otherwise don't unwrap any further
		return "", nil
	}
	// if this is wrapping a local object then we work out the hash
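These two hunks track `OverrideRemote` moving from the `operations` package into `fs`: callers construct the wrapper with `fs.NewOverrideRemote(src, newName)`, and code that needs the original object can now type-assert on the concrete `*fs.OverrideRemote` rather than relying only on the `fs.ObjectUnWrapper` interface. A condensed sketch of the unwrap logic, using only the types shown in the diff:

```go
// import "github.com/rclone/rclone/fs"

// unwrapForHash sketches the logic in the second hunk: given an ObjectInfo
// that may be an fs.OverrideRemote, recover the underlying object.
func unwrapForHash(oi fs.ObjectInfo) fs.Object {
	if o, ok := oi.(fs.Object); ok {
		return o // already a full object
	}
	if do, ok := oi.(*fs.OverrideRemote); ok {
		return do.UnWrap() // peel off the rename wrapper
	}
	return nil // don't unwrap any further
}
```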
@@ -17,41 +17,28 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type testWrapper struct {
|
||||
fs.ObjectInfo
|
||||
}
|
||||
|
||||
// UnWrap returns the Object that this Object is wrapping or nil if it
|
||||
// isn't wrapping anything
|
||||
func (o testWrapper) UnWrap() fs.Object {
|
||||
if o, ok := o.ObjectInfo.(fs.Object); ok {
|
||||
return o
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create a temporary local fs to upload things from
|
||||
|
||||
func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
|
||||
func makeTempLocalFs(t *testing.T) (localFs fs.Fs) {
|
||||
localFs, err := fs.TemporaryLocalFs(context.Background())
|
||||
require.NoError(t, err)
|
||||
cleanup = func() {
|
||||
t.Cleanup(func() {
|
||||
require.NoError(t, localFs.Rmdir(context.Background(), ""))
|
||||
}
|
||||
return localFs, cleanup
|
||||
})
|
||||
return localFs
|
||||
}
|
||||
|
||||
// Upload a file to a remote
|
||||
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
|
||||
func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object) {
|
||||
inBuf := bytes.NewBufferString(contents)
|
||||
t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
|
||||
upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
|
||||
obj, err := f.Put(context.Background(), inBuf, upSrc)
|
||||
require.NoError(t, err)
|
||||
cleanup = func() {
|
||||
t.Cleanup(func() {
|
||||
require.NoError(t, obj.Remove(context.Background()))
|
||||
}
|
||||
return obj, cleanup
|
||||
})
|
||||
return obj
|
||||
}
|
||||
|
||||
// Test the ObjectInfo
|
||||
@@ -65,11 +52,9 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
|
||||
path = "_wrap"
|
||||
}
|
||||
|
||||
localFs, cleanupLocalFs := makeTempLocalFs(t)
|
||||
defer cleanupLocalFs()
|
||||
localFs := makeTempLocalFs(t)
|
||||
|
||||
obj, cleanupObj := uploadFile(t, localFs, path, contents)
|
||||
defer cleanupObj()
|
||||
obj := uploadFile(t, localFs, path, contents)
|
||||
|
||||
// encrypt the data
|
||||
inBuf := bytes.NewBufferString(contents)
|
||||
@@ -83,7 +68,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
|
||||
var oi fs.ObjectInfo = obj
|
||||
if wrap {
|
||||
// wrap the object in an fs.ObjectUnwrapper if required
|
||||
oi = testWrapper{oi}
|
||||
oi = fs.NewOverrideRemote(oi, "new_remote")
|
||||
}
|
||||
|
||||
// wrap the object in a crypt for upload using the nonce we
|
||||
@@ -116,16 +101,13 @@ func testComputeHash(t *testing.T, f *Fs) {
|
||||
t.Skipf("%v: does not support hashes", f.Fs)
|
||||
}
|
||||
|
||||
localFs, cleanupLocalFs := makeTempLocalFs(t)
|
||||
defer cleanupLocalFs()
|
||||
localFs := makeTempLocalFs(t)
|
||||
|
||||
// Upload a file to localFs as a test object
|
||||
localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
|
||||
defer cleanupLocalObj()
|
||||
localObj := uploadFile(t, localFs, path, contents)
|
||||
|
||||
// Upload the same data to the remote Fs also
|
||||
remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
|
||||
defer cleanupRemoteObj()
|
||||
remoteObj := uploadFile(t, f, path, contents)
|
||||
|
||||
// Calculate the expected Hash of the remote object
|
||||
computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)
|
||||
|
||||
@@ -18,7 +18,6 @@ import (
	"net/http"
	"os"
	"path"
	"regexp"
	"sort"
	"strconv"
	"strings"

@@ -3431,13 +3430,12 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
	if err != nil {
		return nil, err
	}
	re := regexp.MustCompile(`[^\w_. -]+`)
	if _, ok := opt["config"]; ok {
		lines := []string{}
		upstreams := []string{}
		names := make(map[string]struct{}, len(drives))
		for i, drive := range drives {
			name := re.ReplaceAllString(drive.Name, "_")
			name := fspath.MakeConfigName(drive.Name)
			for {
				if _, found := names[name]; !found {
					break
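The drive `config` command used to sanitize shared-drive names with a local regexp; it now delegates to `fspath.MakeConfigName`, which centralizes the rules for what is legal in a remote name. For intuition, the old expression simply collapsed any run of characters outside `[\w_. -]` into a single underscore — a small self-contained demonstration of that old behaviour (the exact output of `fspath.MakeConfigName` may differ and is not shown here):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`[^\w_. -]+`)
	// Runs of disallowed characters collapse to a single "_".
	fmt.Println(re.ReplaceAllString("My Drive (Team)!", "_")) // My Drive _Team_
}
```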
@@ -70,7 +70,7 @@ func init() {
|
||||
When using implicit FTP over TLS the client connects using TLS
|
||||
right from the start which breaks compatibility with
|
||||
non-TLS-aware servers. This is usually served over port 990 rather
|
||||
than port 21. Cannot be used in combination with explicit FTP.`,
|
||||
than port 21. Cannot be used in combination with explicit FTPS.`,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "explicit_tls",
|
||||
@@ -78,7 +78,7 @@ than port 21. Cannot be used in combination with explicit FTP.`,
|
||||
|
||||
When using explicit FTP over TLS the client explicitly requests
|
||||
security from the server in order to upgrade a plain text connection
|
||||
to an encrypted one. Cannot be used in combination with implicit FTP.`,
|
||||
to an encrypted one. Cannot be used in combination with implicit FTPS.`,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "concurrency",
|
||||
@@ -657,8 +657,7 @@ func (f *Fs) dirFromStandardPath(dir string) string {
|
||||
// findItem finds a directory entry for the name in its parent directory
|
||||
func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) {
|
||||
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
|
||||
fullPath := path.Join(f.root, remote)
|
||||
if fullPath == "" || fullPath == "." || fullPath == "/" {
|
||||
if remote == "" || remote == "." || remote == "/" {
|
||||
// if root, assume exists and synthesize an entry
|
||||
return &ftp.Entry{
|
||||
Name: "",
|
||||
@@ -666,13 +665,32 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
|
||||
Time: time.Now(),
|
||||
}, nil
|
||||
}
|
||||
dir := path.Dir(fullPath)
|
||||
base := path.Base(fullPath)
|
||||
|
||||
c, err := f.getFtpConnection(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("findItem: %w", err)
|
||||
}
|
||||
|
||||
// returns TRUE if MLST is supported which is required to call GetEntry
|
||||
if c.IsTimePreciseInList() {
|
||||
entry, err := c.GetEntry(f.opt.Enc.FromStandardPath(remote))
|
||||
f.putFtpConnection(&c, err)
|
||||
if err != nil {
|
||||
err = translateErrorFile(err)
|
||||
if err == fs.ErrorObjectNotFound {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if entry != nil {
|
||||
f.entryToStandard(entry)
|
||||
}
|
||||
return entry, nil
|
||||
}
|
||||
|
||||
dir := path.Dir(remote)
|
||||
base := path.Base(remote)
|
||||
|
||||
files, err := c.List(f.dirFromStandardPath(dir))
|
||||
f.putFtpConnection(&c, err)
|
||||
if err != nil {
|
||||
@@ -691,7 +709,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
|
||||
// it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
|
||||
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
|
||||
entry, err := f.findItem(ctx, remote)
|
||||
entry, err := f.findItem(ctx, path.Join(f.root, remote))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -713,7 +731,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err
|
||||
|
||||
// dirExists checks the directory pointed to by remote exists or not
|
||||
func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
|
||||
entry, err := f.findItem(ctx, remote)
|
||||
entry, err := f.findItem(ctx, path.Join(f.root, remote))
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("dirExists: %w", err)
|
||||
}
|
||||
@@ -857,32 +875,18 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
|
||||
// getInfo reads the FileInfo for a path
|
||||
func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) {
|
||||
// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
|
||||
dir := path.Dir(remote)
|
||||
base := path.Base(remote)
|
||||
|
||||
c, err := f.getFtpConnection(ctx)
|
||||
file, err := f.findItem(ctx, remote)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getInfo: %w", err)
|
||||
}
|
||||
files, err := c.List(f.dirFromStandardPath(dir))
|
||||
f.putFtpConnection(&c, err)
|
||||
if err != nil {
|
||||
return nil, translateErrorFile(err)
|
||||
}
|
||||
|
||||
for i := range files {
|
||||
file := files[i]
|
||||
f.entryToStandard(file)
|
||||
if file.Name == base {
|
||||
info := &FileInfo{
|
||||
Name: remote,
|
||||
Size: file.Size,
|
||||
ModTime: file.Time,
|
||||
precise: f.fLstTime,
|
||||
IsDir: file.Type == ftp.EntryTypeFolder,
|
||||
}
|
||||
return info, nil
|
||||
return nil, err
|
||||
} else if file != nil {
|
||||
info := &FileInfo{
|
||||
Name: remote,
|
||||
Size: file.Size,
|
||||
ModTime: file.Time,
|
||||
precise: f.fLstTime,
|
||||
IsDir: file.Type == ftp.EntryTypeFolder,
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
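The ftp changes above reshape lookups: `findItem` now takes the already-joined path (callers pass `path.Join(f.root, remote)`), prefers a single `MLST` lookup via `c.GetEntry` when the server advertises precise listings, and falls back to listing the parent directory otherwise; `getInfo` is then rewritten on top of `findItem`. A compressed sketch of that control flow, reusing names from the diff (error translation and `entryToStandard` are omitted, so this is not a drop-in implementation):

```go
// Sketch of the lookup strategy after this change.
func (f *Fs) findItemSketch(ctx context.Context, remote string) (*ftp.Entry, error) {
	c, err := f.getFtpConnection(ctx)
	if err != nil {
		return nil, err
	}
	if c.IsTimePreciseInList() { // server supports MLST: fetch the one entry directly
		entry, err := c.GetEntry(f.opt.Enc.FromStandardPath(remote))
		f.putFtpConnection(&c, err)
		return entry, err
	}
	// Otherwise list the parent directory and scan for the base name.
	entries, err := c.List(f.dirFromStandardPath(path.Dir(remote)))
	f.putFtpConnection(&c, err)
	if err != nil {
		return nil, err
	}
	for _, e := range entries {
		if e.Name == path.Base(remote) {
			return e, nil
		}
	}
	return nil, nil
}
```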
@@ -12,7 +12,6 @@ import (
|
||||
_ "github.com/rclone/rclone/backend/local"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -56,7 +55,7 @@ func TestIntegration(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
in, err := srcObj.Open(ctx)
|
||||
require.NoError(t, err)
|
||||
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
|
||||
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, remote, dstObj.Remote())
|
||||
_ = in.Close()
|
||||
@@ -221,7 +220,7 @@ func TestIntegration(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
in, err := srcObj.Open(ctx)
|
||||
require.NoError(t, err)
|
||||
dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
|
||||
dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, remote, dstObj.Remote())
|
||||
_ = in.Close()
|
||||
|
||||
@@ -33,8 +33,9 @@ var (
|
||||
lineEndSize = 1
|
||||
)
|
||||
|
||||
// prepareServer the test server and return a function to tidy it up afterwards
|
||||
func prepareServer(t *testing.T) (configmap.Simple, func()) {
|
||||
// prepareServer prepares the test server and shuts it down automatically
|
||||
// when the test completes.
|
||||
func prepareServer(t *testing.T) configmap.Simple {
|
||||
// file server for test/files
|
||||
fileServer := http.FileServer(http.Dir(filesPath))
|
||||
|
||||
@@ -78,20 +79,21 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
|
||||
"url": ts.URL,
|
||||
"headers": strings.Join(headers, ","),
|
||||
}
|
||||
t.Cleanup(ts.Close)
|
||||
|
||||
// return a function to tidy up
|
||||
return m, ts.Close
|
||||
return m
|
||||
}
|
||||
|
||||
// prepare the test server and return a function to tidy it up afterwards
|
||||
func prepare(t *testing.T) (fs.Fs, func()) {
|
||||
m, tidy := prepareServer(t)
|
||||
// prepare prepares the test server and shuts it down automatically
|
||||
// when the test completes.
|
||||
func prepare(t *testing.T) fs.Fs {
|
||||
m := prepareServer(t)
|
||||
|
||||
// Instantiate it
|
||||
f, err := NewFs(context.Background(), remoteName, "", m)
|
||||
require.NoError(t, err)
|
||||
|
||||
return f, tidy
|
||||
return f
|
||||
}
|
||||
|
||||
func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
|
||||
@@ -134,22 +136,19 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
|
||||
}
|
||||
|
||||
func TestListRoot(t *testing.T) {
|
||||
f, tidy := prepare(t)
|
||||
defer tidy()
|
||||
f := prepare(t)
|
||||
testListRoot(t, f, false)
|
||||
}
|
||||
|
||||
func TestListRootNoSlash(t *testing.T) {
|
||||
f, tidy := prepare(t)
|
||||
f := prepare(t)
|
||||
f.(*Fs).opt.NoSlash = true
|
||||
defer tidy()
|
||||
|
||||
testListRoot(t, f, true)
|
||||
}
|
||||
|
||||
func TestListSubDir(t *testing.T) {
|
||||
f, tidy := prepare(t)
|
||||
defer tidy()
|
||||
f := prepare(t)
|
||||
|
||||
entries, err := f.List(context.Background(), "three")
|
||||
require.NoError(t, err)
|
||||
@@ -166,8 +165,7 @@ func TestListSubDir(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNewObject(t *testing.T) {
|
||||
f, tidy := prepare(t)
|
||||
defer tidy()
|
||||
f := prepare(t)
|
||||
|
||||
o, err := f.NewObject(context.Background(), "four/under four.txt")
|
||||
require.NoError(t, err)
|
||||
@@ -194,8 +192,7 @@ func TestNewObject(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestOpen(t *testing.T) {
|
||||
m, tidy := prepareServer(t)
|
||||
defer tidy()
|
||||
m := prepareServer(t)
|
||||
|
||||
for _, head := range []bool{false, true} {
|
||||
if !head {
|
||||
@@ -257,8 +254,7 @@ func TestOpen(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMimeType(t *testing.T) {
|
||||
f, tidy := prepare(t)
|
||||
defer tidy()
|
||||
f := prepare(t)
|
||||
|
||||
o, err := f.NewObject(context.Background(), "four/under four.txt")
|
||||
require.NoError(t, err)
|
||||
@@ -269,8 +265,7 @@ func TestMimeType(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIsAFileRoot(t *testing.T) {
|
||||
m, tidy := prepareServer(t)
|
||||
defer tidy()
|
||||
m := prepareServer(t)
|
||||
|
||||
f, err := NewFs(context.Background(), remoteName, "one%.txt", m)
|
||||
assert.Equal(t, err, fs.ErrorIsFile)
|
||||
@@ -279,8 +274,7 @@ func TestIsAFileRoot(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIsAFileSubDir(t *testing.T) {
|
||||
m, tidy := prepareServer(t)
|
||||
defer tidy()
|
||||
m := prepareServer(t)
|
||||
|
||||
f, err := NewFs(context.Background(), remoteName, "three/underthree.txt", m)
|
||||
assert.Equal(t, err, fs.ErrorIsFile)
|
||||
|
||||
@@ -33,7 +33,6 @@ func TestMain(m *testing.M) {
|
||||
// Test copy with source file that's updating
|
||||
func TestUpdatingCheck(t *testing.T) {
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
filePath := "sub dir/local test"
|
||||
r.WriteFile(filePath, "content", time.Now())
|
||||
|
||||
@@ -78,7 +77,6 @@ func TestUpdatingCheck(t *testing.T) {
|
||||
func TestSymlink(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
f := r.Flocal.(*Fs)
|
||||
dir := f.root
|
||||
|
||||
@@ -177,7 +175,6 @@ func TestSymlinkError(t *testing.T) {
|
||||
func TestHashOnUpdate(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
const filePath = "file.txt"
|
||||
when := time.Now()
|
||||
r.WriteFile(filePath, "content", when)
|
||||
@@ -208,7 +205,6 @@ func TestHashOnUpdate(t *testing.T) {
|
||||
func TestHashOnDelete(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
const filePath = "file.txt"
|
||||
when := time.Now()
|
||||
r.WriteFile(filePath, "content", when)
|
||||
@@ -237,7 +233,6 @@ func TestHashOnDelete(t *testing.T) {
|
||||
func TestMetadata(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
const filePath = "metafile.txt"
|
||||
when := time.Now()
|
||||
const dayLength = len("2001-01-01")
|
||||
@@ -372,7 +367,6 @@ func TestMetadata(t *testing.T) {
|
||||
func TestFilter(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
r := fstest.NewRun(t)
|
||||
defer r.Finalise()
|
||||
when := time.Now()
|
||||
r.WriteFile("included", "included file", when)
|
||||
r.WriteFile("excluded", "excluded file", when)
|
||||
|
||||
@@ -66,7 +66,7 @@ import (
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "s3",
|
||||
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi",
|
||||
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi",
|
||||
NewFs: NewFs,
|
||||
CommandHelp: commandHelp,
|
||||
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
|
||||
@@ -105,7 +105,7 @@ func init() {
|
||||
Help: "Arvan Cloud Object Storage (AOS)",
|
||||
}, {
|
||||
Value: "DigitalOcean",
|
||||
Help: "Digital Ocean Spaces",
|
||||
Help: "DigitalOcean Spaces",
|
||||
}, {
|
||||
Value: "Dreamhost",
|
||||
Help: "Dreamhost DreamObjects",
|
||||
@@ -124,6 +124,9 @@ func init() {
|
||||
}, {
|
||||
Value: "LyveCloud",
|
||||
Help: "Seagate Lyve Cloud",
|
||||
}, {
|
||||
Value: "Liara",
|
||||
Help: "Liara Object Storage",
|
||||
}, {
|
||||
Value: "Minio",
|
||||
Help: "Minio Object Storage",
|
||||
@@ -437,7 +440,7 @@ func init() {
|
||||
}, {
|
||||
Name: "region",
|
||||
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
|
||||
Provider: "!AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive",
|
||||
Provider: "!AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
|
||||
@@ -762,6 +765,15 @@ func init() {
|
||||
Value: "s3-eu-south-2.ionoscloud.com",
|
||||
Help: "Logrono, Spain",
|
||||
}},
|
||||
}, {
|
||||
// Liara endpoints: https://liara.ir/landing/object-storage
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for Liara Object Storage API.",
|
||||
Provider: "Liara",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "storage.iran.liara.space",
|
||||
Help: "The default endpoint\nIran",
|
||||
}},
|
||||
}, {
|
||||
// oss endpoints: https://help.aliyun.com/document_detail/31837.html
|
||||
Name: "endpoint",
|
||||
@@ -924,17 +936,11 @@ func init() {
|
||||
}},
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint of the Shared Gateway.",
|
||||
Help: "Endpoint for Storj Gateway.",
|
||||
Provider: "Storj",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "gateway.eu1.storjshare.io",
|
||||
Help: "EU1 Shared Gateway",
|
||||
}, {
|
||||
Value: "gateway.us1.storjshare.io",
|
||||
Help: "US1 Shared Gateway",
|
||||
}, {
|
||||
Value: "gateway.ap1.storjshare.io",
|
||||
Help: "Asia-Pacific Shared Gateway",
|
||||
Value: "gateway.storjshare.io",
|
||||
Help: "Global Hosted Gateway",
|
||||
}},
|
||||
}, {
|
||||
// cos endpoints: https://intl.cloud.tencent.com/document/product/436/6224
|
||||
@@ -1092,22 +1098,34 @@ func init() {
|
||||
}, {
|
||||
Name: "endpoint",
|
||||
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
|
||||
Provider: "!AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu",
|
||||
Provider: "!AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,Liara,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "objects-us-east-1.dream.io",
|
||||
Help: "Dream Objects endpoint",
|
||||
Provider: "Dreamhost",
|
||||
}, {
|
||||
Value: "syd1.digitaloceanspaces.com",
|
||||
Help: "DigitalOcean Spaces Sydney 1",
|
||||
Provider: "DigitalOcean",
|
||||
}, {
|
||||
Value: "sfo3.digitaloceanspaces.com",
|
||||
Help: "DigitalOcean Spaces San Francisco 3",
|
||||
Provider: "DigitalOcean",
|
||||
}, {
|
||||
Value: "fra1.digitaloceanspaces.com",
|
||||
Help: "DigitalOcean Spaces Frankfurt 1",
|
||||
Provider: "DigitalOcean",
|
||||
}, {
|
||||
Value: "nyc3.digitaloceanspaces.com",
|
||||
Help: "Digital Ocean Spaces New York 3",
|
||||
Help: "DigitalOcean Spaces New York 3",
|
||||
Provider: "DigitalOcean",
|
||||
}, {
|
||||
Value: "ams3.digitaloceanspaces.com",
|
||||
Help: "Digital Ocean Spaces Amsterdam 3",
|
||||
Help: "DigitalOcean Spaces Amsterdam 3",
|
||||
Provider: "DigitalOcean",
|
||||
}, {
|
||||
Value: "sgp1.digitaloceanspaces.com",
|
||||
Help: "Digital Ocean Spaces Singapore 1",
|
||||
Help: "DigitalOcean Spaces Singapore 1",
|
||||
Provider: "DigitalOcean",
|
||||
}, {
|
||||
Value: "localhost:8333",
|
||||
@@ -1177,6 +1195,10 @@ func init() {
|
||||
Value: "s3.ap-southeast-2.wasabisys.com",
|
||||
Help: "Wasabi AP Southeast 2 (Sydney)",
|
||||
Provider: "Wasabi",
|
||||
}, {
|
||||
Value: "storage.iran.liara.space",
|
||||
Help: "Liara Iran endpoint",
|
||||
Provider: "Liara",
|
||||
}, {
|
||||
Value: "s3.ir-thr-at1.arvanstorage.com",
|
||||
Help: "ArvanCloud Tehran Iran (Asiatech) endpoint",
|
||||
@@ -1560,7 +1582,7 @@ func init() {
|
||||
}, {
|
||||
Name: "location_constraint",
|
||||
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
|
||||
Provider: "!AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
|
||||
Provider: "!AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
|
||||
}, {
|
||||
Name: "acl",
|
||||
Help: `Canned ACL used when creating buckets and storing or copying objects.
|
||||
@@ -1793,6 +1815,15 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
|
||||
Value: "STANDARD_IA",
|
||||
Help: "Infrequent access storage mode",
|
||||
}},
|
||||
}, {
|
||||
// Mapping from here: https://liara.ir/landing/object-storage
|
||||
Name: "storage_class",
|
||||
Help: "The storage class to use when storing new objects in Liara",
|
||||
Provider: "Liara",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "STANDARD",
|
||||
Help: "Standard storage class",
|
||||
}},
|
||||
}, {
|
||||
// Mapping from here: https://www.arvancloud.com/en/products/cloud-storage
|
||||
Name: "storage_class",
|
||||
@@ -2764,6 +2795,10 @@ func setQuirks(opt *Options) {
|
||||
// listObjectsV2 supported - https://api.ionos.com/docs/s3/#Basic-Operations-get-Bucket-list-type-2
|
||||
virtualHostStyle = false
|
||||
urlEncodeListings = false
|
||||
case "Liara":
|
||||
virtualHostStyle = false
|
||||
urlEncodeListings = false
|
||||
useMultipartEtag = false
|
||||
case "LyveCloud":
|
||||
useMultipartEtag = false // LyveCloud seems to calculate multipart Etags differently from AWS
|
||||
case "Minio":
|
||||
@@ -3027,6 +3062,17 @@ func (f *Fs) getMetaDataListing(ctx context.Context, wantRemote string) (info *s
|
||||
return info, versionID, nil
|
||||
}
|
||||
|
||||
// stringClonePointer clones the string pointed to by sp into new
// memory. This is useful to stop us keeping references to small
// strings carved out of large XML responses.
func stringClonePointer(sp *string) *string {
	if sp == nil {
		return nil
	}
	var s = *sp
	return &s
}

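One effect of returning `&s` for a freshly copied value is that the caller no longer holds a pointer into the decoded listing struct itself, so the large response object can be garbage-collected once the listing loop moves on. A tiny illustration of that retention effect, with hypothetical types used purely for intuition:

```go
package main

type hugeListing struct {
	payload [1 << 20]byte // stands in for a large decoded response
	tag     string
}

// keepField returns a pointer into the big struct: while the caller holds
// it, the whole hugeListing allocation stays reachable.
func keepField(l *hugeListing) *string { return &l.tag }

// clonePointer copies just the field value into a new allocation, so the
// big struct can be freed independently of the returned pointer.
func clonePointer(l *hugeListing) *string {
	s := l.tag
	return &s
}

func main() {}
```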
// Return an Object from a path
|
||||
//
|
||||
// If it can't be found it returns the error ErrorObjectNotFound.
|
||||
@@ -3052,8 +3098,8 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Obje
|
||||
}
|
||||
o.setMD5FromEtag(aws.StringValue(info.ETag))
|
||||
o.bytes = aws.Int64Value(info.Size)
|
||||
o.storageClass = info.StorageClass
|
||||
o.versionID = versionID
|
||||
o.storageClass = stringClonePointer(info.StorageClass)
|
||||
o.versionID = stringClonePointer(versionID)
|
||||
} else if !o.fs.opt.NoHeadObject {
|
||||
err := o.readMetaData(ctx) // reads info and meta, returning an error
|
||||
if err != nil {
|
||||
@@ -3194,6 +3240,9 @@ func (ls *v2List) List(ctx context.Context) (resp *s3.ListObjectsV2Output, versi
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if aws.BoolValue(resp.IsTruncated) && (resp.NextContinuationToken == nil || *resp.NextContinuationToken == "") {
|
||||
return nil, nil, errors.New("s3 protocol error: received listing v2 with IsTruncated set and no NextContinuationToken. Should you be using `--s3-list-version 1`?")
|
||||
}
|
||||
ls.req.ContinuationToken = resp.NextContinuationToken
|
||||
return resp, nil, nil
|
||||
}
|
||||
@@ -5452,7 +5501,12 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.versionID = versionID
|
||||
// Only record versionID if we are using --s3-versions or --s3-version-at
|
||||
if o.fs.opt.Versions || o.fs.opt.VersionAt.IsSet() {
|
||||
o.versionID = versionID
|
||||
} else {
|
||||
o.versionID = nil
|
||||
}
|
||||
|
||||
// User requested we don't HEAD the object after uploading it
|
||||
// so make up the object as best we can assuming it got
|
||||
|
||||
@@ -1775,11 +1775,14 @@ func (o *Object) setMetadata(info os.FileInfo) {

// statRemote stats the file or directory at the remote given
func (f *Fs) stat(ctx context.Context, remote string) (info os.FileInfo, err error) {
	absPath := remote
	if !strings.HasPrefix(remote, "/") {
		absPath = path.Join(f.absRoot, remote)
	}
	c, err := f.getSftpConnection(ctx)
	if err != nil {
		return nil, fmt.Errorf("stat: %w", err)
	}
	absPath := path.Join(f.absRoot, remote)
	info, err = c.sftpClient.Stat(absPath)
	f.putSftpConnection(&c, err)
	return info, err
@@ -103,6 +103,10 @@ func (f *Fs) getSessions() int32 {
|
||||
|
||||
// Open a new connection to the SMB server.
|
||||
func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err error) {
|
||||
// As we are pooling these connections we need to decouple
|
||||
// them from the current context
|
||||
ctx = context.Background()
|
||||
|
||||
c, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't connect SMB: %w", err)
|
||||
|
||||
@@ -478,11 +478,15 @@ func (f *Fs) makeEntryRelative(share, _path, relative string, stat os.FileInfo)
|
||||
}
|
||||
|
||||
func (f *Fs) ensureDirectory(ctx context.Context, share, _path string) error {
|
||||
dir := path.Dir(_path)
|
||||
if dir == "." {
|
||||
return nil
|
||||
}
|
||||
cn, err := f.getConnection(ctx, share)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = cn.smbShare.MkdirAll(f.toSambaPath(path.Dir(_path)), 0o755)
|
||||
err = cn.smbShare.MkdirAll(f.toSambaPath(dir), 0o755)
|
||||
f.putConnection(&cn)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -160,6 +160,7 @@ var (
|
||||
_ fs.ListRer = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.Mover = &Fs{}
|
||||
_ fs.Copier = &Fs{}
|
||||
)
|
||||
|
||||
// NewFs creates a filesystem backed by Storj.
|
||||
@@ -720,3 +721,43 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
// Read the new object
|
||||
return f.NewObject(ctx, remote)
|
||||
}
|
||||
|
||||
// Copy src to this remote using server-side copy operations.
|
||||
//
|
||||
// This is stored with the remote path given.
|
||||
//
|
||||
// It returns the destination Object and a possible error.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
|
||||
// Copy parameters
|
||||
srcBucket, srcKey := bucket.Split(srcObj.absolute)
|
||||
dstBucket, dstKey := f.absolute(remote)
|
||||
options := uplink.CopyObjectOptions{}
|
||||
|
||||
// Do the copy
|
||||
newObject, err := f.project.CopyObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
|
||||
if err != nil {
|
||||
// Make sure destination bucket exists
|
||||
_, err := f.project.EnsureBucket(ctx, dstBucket)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("copy object failed to create destination bucket: %w", err)
|
||||
}
|
||||
// And try again
|
||||
newObject, err = f.project.CopyObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("copy object failed: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Return the new object
|
||||
return newObjectFromUplink(f, remote, newObject), nil
|
||||
}
|
||||
|
||||
@@ -991,6 +991,7 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
|
||||
}
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
srcFs := srcObj.fs
|
||||
dstPath := f.filePath(remote)
|
||||
err := f.mkParentDir(ctx, dstPath)
|
||||
if err != nil {
|
||||
@@ -1013,9 +1014,10 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
|
||||
if f.useOCMtime {
|
||||
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.Call(ctx, &opts)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
// Direct the MOVE/COPY to the source server
|
||||
err = srcFs.pacer.Call(func() (bool, error) {
|
||||
resp, err = srcFs.srv.Call(ctx, &opts)
|
||||
return srcFs.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Copy call failed: %w", err)
|
||||
@@ -1109,9 +1111,10 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
"Overwrite": "F",
|
||||
},
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.Call(ctx, &opts)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
// Direct the MOVE/COPY to the source server
|
||||
err = srcFs.pacer.Call(func() (bool, error) {
|
||||
resp, err = srcFs.srv.Call(ctx, &opts)
|
||||
return srcFs.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("DirMove MOVE call failed: %w", err)
|
||||
|
||||
17
bin/backend-versions.sh
Executable file
17
bin/backend-versions.sh
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
# This adds the version each backend was released to its docs page
|
||||
set -e
|
||||
for backend in $( find backend -maxdepth 1 -type d ); do
|
||||
backend=$(basename $backend)
|
||||
if [[ "$backend" == "backend" || "$backend" == "vfs" || "$backend" == "all" || "$backend" == "azurefile" ]]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
commit=$(git log --oneline -- $backend | tail -1 | cut -d' ' -f1)
|
||||
if [ "$commit" == "" ]; then
|
||||
commit=$(git log --oneline -- backend/$backend | tail -1 | cut -d' ' -f1)
|
||||
fi
|
||||
version=$(git tag --contains $commit | grep ^v | sort -n | head -1)
|
||||
echo $backend $version
|
||||
sed -i~ "4i versionIntroduced: \"$version\"" docs/content/${backend}.md
|
||||
done
|
||||
@@ -93,6 +93,9 @@ provided by a backend. Where the value is unlimited it is omitted.
|
||||
Some backends does not support the ` + "`rclone about`" + ` command at all,
|
||||
see complete list in [documentation](https://rclone.org/overview/#optional-features).
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.41",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
f := cmd.NewFsSrc(args)
|
||||
|
||||
@@ -30,6 +30,9 @@ rclone config.
|
||||
|
||||
Use the --auth-no-open-browser to prevent rclone to open auth
|
||||
link in default browser automatically.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.27",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(1, 3, command, args)
|
||||
return config.Authorize(context.Background(), args, noAutoBrowser)
|
||||
|
||||
@@ -58,6 +58,9 @@ Pass arguments to the backend by placing them on the end of the line
|
||||
Note to run these commands on a running backend then see
|
||||
[backend/command](/rc/#backend-command) in the rc docs.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.52",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(2, 1e6, command, args)
|
||||
name, remote := args[0], args[1]
|
||||
|
||||
@@ -115,6 +115,9 @@ var commandDefinition = &cobra.Command{
|
||||
Use: "bisync remote1:path1 remote2:path2",
|
||||
Short: shortHelp,
|
||||
Long: longHelp,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.58",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(2, 2, command, args)
|
||||
fs1, file1, fs2, file2 := cmd.NewFsSrcDstFiles(args)
|
||||
|
||||
@@ -25,6 +25,10 @@ var commandDefinition = &cobra.Command{
|
||||
Print cache stats for a remote in JSON format
|
||||
`,
|
||||
Hidden: true,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
"status": "Deprecated",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fs.Logf(nil, `"rclone cachestats" is deprecated, use "rclone backend stats %s" instead`, args[0])
|
||||
|
||||
@@ -57,6 +57,9 @@ the end and |--offset| and |--count| to print a section in the middle.
|
||||
Note that if offset is negative it will count from the end, so
|
||||
|--offset -1 --count 1| is equivalent to |--tail 1|.
|
||||
`, "|", "`"),
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.33",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
usedOffset := offset != 0 || count >= 0
|
||||
usedHead := head > 0
|
||||
|
||||
@@ -37,6 +37,9 @@ that don't support hashes or if you really want to check all the data.
|
||||
|
||||
Note that hash values in the SUM file are treated as case insensitive.
|
||||
`, "|", "`") + check.FlagsHelp,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.56",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(3, 3, command, args)
|
||||
var hashType hash.Type
|
||||
|
||||
@@ -20,6 +20,9 @@ var commandDefinition = &cobra.Command{
|
||||
Clean up the remote if possible. Empty the trash or delete old file
|
||||
versions. Not supported by all remotes.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.31",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
|
||||
@@ -44,6 +44,9 @@ var configCommand = &cobra.Command{
|
||||
remotes and manage existing ones. You may also set or remove a
|
||||
password to protect your configuration.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
return config.EditConfig(context.Background())
|
||||
@@ -54,12 +57,18 @@ var configEditCommand = &cobra.Command{
|
||||
Use: "edit",
|
||||
Short: configCommand.Short,
|
||||
Long: configCommand.Long,
|
||||
Run: configCommand.Run,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
},
|
||||
Run: configCommand.Run,
|
||||
}
|
||||
|
||||
var configFileCommand = &cobra.Command{
|
||||
Use: "file",
|
||||
Short: `Show path of configuration file in use.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.38",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
config.ShowConfigLocation()
|
||||
@@ -69,6 +78,9 @@ var configFileCommand = &cobra.Command{
|
||||
var configTouchCommand = &cobra.Command{
|
||||
Use: "touch",
|
||||
Short: `Ensure configuration file exists.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.56",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
config.SaveConfig()
|
||||
@@ -78,6 +90,9 @@ var configTouchCommand = &cobra.Command{
|
||||
var configPathsCommand = &cobra.Command{
|
||||
Use: "paths",
|
||||
Short: `Show paths used for configuration, cache, temp etc.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.57",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
fmt.Printf("Config file: %s\n", config.GetConfigPath())
|
||||
@@ -89,6 +104,9 @@ var configPathsCommand = &cobra.Command{
|
||||
var configShowCommand = &cobra.Command{
|
||||
Use: "show [<remote>]",
|
||||
Short: `Print (decrypted) config file, or the config for a single remote.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.38",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(0, 1, command, args)
|
||||
if len(args) == 0 {
|
||||
@@ -103,6 +121,9 @@ var configShowCommand = &cobra.Command{
|
||||
var configDumpCommand = &cobra.Command{
|
||||
Use: "dump",
|
||||
Short: `Dump the config file as JSON.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
return config.Dump()
|
||||
@@ -112,6 +133,9 @@ var configDumpCommand = &cobra.Command{
|
||||
var configProvidersCommand = &cobra.Command{
|
||||
Use: "providers",
|
||||
Short: `List in JSON format all the providers and options.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
return config.JSONListProviders()
|
||||
@@ -152,7 +176,7 @@ This will look something like (some irrelevant detail removed):
|
||||
"State": "*oauth-islocal,teamdrive,,",
|
||||
"Option": {
|
||||
"Name": "config_is_local",
|
||||
"Help": "Use auto config?\n * Say Y if not sure\n * Say N if you are working on a remote or headless machine\n",
|
||||
"Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n",
|
||||
"Default": true,
|
||||
"Examples": [
|
||||
{
|
||||
@@ -226,6 +250,9 @@ using remote authorization you would do this:
|
||||
|
||||
rclone config create mydrive drive config_is_local=false
|
||||
`, "|", "`") + configPasswordHelp,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(2, 256, command, args)
|
||||
in, err := argsToMap(args[2:])
|
||||
@@ -289,6 +316,9 @@ require this add an extra parameter thus:
|
||||
|
||||
rclone config update myremote env_auth=true config_refresh_token=false
|
||||
`, "|", "`") + configPasswordHelp,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(1, 256, command, args)
|
||||
in, err := argsToMap(args[1:])
|
||||
@@ -304,6 +334,9 @@ require this add an extra parameter thus:
|
||||
var configDeleteCommand = &cobra.Command{
|
||||
Use: "delete name",
|
||||
Short: "Delete an existing remote.",
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
config.DeleteRemote(args[0])
|
||||
@@ -326,6 +359,9 @@ For example, to set password of a remote of name myremote you would do:
|
||||
This command is obsolete now that "config update" and "config create"
|
||||
both support obscuring passwords directly.
|
||||
`, "|", "`"),
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(1, 256, command, args)
|
||||
in, err := argsToMap(args[1:])
|
||||
|
||||
@@ -46,6 +46,9 @@ the destination.
|
||||
|
||||
**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.35",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(2, 2, command, args)
|
||||
fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args)
|
||||
|
||||
@@ -51,6 +51,9 @@ destination if there is one with the same name.
|
||||
Setting ` + "`--stdout`" + ` or making the output file name ` + "`-`" + `
|
||||
will cause the output to be written to standard output.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.43",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) (err error) {
|
||||
cmd.CheckArgs(1, 2, command, args)
|
||||
|
||||
|
||||
@@ -47,6 +47,9 @@ the files in remote:path.
|
||||
|
||||
After it has run it will log the status of the encryptedremote:.
|
||||
` + check.FlagsHelp,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.36",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(2, 2, command, args)
|
||||
fsrc, fdst := cmd.NewFsSrcDst(args)
|
||||
|
||||
@@ -41,6 +41,9 @@ use it like this
|
||||
Another way to accomplish this is by using the ` + "`rclone backend encode` (or `decode`)" + ` command.
|
||||
See the documentation on the [crypt](/crypt/) overlay for more info.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.38",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(2, 11, command, args)
|
||||
cmd.Run(false, false, command, func() error {
|
||||
|
||||
@@ -135,6 +135,9 @@ Or
|
||||
|
||||
rclone dedupe rename "drive:Google Photos"
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.27",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 2, command, args)
|
||||
if len(args) > 1 {
|
||||
|
||||
@@ -53,6 +53,9 @@ delete all files bigger than 100 MiB.
|
||||
**Important**: Since this can cause data loss, test first with the
|
||||
|--dry-run| or the |--interactive|/|-i| flag.
|
||||
`, "|", "`"),
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.27",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
|
||||
@@ -22,6 +22,9 @@ Remove a single file from remote. Unlike ` + "`" + `delete` + "`" + ` it cannot
|
||||
remove a directory and it doesn't obey include/exclude filters - if the specified file exists,
|
||||
it will always be removed.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.42",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fs, fileName := cmd.NewFsFile(args[0])
|
||||
|
||||
@@ -17,4 +17,7 @@ var completionDefinition = &cobra.Command{
|
||||
Generates a shell completion script for rclone.
|
||||
Run with ` + "`--help`" + ` to list the supported shells.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.33",
|
||||
},
|
||||
}
|
||||
|
||||
@@ -31,6 +31,7 @@ type frontmatter struct {
|
||||
Slug string
|
||||
URL string
|
||||
Source string
|
||||
Annotations map[string]string
|
||||
}
|
||||
|
||||
var frontmatterTemplate = template.Must(template.New("frontmatter").Parse(`---
|
||||
@@ -38,6 +39,9 @@ title: "{{ .Title }}"
|
||||
description: "{{ .Description }}"
|
||||
slug: {{ .Slug }}
|
||||
url: {{ .URL }}
|
||||
{{- range $key, $value := .Annotations }}
|
||||
{{ $key }}: {{ $value }}
|
||||
{{- end }}
|
||||
# autogenerated - DO NOT EDIT, instead edit the source code in {{ .Source }} and as part of making a release run "make commanddocs"
|
||||
---
|
||||
`))
|
||||
@@ -49,6 +53,9 @@ var commandDefinition = &cobra.Command{
|
||||
This produces markdown docs for the rclone commands to the directory
|
||||
supplied. These are in a format suitable for hugo to render into the
|
||||
rclone.org website.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.33",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
now := time.Now().Format(time.RFC3339)
|
||||
@@ -75,17 +82,24 @@ rclone.org website.`,
|
||||
return err
|
||||
}
|
||||
|
||||
// Look up name => description for prepender
|
||||
var description = map[string]string{}
|
||||
var addDescription func(root *cobra.Command)
|
||||
addDescription = func(root *cobra.Command) {
|
||||
// Look up name => details for prepender
|
||||
type commandDetails struct {
|
||||
Short string
|
||||
Annotations map[string]string
|
||||
}
|
||||
var commands = map[string]commandDetails{}
|
||||
var addCommandDetails func(root *cobra.Command)
|
||||
addCommandDetails = func(root *cobra.Command) {
|
||||
name := strings.ReplaceAll(root.CommandPath(), " ", "_") + ".md"
|
||||
description[name] = root.Short
|
||||
commands[name] = commandDetails{
|
||||
Short: root.Short,
|
||||
Annotations: root.Annotations,
|
||||
}
|
||||
for _, c := range root.Commands() {
|
||||
addDescription(c)
|
||||
addCommandDetails(c)
|
||||
}
|
||||
}
|
||||
addDescription(cmd.Root)
|
||||
addCommandDetails(cmd.Root)
|
||||
|
||||
// markup for the docs files
|
||||
prepender := func(filename string) string {
|
||||
@@ -94,10 +108,11 @@ rclone.org website.`,
|
||||
data := frontmatter{
|
||||
Date: now,
|
||||
Title: strings.ReplaceAll(base, "_", " "),
|
||||
Description: description[name],
|
||||
Description: commands[name].Short,
|
||||
Slug: base,
|
||||
URL: "/commands/" + strings.ToLower(base) + "/",
|
||||
Source: strings.ReplaceAll(strings.ReplaceAll(base, "rclone", "cmd"), "_", "/") + "/",
|
||||
Annotations: commands[name].Annotations,
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
err := frontmatterTemplate.Execute(&buf, data)
|
||||
|
||||
@@ -112,6 +112,9 @@ Then
|
||||
|
||||
Note that hash names are case insensitive and values are output in lower case.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.41",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(0, 2, command, args)
|
||||
if len(args) == 0 {
|
||||
|
||||
@@ -49,6 +49,9 @@ link. Exact capabilities depend on the remote, but the link will
|
||||
always by default be created with the least constraints – e.g. no
|
||||
expiry, no password protection, accessible without account.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.41",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc, remote := cmd.NewFsFile(args[0])
|
||||
|
||||
@@ -30,6 +30,9 @@ rclone listremotes lists all the available remotes from the config file.
|
||||
|
||||
When used with the ` + "`--long`" + ` flag it lists the types too.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.34",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
remotes := config.FileSections()
|
||||
|
||||
@@ -142,6 +142,9 @@ those only (without traversing the whole directory structure):
|
||||
rclone copy --files-from-raw new_files /path/to/local remote:path
|
||||
|
||||
` + lshelp.Help,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.40",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
|
||||
@@ -112,6 +112,9 @@ will be shown ("2017-05-31T16:15:57+01:00").
|
||||
The whole output can be processed as a JSON blob, or alternatively it
|
||||
can be processed line by line as each item is written one to a line.
|
||||
` + lshelp.Help,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.37",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
var fsrc fs.Fs
|
||||
|
||||
@@ -31,6 +31,9 @@ Eg
|
||||
37600 2016-06-25 18:55:40.814629136 fubuwic
|
||||
|
||||
` + lshelp.Help,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.02",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
|
||||
@@ -38,6 +38,9 @@ by not passing a remote:path, or by passing a hyphen as remote:path
|
||||
when there is data to read (if not, the hyphen will be treated literally,
|
||||
as a relative path).
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.02",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(0, 1, command, args)
|
||||
if found, err := hashsum.CreateFromStdinArg(hash.MD5, args, 0); found {
|
||||
|
||||
@@ -157,6 +157,9 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
|
||||
Hidden: hidden,
|
||||
Short: `Mount the remote as file system on a mountpoint.`,
|
||||
Long: strings.ReplaceAll(strings.ReplaceAll(mountHelp, "|", "`"), "@", commandName) + vfs.Help,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.33",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(2, 2, command, args)
|
||||
|
||||
|
||||
@@ -60,6 +60,9 @@ can speed transfers up greatly.
|
||||
|
||||
**Note**: Use the |-P|/|--progress| flag to view real-time transfer statistics.
|
||||
`, "|", "`"),
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.19",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(2, 2, command, args)
|
||||
fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
|
||||
|
||||
@@ -49,6 +49,9 @@ successful transfer.
|
||||
|
||||
**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.35",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(2, 2, command, args)
|
||||
fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args)
|
||||
|
||||
234
cmd/ncdu/ncdu.go
234
cmd/ncdu/ncdu.go
@@ -13,13 +13,14 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/atotto/clipboard"
|
||||
"github.com/gdamore/tcell/v2/termbox"
|
||||
"github.com/gdamore/tcell/v2"
|
||||
runewidth "github.com/mattn/go-runewidth"
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/cmd/ncdu/scan"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/fspath"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rivo/uniseg"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@@ -73,6 +74,9 @@ For a non-interactive listing of the remote, see the
|
||||
[tree](/commands/rclone_tree/) command. To just get the total size of
|
||||
the remote you can also use the [size](/commands/rclone_size/) command.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.37",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
@@ -114,6 +118,7 @@ func helpText() (tr []string) {
|
||||
|
||||
// UI contains the state of the user interface
|
||||
type UI struct {
|
||||
s tcell.Screen
|
||||
f fs.Fs // fs being displayed
|
||||
fsName string // human name of Fs
|
||||
root *scan.Dir // root directory
|
||||
@@ -150,77 +155,91 @@ type dirPos struct {
|
||||
offset int
|
||||
}
|
||||
|
||||
// graphemeWidth returns the number of cells in rs.
|
||||
//
|
||||
// The original [runewidth.StringWidth] iterates through graphemes
|
||||
// and uses this same logic. To avoid iterating through graphemes
|
||||
// repeatedly, we separate that out into its own function.
|
||||
func graphemeWidth(rs []rune) (wd int) {
|
||||
// copied/adapted from [runewidth.StringWidth]
|
||||
for _, r := range rs {
|
||||
wd = runewidth.RuneWidth(r)
|
||||
if wd > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Print a string
|
||||
func Print(x, y int, fg, bg termbox.Attribute, msg string) {
|
||||
for _, c := range msg {
|
||||
termbox.SetCell(x, y, c, fg, bg)
|
||||
x++
|
||||
func (u *UI) Print(x, y int, style tcell.Style, msg string) {
|
||||
g := uniseg.NewGraphemes(msg)
|
||||
for g.Next() {
|
||||
rs := g.Runes()
|
||||
u.s.SetContent(x, y, rs[0], rs[1:], style)
|
||||
x += graphemeWidth(rs)
|
||||
}
|
||||
}
|
||||
|
||||
// Printf a string
|
||||
func Printf(x, y int, fg, bg termbox.Attribute, format string, args ...interface{}) {
|
||||
func (u *UI) Printf(x, y int, style tcell.Style, format string, args ...interface{}) {
|
||||
s := fmt.Sprintf(format, args...)
|
||||
Print(x, y, fg, bg, s)
|
||||
u.Print(x, y, style, s)
|
||||
}
|
||||
|
||||
// Line prints a string to given xmax, with given space
|
||||
func Line(x, y, xmax int, fg, bg termbox.Attribute, spacer rune, msg string) {
|
||||
for _, c := range msg {
|
||||
termbox.SetCell(x, y, c, fg, bg)
|
||||
x += runewidth.RuneWidth(c)
|
||||
func (u *UI) Line(x, y, xmax int, style tcell.Style, spacer rune, msg string) {
|
||||
g := uniseg.NewGraphemes(msg)
|
||||
for g.Next() {
|
||||
rs := g.Runes()
|
||||
u.s.SetContent(x, y, rs[0], rs[1:], style)
|
||||
x += graphemeWidth(rs)
|
||||
if x >= xmax {
|
||||
return
|
||||
}
|
||||
}
|
||||
for ; x < xmax; x++ {
|
||||
termbox.SetCell(x, y, spacer, fg, bg)
|
||||
u.s.SetContent(x, y, spacer, nil, style)
|
||||
}
|
||||
}
|
||||
|
||||
// Linef a string
|
||||
func Linef(x, y, xmax int, fg, bg termbox.Attribute, spacer rune, format string, args ...interface{}) {
|
||||
func (u *UI) Linef(x, y, xmax int, style tcell.Style, spacer rune, format string, args ...interface{}) {
|
||||
s := fmt.Sprintf(format, args...)
|
||||
Line(x, y, xmax, fg, bg, spacer, s)
|
||||
u.Line(x, y, xmax, style, spacer, s)
|
||||
}
|
||||
|
||||
// LineOptions Print line of selectable options
|
||||
func LineOptions(x, y, xmax int, fg, bg termbox.Attribute, options []string, selected int) {
|
||||
defaultBg := bg
|
||||
defaultFg := fg
|
||||
|
||||
// Print left+right whitespace to center the options
|
||||
xoffset := ((xmax - x) - lineOptionLength(options)) / 2
|
||||
for j := x; j < x+xoffset; j++ {
|
||||
termbox.SetCell(j, y, ' ', fg, bg)
|
||||
func (u *UI) LineOptions(x, y, xmax int, style tcell.Style, options []string, selected int) {
|
||||
for x := x; x < xmax; x++ {
|
||||
u.s.SetContent(x, y, ' ', nil, style) // fill
|
||||
}
|
||||
for j := xmax - xoffset; j < xmax; j++ {
|
||||
termbox.SetCell(j, y, ' ', fg, bg)
|
||||
}
|
||||
x += xoffset
|
||||
x += ((xmax - x) - lineOptionLength(options)) / 2 // center
|
||||
|
||||
for i, o := range options {
|
||||
termbox.SetCell(x, y, ' ', fg, bg)
|
||||
u.s.SetContent(x, y, ' ', nil, style)
|
||||
x++
|
||||
|
||||
ostyle := style
|
||||
if i == selected {
|
||||
bg = termbox.ColorBlack
|
||||
fg = termbox.ColorWhite
|
||||
}
|
||||
termbox.SetCell(x+1, y, '<', fg, bg)
|
||||
x += 2
|
||||
|
||||
// print option text
|
||||
for _, c := range o {
|
||||
termbox.SetCell(x, y, c, fg, bg)
|
||||
x++
|
||||
ostyle = tcell.StyleDefault
|
||||
}
|
||||
|
||||
termbox.SetCell(x, y, '>', fg, bg)
|
||||
bg = defaultBg
|
||||
fg = defaultFg
|
||||
u.s.SetContent(x, y, '<', nil, ostyle)
|
||||
x++
|
||||
|
||||
termbox.SetCell(x+1, y, ' ', fg, bg)
|
||||
x += 2
|
||||
g := uniseg.NewGraphemes(o)
|
||||
for g.Next() {
|
||||
rs := g.Runes()
|
||||
u.s.SetContent(x, y, rs[0], rs[1:], ostyle)
|
||||
x += graphemeWidth(rs)
|
||||
}
|
||||
|
||||
u.s.SetContent(x, y, '>', nil, ostyle)
|
||||
x++
|
||||
|
||||
u.s.SetContent(x, y, ' ', nil, style)
|
||||
x++
|
||||
}
|
||||
}
|
||||
|
||||
@@ -234,7 +253,7 @@ func lineOptionLength(o []string) int {
|
||||
|
||||
// Box the u.boxText onto the screen
|
||||
func (u *UI) Box() {
|
||||
w, h := termbox.Size()
|
||||
w, h := u.s.Size()
|
||||
|
||||
// Find dimensions of text
|
||||
boxWidth := 10
|
||||
@@ -260,31 +279,31 @@ func (u *UI) Box() {
|
||||
ymax := y + len(u.boxText)
|
||||
|
||||
// draw text
|
||||
fg, bg := termbox.ColorRed, termbox.ColorWhite
|
||||
style := tcell.StyleDefault.Background(tcell.ColorRed).Reverse(true)
|
||||
for i, s := range u.boxText {
|
||||
Line(x, y+i, xmax, fg, bg, ' ', s)
|
||||
fg = termbox.ColorBlack
|
||||
u.Line(x, y+i, xmax, style, ' ', s)
|
||||
style = tcell.StyleDefault.Reverse(true)
|
||||
}
|
||||
|
||||
if len(u.boxMenu) != 0 {
|
||||
u.LineOptions(x, ymax, xmax, style, u.boxMenu, u.boxMenuButton)
|
||||
ymax++
|
||||
LineOptions(x, ymax-1, xmax, fg, bg, u.boxMenu, u.boxMenuButton)
|
||||
}
|
||||
|
||||
// draw top border
|
||||
for i := y; i < ymax; i++ {
|
||||
termbox.SetCell(x-1, i, '│', fg, bg)
|
||||
termbox.SetCell(xmax, i, '│', fg, bg)
|
||||
u.s.SetContent(x-1, i, tcell.RuneVLine, nil, style)
|
||||
u.s.SetContent(xmax, i, tcell.RuneVLine, nil, style)
|
||||
}
|
||||
for j := x; j < xmax; j++ {
|
||||
termbox.SetCell(j, y-1, '─', fg, bg)
|
||||
termbox.SetCell(j, ymax, '─', fg, bg)
|
||||
u.s.SetContent(j, y-1, tcell.RuneHLine, nil, style)
|
||||
u.s.SetContent(j, ymax, tcell.RuneHLine, nil, style)
|
||||
}
|
||||
|
||||
termbox.SetCell(x-1, y-1, '┌', fg, bg)
|
||||
termbox.SetCell(xmax, y-1, '┐', fg, bg)
|
||||
termbox.SetCell(x-1, ymax, '└', fg, bg)
|
||||
termbox.SetCell(xmax, ymax, '┘', fg, bg)
|
||||
u.s.SetContent(x-1, y-1, tcell.RuneULCorner, nil, style)
|
||||
u.s.SetContent(xmax, y-1, tcell.RuneURCorner, nil, style)
|
||||
u.s.SetContent(x-1, ymax, tcell.RuneLLCorner, nil, style)
|
||||
u.s.SetContent(xmax, ymax, tcell.RuneLRCorner, nil, style)
|
||||
}
|
||||
|
||||
func (u *UI) moveBox(to int) {
|
||||
@@ -336,17 +355,17 @@ func (u *UI) hasEmptyDir() bool {
|
||||
// Draw the current screen
|
||||
func (u *UI) Draw() error {
|
||||
ctx := context.Background()
|
||||
w, h := termbox.Size()
|
||||
w, h := u.s.Size()
|
||||
u.dirListHeight = h - 3
|
||||
|
||||
// Plot
|
||||
termbox.Clear(termbox.ColorWhite, termbox.ColorBlack)
|
||||
u.s.Clear()
|
||||
|
||||
// Header line
|
||||
Linef(0, 0, w, termbox.ColorBlack, termbox.ColorWhite, ' ', "rclone ncdu %s - use the arrow keys to navigate, press ? for help", fs.Version)
|
||||
u.Linef(0, 0, w, tcell.StyleDefault.Reverse(true), ' ', "rclone ncdu %s - use the arrow keys to navigate, press ? for help", fs.Version)
|
||||
|
||||
// Directory line
|
||||
Linef(0, 1, w, termbox.ColorWhite, termbox.ColorBlack, '-', "-- %s ", u.path)
|
||||
u.Linef(0, 1, w, tcell.StyleDefault, '-', "-- %s ", u.path)
|
||||
|
||||
// graphs
|
||||
const (
|
||||
@@ -377,20 +396,18 @@ func (u *UI) Draw() error {
|
||||
attrs, err = u.d.AttrI(u.sortPerm[n])
|
||||
}
|
||||
_, isSelected := u.selectedEntries[entry.String()]
|
||||
fg := termbox.ColorWhite
|
||||
style := tcell.StyleDefault
|
||||
if attrs.EntriesHaveErrors {
|
||||
fg = termbox.ColorYellow
|
||||
style = style.Foreground(tcell.ColorYellow)
|
||||
}
|
||||
if err != nil {
|
||||
fg = termbox.ColorRed
|
||||
style = style.Foreground(tcell.ColorRed)
|
||||
}
|
||||
const colorLightYellow = termbox.ColorYellow + 8
|
||||
if isSelected {
|
||||
fg = colorLightYellow
|
||||
style = style.Foreground(tcell.ColorLightYellow)
|
||||
}
|
||||
bg := termbox.ColorBlack
|
||||
if n == dirPos.entry {
|
||||
fg, bg = bg, fg
|
||||
style = style.Reverse(true)
|
||||
}
|
||||
mark := ' '
|
||||
if attrs.IsDir {
|
||||
@@ -449,31 +466,30 @@ func (u *UI) Draw() error {
|
||||
}
|
||||
extras += "[" + graph[graphBars-bars:2*graphBars-bars] + "] "
|
||||
}
|
||||
Linef(0, y, w, fg, bg, ' ', "%c %s %s%c%s%s", fileFlag, operations.SizeStringField(attrs.Size, u.humanReadable, 12), extras, mark, path.Base(entry.Remote()), message)
|
||||
u.Linef(0, y, w, style, ' ', "%c %s %s%c%s%s",
|
||||
fileFlag, operations.SizeStringField(attrs.Size, u.humanReadable, 12), extras, mark, path.Base(entry.Remote()), message)
|
||||
y++
|
||||
}
|
||||
}
|
||||
|
||||
// Footer
|
||||
if u.d == nil {
|
||||
Line(0, h-1, w, termbox.ColorBlack, termbox.ColorWhite, ' ', "Waiting for root directory...")
|
||||
u.Line(0, h-1, w, tcell.StyleDefault.Reverse(true), ' ', "Waiting for root directory...")
|
||||
} else {
|
||||
message := ""
|
||||
if u.listing {
|
||||
message = " [listing in progress]"
|
||||
}
|
||||
size, count := u.d.Attr()
|
||||
Linef(0, h-1, w, termbox.ColorBlack, termbox.ColorWhite, ' ', "Total usage: %s, Objects: %s%s", operations.SizeString(size, u.humanReadable), operations.CountString(count, u.humanReadable), message)
|
||||
u.Linef(0, h-1, w, tcell.StyleDefault.Reverse(true), ' ', "Total usage: %s, Objects: %s%s",
|
||||
operations.SizeString(size, u.humanReadable), operations.CountString(count, u.humanReadable), message)
|
||||
}
|
||||
|
||||
// Show the box on top if required
|
||||
if u.showBox {
|
||||
u.Box()
|
||||
}
|
||||
err := termbox.Flush()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to flush screen: %w", err)
|
||||
}
|
||||
u.s.Show()
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -886,37 +902,34 @@ func NewUI(f fs.Fs) *UI {
|
||||
|
||||
// Show shows the user interface
|
||||
func (u *UI) Show() error {
|
||||
err := termbox.Init()
|
||||
var err error
|
||||
u.s, err = tcell.NewScreen()
|
||||
if err != nil {
|
||||
return fmt.Errorf("termbox init: %w", err)
|
||||
return fmt.Errorf("screen new: %w", err)
|
||||
}
|
||||
defer termbox.Close()
|
||||
err = u.s.Init()
|
||||
if err != nil {
|
||||
return fmt.Errorf("screen init: %w", err)
|
||||
}
|
||||
defer u.s.Fini()
|
||||
|
||||
// scan the disk in the background
|
||||
u.listing = true
|
||||
rootChan, errChan, updated := scan.Scan(context.Background(), u.f)
|
||||
|
||||
// Poll the events into a channel
|
||||
events := make(chan termbox.Event)
|
||||
doneWithEvent := make(chan bool)
|
||||
go func() {
|
||||
for {
|
||||
events <- termbox.PollEvent()
|
||||
<-doneWithEvent
|
||||
}
|
||||
}()
|
||||
events := make(chan tcell.Event)
|
||||
go u.s.ChannelEvents(events, nil)
|
||||
|
||||
// Main loop, waiting for events and channels
|
||||
outer:
|
||||
for {
|
||||
//Reset()
|
||||
err := u.Draw()
|
||||
if err != nil {
|
||||
return fmt.Errorf("draw failed: %w", err)
|
||||
}
|
||||
var root *scan.Dir
|
||||
select {
|
||||
case root = <-rootChan:
|
||||
case root := <-rootChan:
|
||||
u.root = root
|
||||
u.setCurrentDir(root)
|
||||
case err := <-errChan:
|
||||
@@ -926,39 +939,50 @@ outer:
|
||||
u.listing = false
|
||||
case <-updated:
|
||||
// redraw
|
||||
// might want to limit updates per second
|
||||
// TODO: might want to limit updates per second
|
||||
u.sortCurrentDir()
|
||||
case ev := <-events:
|
||||
doneWithEvent <- true
|
||||
if ev.Type == termbox.EventKey {
|
||||
switch ev.Key + termbox.Key(ev.Ch) {
|
||||
case termbox.KeyEsc, termbox.KeyCtrlC, 'q':
|
||||
switch ev := ev.(type) {
|
||||
case *tcell.EventResize:
|
||||
if u.root != nil {
|
||||
u.sortCurrentDir() // redraw
|
||||
}
|
||||
u.s.Sync()
|
||||
case *tcell.EventKey:
|
||||
var c rune
|
||||
if k := ev.Key(); k == tcell.KeyRune {
|
||||
c = ev.Rune()
|
||||
} else {
|
||||
c = key(k)
|
||||
}
|
||||
switch c {
|
||||
case key(tcell.KeyEsc), key(tcell.KeyCtrlC), 'q':
|
||||
if u.showBox {
|
||||
u.showBox = false
|
||||
} else {
|
||||
break outer
|
||||
}
|
||||
case termbox.KeyArrowDown, 'j':
|
||||
case key(tcell.KeyDown), 'j':
|
||||
u.move(1)
|
||||
case termbox.KeyArrowUp, 'k':
|
||||
case key(tcell.KeyUp), 'k':
|
||||
u.move(-1)
|
||||
case termbox.KeyPgdn, '-', '_':
|
||||
case key(tcell.KeyPgDn), '-', '_':
|
||||
u.move(u.dirListHeight)
|
||||
case termbox.KeyPgup, '=', '+':
|
||||
case key(tcell.KeyPgUp), '=', '+':
|
||||
u.move(-u.dirListHeight)
|
||||
case termbox.KeyArrowLeft, 'h':
|
||||
case key(tcell.KeyLeft), 'h':
|
||||
if u.showBox {
|
||||
u.moveBox(-1)
|
||||
break
|
||||
}
|
||||
u.up()
|
||||
case termbox.KeyEnter:
|
||||
case key(tcell.KeyEnter):
|
||||
if len(u.boxMenu) > 0 {
|
||||
u.handleBoxOption()
|
||||
break
|
||||
}
|
||||
u.enter()
|
||||
case termbox.KeyArrowRight, 'l':
|
||||
case key(tcell.KeyRight), 'l':
|
||||
if u.showBox {
|
||||
u.moveBox(1)
|
||||
break
|
||||
@@ -1001,11 +1025,8 @@ outer:
|
||||
|
||||
// Refresh the screen. Not obvious what key to map
|
||||
// this onto, but ^L is a common choice.
|
||||
case termbox.KeyCtrlL:
|
||||
err := termbox.Sync()
|
||||
if err != nil {
|
||||
fs.Errorf(nil, "termbox sync returned error: %v", err)
|
||||
}
|
||||
case key(tcell.KeyCtrlL):
|
||||
u.s.Sync()
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1013,3 +1034,8 @@ outer:
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// key returns a rune representing the key k. It is a negative value, to not collide with Unicode code-points.
|
||||
func key(k tcell.Key) rune {
|
||||
return rune(-k)
|
||||
}
|
||||
|
||||
@@ -42,6 +42,9 @@ obfuscating the hyphen itself.
|
||||
If you want to encrypt the config file then please use config file
|
||||
encryption - see [rclone config](/commands/rclone_config/) for more
|
||||
info.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.36",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
var password string
|
||||
|
||||
12
cmd/rc/rc.go
12
cmd/rc/rc.go
@@ -99,6 +99,9 @@ rclone rc server, e.g.:
|
||||
rclone rc --loopback operations/about fs=/
|
||||
|
||||
Use ` + "`rclone rc`" + ` to see a list of all possible commands.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.40",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(0, 1e9, command, args)
|
||||
cmd.Run(false, false, command, func() error {
|
||||
@@ -153,6 +156,15 @@ func ParseOptions(options []string) (opt map[string]string) {
|
||||
func setAlternateFlag(flagName string, output *string) {
|
||||
if rcFlag := pflag.Lookup(flagName); rcFlag != nil && rcFlag.Changed {
|
||||
*output = rcFlag.Value.String()
|
||||
if sliceValue, ok := rcFlag.Value.(pflag.SliceValue); ok {
|
||||
stringSlice := sliceValue.GetSlice()
|
||||
for _, value := range stringSlice {
|
||||
if value != "" {
|
||||
*output = value
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -56,6 +56,9 @@ Note that the upload can also not be retried because the data is
|
||||
not kept around until the upload succeeds. If you need to transfer
|
||||
a lot of data, you're better off caching locally and then
|
||||
` + "`rclone move`" + ` it to the destination.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.38",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/rc/rcflags"
|
||||
"github.com/rclone/rclone/fs/rc/rcserver"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
libhttp "github.com/rclone/rclone/lib/http"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@@ -31,7 +32,10 @@ for GET requests on the URL passed in. It will also open the URL in
|
||||
the browser when rclone is run.
|
||||
|
||||
See the [rc documentation](/rc/) for more info on the rc flags.
|
||||
`,
|
||||
` + libhttp.Help + libhttp.TemplateHelp + libhttp.AuthHelp,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.45",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(0, 1, command, args)
|
||||
if rcflags.Opt.Enabled {
|
||||
|
||||
@@ -16,6 +16,9 @@ func init() {
|
||||
var commandDefinition = &cobra.Command{
|
||||
Use: "reveal password",
|
||||
Short: `Reveal obscured password from rclone.conf`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.43",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
cmd.Run(false, false, command, func() error {
|
||||
|
||||
@@ -38,6 +38,9 @@ used with option ` + "`--rmdirs`" + `).
|
||||
To delete a path and any objects in it, use [purge](/commands/rclone_purge/)
|
||||
command.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.35",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fdst := cmd.NewFsDir(args)
|
||||
|
||||
@@ -64,6 +64,9 @@ var cmdSelfUpdate = &cobra.Command{
|
||||
Aliases: []string{"self-update"},
|
||||
Short: `Update the rclone binary.`,
|
||||
Long: strings.ReplaceAll(selfUpdateHelp, "|", "`"),
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.55",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
if Opt.Package == "" {
|
||||
|
||||
@@ -15,8 +15,6 @@ import (
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fstest/testy"
|
||||
"github.com/rclone/rclone/lib/file"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
@@ -46,20 +44,6 @@ func TestGetVersion(t *testing.T) {
|
||||
assert.Equal(t, "v1.52.3", resultVer)
|
||||
}
|
||||
|
||||
func makeTestDir() (testDir string, err error) {
|
||||
const maxAttempts = 5
|
||||
testDirBase := filepath.Join(os.TempDir(), "rclone-test-selfupdate.")
|
||||
|
||||
for attempt := 0; attempt < maxAttempts; attempt++ {
|
||||
testDir = testDirBase + random.String(4)
|
||||
err = file.MkdirAll(testDir, os.ModePerm)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func TestInstallOnLinux(t *testing.T) {
|
||||
testy.SkipUnreliable(t)
|
||||
if runtime.GOOS != "linux" {
|
||||
@@ -68,13 +52,8 @@ func TestInstallOnLinux(t *testing.T) {
|
||||
|
||||
// Prepare for test
|
||||
ctx := context.Background()
|
||||
testDir, err := makeTestDir()
|
||||
assert.NoError(t, err)
|
||||
testDir := t.TempDir()
|
||||
path := filepath.Join(testDir, "rclone")
|
||||
defer func() {
|
||||
_ = os.Chmod(path, 0644)
|
||||
_ = os.RemoveAll(testDir)
|
||||
}()
|
||||
|
||||
regexVer := regexp.MustCompile(`v[0-9]\S+`)
|
||||
|
||||
@@ -87,6 +66,9 @@ func TestInstallOnLinux(t *testing.T) {
|
||||
// Must fail on non-writable file
|
||||
assert.NoError(t, os.WriteFile(path, []byte("test"), 0644))
|
||||
assert.NoError(t, os.Chmod(path, 0000))
|
||||
defer func() {
|
||||
_ = os.Chmod(path, 0644)
|
||||
}()
|
||||
err = (InstallUpdate(ctx, &Options{Beta: true, Output: path}))
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "run self-update as root")
|
||||
@@ -122,11 +104,7 @@ func TestRenameOnWindows(t *testing.T) {
|
||||
// Prepare for test
|
||||
ctx := context.Background()
|
||||
|
||||
testDir, err := makeTestDir()
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
_ = os.RemoveAll(testDir)
|
||||
}()
|
||||
testDir := t.TempDir()
|
||||
|
||||
path := filepath.Join(testDir, "rclone.exe")
|
||||
regexVer := regexp.MustCompile(`v[0-9]\S+`)
|
||||
|
||||
27
cmd/serve/dlna/LICENSE.anacrolix
Normal file
27
cmd/serve/dlna/LICENSE.anacrolix
Normal file
@@ -0,0 +1,27 @@
|
||||
This directory contains code derived from https://github.com/anacrolix/dms
|
||||
which is under the following license.
|
||||
|
||||
Copyright (c) 2012, Matt Joiner <anacrolix@gmail.com>.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
* Neither the name of the <organization> nor the
|
||||
names of its contributors may be used to endorse or promote products
|
||||
derived from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
|
||||
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"github.com/anacrolix/dms/soap"
|
||||
"github.com/anacrolix/dms/ssdp"
|
||||
"github.com/anacrolix/dms/upnp"
|
||||
"github.com/anacrolix/log"
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/cmd/serve/dlna/data"
|
||||
"github.com/rclone/rclone/cmd/serve/dlna/dlnaflags"
|
||||
@@ -47,6 +48,9 @@ media transcoding support. This means that some players might show
|
||||
files that they are not able to play back correctly.
|
||||
|
||||
` + dlnaflags.Help + vfs.Help,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.46",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
f := cmd.NewFsSrc(args)
|
||||
@@ -401,6 +405,7 @@ func (s *server) ssdpInterface(intf net.Interface) {
|
||||
Server: serverField,
|
||||
UUID: s.RootDeviceUUID,
|
||||
NotifyInterval: s.AnnounceInterval,
|
||||
Logger: log.Default,
|
||||
}
|
||||
|
||||
// An interface with these flags should be valid for SSDP.
|
||||
|
||||
@@ -48,7 +48,9 @@ var Command = &cobra.Command{
|
||||
Use: "docker",
|
||||
Short: `Serve any remote on docker's volume plugin API.`,
|
||||
Long: strings.ReplaceAll(longHelp, "|", "`") + vfs.Help,
|
||||
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.56",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
cmd.Run(false, false, command, func() error {
|
||||
|
||||
@@ -99,6 +99,9 @@ By default this will serve files without needing a login.
|
||||
|
||||
You can set a single username and password with the --user and --pass flags.
|
||||
` + vfs.Help + proxy.Help,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.44",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
var f fs.Fs
|
||||
if proxyflags.Opt.AuthProxy == "" {
|
||||
|
||||
@@ -2,7 +2,8 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"html/template"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
@@ -12,14 +13,11 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/chi/v5/middleware"
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/cmd/serve/http/data"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
httplib "github.com/rclone/rclone/lib/http"
|
||||
"github.com/rclone/rclone/lib/http/auth"
|
||||
libhttp "github.com/rclone/rclone/lib/http"
|
||||
"github.com/rclone/rclone/lib/http/serve"
|
||||
"github.com/rclone/rclone/vfs"
|
||||
"github.com/rclone/rclone/vfs/vfsflags"
|
||||
@@ -28,20 +26,27 @@ import (
|
||||
|
||||
// Options required for http server
|
||||
type Options struct {
|
||||
data.Options
|
||||
Auth libhttp.AuthConfig
|
||||
HTTP libhttp.Config
|
||||
Template libhttp.TemplateConfig
|
||||
}
|
||||
|
||||
// DefaultOpt is the default values used for Options
|
||||
var DefaultOpt = Options{}
|
||||
var DefaultOpt = Options{
|
||||
Auth: libhttp.DefaultAuthCfg(),
|
||||
HTTP: libhttp.DefaultCfg(),
|
||||
Template: libhttp.DefaultTemplateCfg(),
|
||||
}
|
||||
|
||||
// Opt is options set by command line flags
|
||||
var Opt = DefaultOpt
|
||||
|
||||
func init() {
|
||||
data.AddFlags(Command.Flags(), "", &Opt.Options)
|
||||
httplib.AddFlags(Command.Flags())
|
||||
auth.AddFlags(Command.Flags())
|
||||
vfsflags.AddFlags(Command.Flags())
|
||||
flagSet := Command.Flags()
|
||||
libhttp.AddAuthFlagsPrefix(flagSet, "", &Opt.Auth)
|
||||
libhttp.AddHTTPFlagsPrefix(flagSet, "", &Opt.HTTP)
|
||||
libhttp.AddTemplateFlagsPrefix(flagSet, "", &Opt.Template)
|
||||
vfsflags.AddFlags(flagSet)
|
||||
}
|
||||
|
||||
// Command definition for cobra
|
||||
@@ -59,57 +64,67 @@ The server will log errors. Use ` + "`-v`" + ` to see access logs.
|
||||
|
||||
` + "`--bwlimit`" + ` will be respected for file transfers. Use ` + "`--stats`" + ` to
|
||||
control the stats printing.
|
||||
` + httplib.Help + data.Help + auth.Help + vfs.Help,
|
||||
` + libhttp.Help + libhttp.TemplateHelp + libhttp.AuthHelp + vfs.Help,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
f := cmd.NewFsSrc(args)
|
||||
|
||||
cmd.Run(false, true, command, func() error {
|
||||
s := newServer(f, Opt.Template)
|
||||
router, err := httplib.Router()
|
||||
ctx := context.Background()
|
||||
|
||||
s, err := run(ctx, f, Opt)
|
||||
if err != nil {
|
||||
return err
|
||||
log.Fatal(err)
|
||||
}
|
||||
s.Bind(router)
|
||||
httplib.Wait()
|
||||
|
||||
s.server.Wait()
|
||||
return nil
|
||||
})
|
||||
},
|
||||
}
|
||||
|
||||
// server contains everything to run the server
|
||||
type server struct {
|
||||
f fs.Fs
|
||||
vfs *vfs.VFS
|
||||
HTMLTemplate *template.Template // HTML template for web interface
|
||||
type serveCmd struct {
|
||||
f fs.Fs
|
||||
vfs *vfs.VFS
|
||||
server *libhttp.Server
|
||||
}
|
||||
|
||||
func newServer(f fs.Fs, templatePath string) *server {
|
||||
htmlTemplate, templateErr := data.GetTemplate(templatePath)
|
||||
if templateErr != nil {
|
||||
log.Fatalf(templateErr.Error())
|
||||
}
|
||||
s := &server{
|
||||
f: f,
|
||||
vfs: vfs.New(f, &vfsflags.Opt),
|
||||
HTMLTemplate: htmlTemplate,
|
||||
}
|
||||
return s
|
||||
}
|
||||
func run(ctx context.Context, f fs.Fs, opt Options) (*serveCmd, error) {
|
||||
var err error
|
||||
|
||||
func (s *server) Bind(router chi.Router) {
|
||||
if m := auth.Auth(auth.Opt); m != nil {
|
||||
router.Use(m)
|
||||
s := &serveCmd{
|
||||
f: f,
|
||||
vfs: vfs.New(f, &vfsflags.Opt),
|
||||
}
|
||||
|
||||
s.server, err = libhttp.NewServer(ctx,
|
||||
libhttp.WithConfig(opt.HTTP),
|
||||
libhttp.WithAuth(opt.Auth),
|
||||
libhttp.WithTemplate(opt.Template),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to init server: %w", err)
|
||||
}
|
||||
|
||||
router := s.server.Router()
|
||||
router.Use(
|
||||
middleware.SetHeader("Accept-Ranges", "bytes"),
|
||||
middleware.SetHeader("Server", "rclone/"+fs.Version),
|
||||
)
|
||||
router.Get("/*", s.handler)
|
||||
router.Head("/*", s.handler)
|
||||
|
||||
s.server.Serve()
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// handler reads incoming requests and dispatches them
|
||||
func (s *server) handler(w http.ResponseWriter, r *http.Request) {
|
||||
func (s *serveCmd) handler(w http.ResponseWriter, r *http.Request) {
|
||||
isDir := strings.HasSuffix(r.URL.Path, "/")
|
||||
remote := strings.Trim(r.URL.Path, "/")
|
||||
if isDir {
|
||||
@@ -120,7 +135,7 @@ func (s *server) handler(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// serveDir serves a directory index at dirRemote
|
||||
func (s *server) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string) {
|
||||
func (s *serveCmd) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string) {
|
||||
// List the directory
|
||||
node, err := s.vfs.Stat(dirRemote)
|
||||
if err == vfs.ENOENT {
|
||||
@@ -142,7 +157,7 @@ func (s *server) serveDir(w http.ResponseWriter, r *http.Request, dirRemote stri
|
||||
}
|
||||
|
||||
// Make the entries for display
|
||||
directory := serve.NewDirectory(dirRemote, s.HTMLTemplate)
|
||||
directory := serve.NewDirectory(dirRemote, s.server.HTMLTemplate())
|
||||
for _, node := range dirEntries {
|
||||
if vfsflags.Opt.NoModTime {
|
||||
directory.AddHTMLEntry(node.Path(), node.IsDir(), node.Size(), time.Time{})
|
||||
@@ -162,7 +177,7 @@ func (s *server) serveDir(w http.ResponseWriter, r *http.Request, dirRemote stri
|
||||
}
|
||||
|
||||
// serveFile serves a file object at remote
|
||||
func (s *server) serveFile(w http.ResponseWriter, r *http.Request, remote string) {
|
||||
func (s *serveCmd) serveFile(w http.ResponseWriter, r *http.Request, remote string) {
|
||||
node, err := s.vfs.Stat(remote)
|
||||
if err == vfs.ENOENT {
|
||||
fs.Infof(remote, "%s: File not found", r.RemoteAddr)
|
||||
|
||||
@@ -14,14 +14,14 @@ import (
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configfile"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
httplib "github.com/rclone/rclone/lib/http"
|
||||
libhttp "github.com/rclone/rclone/lib/http"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
updateGolden = flag.Bool("updategolden", false, "update golden files for regression test")
|
||||
httpServer *server
|
||||
sc *serveCmd
|
||||
testURL string
|
||||
)
|
||||
|
||||
@@ -30,16 +30,25 @@ const (
|
||||
testTemplate = "testdata/golden/testindex.html"
|
||||
)
|
||||
|
||||
func startServer(t *testing.T, f fs.Fs) {
|
||||
opt := httplib.DefaultOpt
|
||||
opt.ListenAddr = testBindAddress
|
||||
httpServer = newServer(f, testTemplate)
|
||||
router, err := httplib.Router()
|
||||
if err != nil {
|
||||
t.Fatal(err.Error())
|
||||
func start(t *testing.T, f fs.Fs) {
|
||||
ctx := context.Background()
|
||||
|
||||
opts := Options{
|
||||
HTTP: libhttp.DefaultCfg(),
|
||||
Template: libhttp.TemplateConfig{
|
||||
Path: testTemplate,
|
||||
},
|
||||
}
|
||||
httpServer.Bind(router)
|
||||
testURL = httplib.URL()
|
||||
opts.HTTP.ListenAddr = []string{testBindAddress}
|
||||
|
||||
s, err := run(ctx, f, opts)
|
||||
require.NoError(t, err, "failed to start server")
|
||||
sc = s
|
||||
|
||||
urls := s.server.URLs()
|
||||
require.Len(t, urls, 1, "expected one URL")
|
||||
|
||||
testURL = urls[0]
|
||||
|
||||
// try to connect to the test server
|
||||
pause := time.Millisecond
|
||||
@@ -54,7 +63,6 @@ func startServer(t *testing.T, f fs.Fs) {
|
||||
pause *= 2
|
||||
}
|
||||
t.Fatal("couldn't connect to server")
|
||||
|
||||
}
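The test helper above polls the freshly started server before running any assertions, doubling the pause between attempts. A minimal stand-alone sketch of that wait-for-server loop (the address and retry budget are made up for the example):

package main

import (
	"fmt"
	"net"
	"time"
)

// waitForServer dials addr until it answers, doubling the pause each try.
func waitForServer(addr string, attempts int) error {
	pause := time.Millisecond
	for i := 0; i < attempts; i++ {
		conn, err := net.Dial("tcp", addr)
		if err == nil {
			_ = conn.Close()
			return nil
		}
		time.Sleep(pause)
		pause *= 2
	}
	return fmt.Errorf("couldn't connect to server at %s", addr)
}

func main() {
	fmt.Println(waitForServer("127.0.0.1:8080", 10))
}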
|
||||
var (
|
||||
@@ -84,7 +92,7 @@ func TestInit(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, obj.SetModTime(context.Background(), expectedTime))
|
||||
|
||||
startServer(t, f)
|
||||
start(t, f)
|
||||
}
|
||||
|
||||
// check body against the file, or re-write body if -updategolden is
|
||||
@@ -229,7 +237,3 @@ func TestGET(t *testing.T) {
|
||||
checkGolden(t, test.Golden, body)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFinalise(t *testing.T) {
|
||||
_ = httplib.Shutdown()
|
||||
}
|
||||
|
||||
@@ -1,39 +0,0 @@
|
||||
// Package httpflags provides utility functionality to HTTP.
|
||||
package httpflags
|
||||
|
||||
import (
|
||||
"github.com/rclone/rclone/cmd/serve/httplib"
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
"github.com/rclone/rclone/fs/rc"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
// Options set by command line flags
|
||||
var (
|
||||
Opt = httplib.DefaultOpt
|
||||
)
|
||||
|
||||
// AddFlagsPrefix adds flags for the httplib
|
||||
func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *httplib.Options) {
|
||||
rc.AddOption(prefix+"http", &Opt)
|
||||
flags.StringVarP(flagSet, &Opt.ListenAddr, prefix+"addr", "", Opt.ListenAddr, "IPaddress:Port or :Port to bind server to")
|
||||
flags.DurationVarP(flagSet, &Opt.ServerReadTimeout, prefix+"server-read-timeout", "", Opt.ServerReadTimeout, "Timeout for server reading data")
|
||||
flags.DurationVarP(flagSet, &Opt.ServerWriteTimeout, prefix+"server-write-timeout", "", Opt.ServerWriteTimeout, "Timeout for server writing data")
|
||||
flags.IntVarP(flagSet, &Opt.MaxHeaderBytes, prefix+"max-header-bytes", "", Opt.MaxHeaderBytes, "Maximum size of request header")
|
||||
flags.StringVarP(flagSet, &Opt.SslCert, prefix+"cert", "", Opt.SslCert, "SSL PEM key (concatenation of certificate and CA certificate)")
|
||||
flags.StringVarP(flagSet, &Opt.SslKey, prefix+"key", "", Opt.SslKey, "SSL PEM Private key")
|
||||
flags.StringVarP(flagSet, &Opt.ClientCA, prefix+"client-ca", "", Opt.ClientCA, "Client certificate authority to verify clients with")
|
||||
flags.StringVarP(flagSet, &Opt.HtPasswd, prefix+"htpasswd", "", Opt.HtPasswd, "htpasswd file - if not provided no authentication is done")
|
||||
flags.StringVarP(flagSet, &Opt.Realm, prefix+"realm", "", Opt.Realm, "Realm for authentication")
|
||||
flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication")
|
||||
flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication")
|
||||
flags.StringVarP(flagSet, &Opt.BaseURL, prefix+"baseurl", "", Opt.BaseURL, "Prefix for URLs - leave blank for root")
|
||||
flags.StringVarP(flagSet, &Opt.Template, prefix+"template", "", Opt.Template, "User-specified template")
|
||||
flags.StringVarP(flagSet, &Opt.MinTLSVersion, prefix+"min-tls-version", "", Opt.MinTLSVersion, "Minimum TLS version that is acceptable")
|
||||
|
||||
}
|
||||
|
||||
// AddFlags adds flags for the httplib
|
||||
func AddFlags(flagSet *pflag.FlagSet) {
|
||||
AddFlagsPrefix(flagSet, "", &Opt)
|
||||
}
|
||||
@@ -1,438 +0,0 @@
|
||||
// Package httplib provides common functionality for http servers
|
||||
//
|
||||
// Deprecated: httplib has been replaced with lib/http
|
||||
package httplib
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
auth "github.com/abbot/go-http-auth"
|
||||
"github.com/rclone/rclone/cmd/serve/http/data"
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
|
||||
// Globals
|
||||
var ()
|
||||
|
||||
// Help contains text describing the http server to add to the command
|
||||
// help.
|
||||
var Help = `
|
||||
### Server options
|
||||
|
||||
Use ` + "`--addr`" + ` to specify which IP address and port the server should
|
||||
listen on, e.g. ` + "`--addr 1.2.3.4:8000` or `--addr :8080`" + ` to
|
||||
listen to all IPs. By default it only listens on localhost. You can use port
|
||||
:0 to let the OS choose an available port.
|
||||
|
||||
If you set ` + "`--addr`" + ` to listen on a public or LAN accessible IP address
|
||||
then using Authentication is advised - see the next section for info.
|
||||
|
||||
` + "`--server-read-timeout` and `--server-write-timeout`" + ` can be used to
|
||||
control the timeouts on the server. Note that this is the total time
|
||||
for a transfer.
|
||||
|
||||
` + "`--max-header-bytes`" + ` controls the maximum number of bytes the server will
|
||||
accept in the HTTP header.
|
||||
|
||||
` + "`--baseurl`" + ` controls the URL prefix that rclone serves from. By default
|
||||
rclone will serve from the root. If you used ` + "`--baseurl \"/rclone\"`" + ` then
|
||||
rclone would serve from a URL starting with "/rclone/". This is
|
||||
useful if you wish to proxy rclone serve. Rclone automatically
|
||||
inserts leading and trailing "/" on ` + "`--baseurl`" + `, so ` + "`--baseurl \"rclone\"`" + `,
|
||||
` + "`--baseurl \"/rclone\"` and `--baseurl \"/rclone/\"`" + ` are all treated
|
||||
identically.
|
||||
|
||||
` + "`--template`" + ` allows a user to specify a custom markup template for HTTP
|
||||
and WebDAV serve functions. The server exports the following markup
|
||||
to be used within the template to serve pages:
|
||||
|
||||
| Parameter | Description |
|
||||
| :---------- | :---------- |
|
||||
| .Name | The full path of a file/directory. |
|
||||
| .Title | Directory listing of .Name |
|
||||
| .Sort | The current sort used. This is changeable via ?sort= parameter |
|
||||
| | Sort Options: namedirfirst,name,size,time (default namedirfirst) |
|
||||
| .Order | The current ordering used. This is changeable via ?order= parameter |
|
||||
| | Order Options: asc,desc (default asc) |
|
||||
| .Query | Currently unused. |
|
||||
| .Breadcrumb | Allows for creating a relative navigation |
|
||||
|-- .Link | The relative to the root link of the Text. |
|
||||
|-- .Text | The Name of the directory. |
|
||||
| .Entries | Information about a specific file/directory. |
|
||||
|-- .URL | The 'url' of an entry. |
|
||||
|-- .Leaf | Currently same as 'URL' but intended to be 'just' the name. |
|
||||
|-- .IsDir | Boolean for if an entry is a directory or not. |
|
||||
|-- .Size | Size in Bytes of the entry. |
|
||||
|-- .ModTime | The UTC timestamp of an entry. |
|
||||
|
||||
#### Authentication
|
||||
|
||||
By default this will serve files without needing a login.
|
||||
|
||||
You can either use an htpasswd file which can take lots of users, or
|
||||
set a single username and password with the ` + "`--user` and `--pass`" + ` flags.
|
||||
|
||||
Use ` + "`--htpasswd /path/to/htpasswd`" + ` to provide an htpasswd file. This is
|
||||
in standard apache format and supports MD5, SHA1 and BCrypt for basic
|
||||
authentication. Bcrypt is recommended.
|
||||
|
||||
To create an htpasswd file:
|
||||
|
||||
touch htpasswd
|
||||
htpasswd -B htpasswd user
|
||||
htpasswd -B htpasswd anotherUser
|
||||
|
||||
The password file can be updated while rclone is running.
|
||||
|
||||
Use ` + "`--realm`" + ` to set the authentication realm.
|
||||
|
||||
#### SSL/TLS
|
||||
|
||||
By default this will serve over HTTP. If you want you can serve over
|
||||
HTTPS. You will need to supply the ` + "`--cert` and `--key`" + ` flags.
|
||||
If you wish to do client side certificate validation then you will need to
|
||||
supply ` + "`--client-ca`" + ` also.
|
||||
|
||||
` + "`--cert`" + ` should be either a PEM encoded certificate or a concatenation
|
||||
of that with the CA certificate. ` + "`--key`" + ` should be the PEM encoded
|
||||
private key and ` + "`--client-ca`" + ` should be the PEM encoded client
|
||||
certificate authority certificate.
|
||||
|
||||
--min-tls-version is minimum TLS version that is acceptable. Valid
|
||||
values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default
|
||||
"tls1.0").
|
||||
`
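The table of template parameters in the help text above is easier to read next to a concrete rendering. The sketch below uses made-up data and only a subset of the parameters; it shows how an entry list with .URL, .Leaf, .IsDir, .Size and .ModTime fields is fed through Go's html/template, which is what the --template option customises:

package main

import (
	"html/template"
	"os"
	"time"
)

type entry struct {
	URL, Leaf string
	IsDir     bool
	Size      int64
	ModTime   time.Time
}

type listing struct {
	Name    string
	Title   string
	Entries []entry
}

const page = `<h1>{{ .Title }}</h1>
<ul>
{{ range .Entries }}<li><a href="{{ .URL }}">{{ .Leaf }}</a> {{ if .IsDir }}(dir){{ else }}{{ .Size }} bytes{{ end }}</li>
{{ end }}</ul>`

func main() {
	t := template.Must(template.New("index").Parse(page))
	data := listing{
		Name:  "/files/",
		Title: "Directory listing of /files/",
		Entries: []entry{
			{URL: "docs/", Leaf: "docs/", IsDir: true},
			{URL: "readme.txt", Leaf: "readme.txt", Size: 1234, ModTime: time.Now()},
		},
	}
	_ = t.Execute(os.Stdout, data)
}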
|
||||
// Options contains options for the http Server
|
||||
type Options struct {
|
||||
ListenAddr string // Port to listen on
|
||||
BaseURL string // prefix to strip from URLs
|
||||
ServerReadTimeout time.Duration // Timeout for server reading data
|
||||
ServerWriteTimeout time.Duration // Timeout for server writing data
|
||||
MaxHeaderBytes int // Maximum size of request header
|
||||
SslCert string // SSL PEM key (concatenation of certificate and CA certificate)
|
||||
SslKey string // SSL PEM Private key
|
||||
ClientCA string // Client certificate authority to verify clients with
|
||||
HtPasswd string // htpasswd file - if not provided no authentication is done
|
||||
Realm string // realm for authentication
|
||||
BasicUser string // single username for basic auth if not using Htpasswd
|
||||
BasicPass string // password for BasicUser
|
||||
Auth AuthFn `json:"-"` // custom Auth (not set by command line flags)
|
||||
Template string // User specified template
|
||||
MinTLSVersion string // MinTLSVersion contains the minimum TLS version that is acceptable
|
||||
}
|
||||
|
||||
// AuthFn if used will be used to authenticate user, pass. If an error
|
||||
// is returned then the user is not authenticated.
|
||||
//
|
||||
// If a non nil value is returned then it is added to the context under the key
|
||||
type AuthFn func(user, pass string) (value interface{}, err error)
|
||||
|
||||
// DefaultOpt is the default values used for Options
|
||||
var DefaultOpt = Options{
|
||||
ListenAddr: "localhost:8080",
|
||||
Realm: "rclone",
|
||||
ServerReadTimeout: 1 * time.Hour,
|
||||
ServerWriteTimeout: 1 * time.Hour,
|
||||
MaxHeaderBytes: 4096,
|
||||
MinTLSVersion: "tls1.0",
|
||||
}
|
||||
|
||||
// Server contains info about the running http server
|
||||
type Server struct {
|
||||
Opt Options
|
||||
handler http.Handler // original handler
|
||||
listener net.Listener
|
||||
waitChan chan struct{} // for waiting on the listener to close
|
||||
httpServer *http.Server
|
||||
basicPassHashed string
|
||||
useSSL bool // if server is configured for SSL/TLS
|
||||
usingAuth bool // set if authentication is configured
|
||||
HTMLTemplate *template.Template // HTML template for web interface
|
||||
}
|
||||
|
||||
type contextUserType struct{}
|
||||
|
||||
// ContextUserKey is a simple context key for storing the username of the request
|
||||
var ContextUserKey = &contextUserType{}
|
||||
|
||||
type contextAuthType struct{}
|
||||
|
||||
// ContextAuthKey is a simple context key for storing info returned by AuthFn
|
||||
var ContextAuthKey = &contextAuthType{}
|
||||
|
||||
// singleUserProvider provides the encrypted password for a single user
|
||||
func (s *Server) singleUserProvider(user, realm string) string {
|
||||
if user == s.Opt.BasicUser {
|
||||
return s.basicPassHashed
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// parseAuthorization parses the Authorization header into user, pass
|
||||
// it returns a boolean as to whether the parse was successful
|
||||
func parseAuthorization(r *http.Request) (user, pass string, ok bool) {
|
||||
authHeader := r.Header.Get("Authorization")
|
||||
if authHeader != "" {
|
||||
s := strings.SplitN(authHeader, " ", 2)
|
||||
if len(s) == 2 && s[0] == "Basic" {
|
||||
b, err := base64.StdEncoding.DecodeString(s[1])
|
||||
if err == nil {
|
||||
parts := strings.SplitN(string(b), ":", 2)
|
||||
user = parts[0]
|
||||
if len(parts) > 1 {
|
||||
pass = parts[1]
|
||||
ok = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
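parseAuthorization above decodes the Basic scheme by hand; the standard library provides the same round trip. A tiny stand-alone illustration using net/http (the credentials are obviously made up):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET", "http://example.com/", nil)
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("alice", "secret") // writes "Authorization: Basic <base64>"

	user, pass, ok := req.BasicAuth() // parses the header back out
	fmt.Println(user, pass, ok)       // alice secret true
}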
|
||||
// NewServer creates an http server. The opt can be nil in which case
|
||||
// the default options will be used.
|
||||
func NewServer(handler http.Handler, opt *Options) *Server {
|
||||
s := &Server{
|
||||
handler: handler,
|
||||
}
|
||||
|
||||
// Make a copy of the options
|
||||
if opt != nil {
|
||||
s.Opt = *opt
|
||||
} else {
|
||||
s.Opt = DefaultOpt
|
||||
}
|
||||
|
||||
// Use htpasswd if required on everything
|
||||
if s.Opt.HtPasswd != "" || s.Opt.BasicUser != "" || s.Opt.Auth != nil {
|
||||
var authenticator *auth.BasicAuth
|
||||
if s.Opt.Auth == nil {
|
||||
var secretProvider auth.SecretProvider
|
||||
if s.Opt.HtPasswd != "" {
|
||||
fs.Infof(nil, "Using %q as htpasswd storage", s.Opt.HtPasswd)
|
||||
secretProvider = auth.HtpasswdFileProvider(s.Opt.HtPasswd)
|
||||
} else {
|
||||
fs.Infof(nil, "Using --user %s --pass XXXX as authenticated user", s.Opt.BasicUser)
|
||||
s.basicPassHashed = string(auth.MD5Crypt([]byte(s.Opt.BasicPass), []byte("dlPL2MqE"), []byte("$1$")))
|
||||
secretProvider = s.singleUserProvider
|
||||
}
|
||||
authenticator = auth.NewBasicAuthenticator(s.Opt.Realm, secretProvider)
|
||||
}
|
||||
oldHandler := handler
|
||||
handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// No auth wanted for OPTIONS method
|
||||
if r.Method == "OPTIONS" {
|
||||
oldHandler.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
unauthorized := func() {
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.Header().Set("WWW-Authenticate", `Basic realm="`+s.Opt.Realm+`"`)
|
||||
http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
|
||||
}
|
||||
user, pass, authValid := parseAuthorization(r)
|
||||
if !authValid {
|
||||
unauthorized()
|
||||
return
|
||||
}
|
||||
if s.Opt.Auth == nil {
|
||||
if username := authenticator.CheckAuth(r); username == "" {
|
||||
fs.Infof(r.URL.Path, "%s: Unauthorized request from %s", r.RemoteAddr, user)
|
||||
unauthorized()
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// Custom Auth
|
||||
value, err := s.Opt.Auth(user, pass)
|
||||
if err != nil {
|
||||
fs.Infof(r.URL.Path, "%s: Auth failed from %s: %v", r.RemoteAddr, user, err)
|
||||
unauthorized()
|
||||
return
|
||||
}
|
||||
if value != nil {
|
||||
r = r.WithContext(context.WithValue(r.Context(), ContextAuthKey, value))
|
||||
}
|
||||
}
|
||||
r = r.WithContext(context.WithValue(r.Context(), ContextUserKey, user))
|
||||
oldHandler.ServeHTTP(w, r)
|
||||
})
|
||||
s.usingAuth = true
|
||||
}
|
||||
|
||||
s.useSSL = s.Opt.SslKey != ""
|
||||
if (s.Opt.SslCert != "") != s.useSSL {
|
||||
log.Fatalf("Need both -cert and -key to use SSL")
|
||||
}
|
||||
|
||||
// If a Base URL is set then serve from there
|
||||
s.Opt.BaseURL = strings.Trim(s.Opt.BaseURL, "/")
|
||||
if s.Opt.BaseURL != "" {
|
||||
s.Opt.BaseURL = "/" + s.Opt.BaseURL
|
||||
}
|
||||
|
||||
var minTLSVersion uint16
|
||||
switch opt.MinTLSVersion {
|
||||
case "tls1.0":
|
||||
minTLSVersion = tls.VersionTLS10
|
||||
case "tls1.1":
|
||||
minTLSVersion = tls.VersionTLS11
|
||||
case "tls1.2":
|
||||
minTLSVersion = tls.VersionTLS12
|
||||
case "tls1.3":
|
||||
minTLSVersion = tls.VersionTLS13
|
||||
default:
|
||||
log.Fatalf("Invalid value for --min-tls-version")
|
||||
}
|
||||
|
||||
// FIXME make a transport?
|
||||
s.httpServer = &http.Server{
|
||||
Addr: s.Opt.ListenAddr,
|
||||
Handler: handler,
|
||||
ReadTimeout: s.Opt.ServerReadTimeout,
|
||||
WriteTimeout: s.Opt.ServerWriteTimeout,
|
||||
MaxHeaderBytes: s.Opt.MaxHeaderBytes,
|
||||
ReadHeaderTimeout: 10 * time.Second, // time to send the headers
|
||||
IdleTimeout: 60 * time.Second, // time to keep idle connections open
|
||||
TLSConfig: &tls.Config{
|
||||
MinVersion: minTLSVersion,
|
||||
},
|
||||
}
|
||||
|
||||
if s.Opt.ClientCA != "" {
|
||||
if !s.useSSL {
|
||||
log.Fatalf("Can't use --client-ca without --cert and --key")
|
||||
}
|
||||
certpool := x509.NewCertPool()
|
||||
pem, err := os.ReadFile(s.Opt.ClientCA)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to read client certificate authority: %v", err)
|
||||
}
|
||||
if !certpool.AppendCertsFromPEM(pem) {
|
||||
log.Fatalf("Can't parse client certificate authority")
|
||||
}
|
||||
s.httpServer.TLSConfig.ClientCAs = certpool
|
||||
s.httpServer.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
|
||||
}
|
||||
|
||||
htmlTemplate, templateErr := data.GetTemplate(s.Opt.Template)
|
||||
if templateErr != nil {
|
||||
log.Fatalf(templateErr.Error())
|
||||
}
|
||||
s.HTMLTemplate = htmlTemplate
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// Serve runs the server - returns an error only if
|
||||
// the listener was not started; does not block, so
|
||||
// use s.Wait() to block on the listener indefinitely.
|
||||
func (s *Server) Serve() error {
|
||||
ln, err := net.Listen("tcp", s.httpServer.Addr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("start server failed: %w", err)
|
||||
}
|
||||
s.listener = ln
|
||||
s.waitChan = make(chan struct{})
|
||||
go func() {
|
||||
var err error
|
||||
if s.useSSL {
|
||||
// hacky hack to get this to work with old Go versions, which
|
||||
// don't have ServeTLS on http.Server; see PR #2194.
|
||||
type tlsServer interface {
|
||||
ServeTLS(ln net.Listener, cert, key string) error
|
||||
}
|
||||
srvIface := interface{}(s.httpServer)
|
||||
if tlsSrv, ok := srvIface.(tlsServer); ok {
|
||||
// yay -- we get easy TLS support with HTTP/2
|
||||
err = tlsSrv.ServeTLS(s.listener, s.Opt.SslCert, s.Opt.SslKey)
|
||||
} else {
|
||||
// oh well -- we can still do TLS but might not have HTTP/2
|
||||
tlsConfig := new(tls.Config)
|
||||
tlsConfig.Certificates = make([]tls.Certificate, 1)
|
||||
tlsConfig.Certificates[0], err = tls.LoadX509KeyPair(s.Opt.SslCert, s.Opt.SslKey)
|
||||
if err != nil {
|
||||
log.Printf("Error loading key pair: %v", err)
|
||||
}
|
||||
tlsLn := tls.NewListener(s.listener, tlsConfig)
|
||||
err = s.httpServer.Serve(tlsLn)
|
||||
}
|
||||
} else {
|
||||
err = s.httpServer.Serve(s.listener)
|
||||
}
|
||||
if err != nil {
|
||||
log.Printf("Error on serving HTTP server: %v", err)
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Wait blocks while the listener is open.
|
||||
func (s *Server) Wait() {
|
||||
<-s.waitChan
|
||||
}
|
||||
|
||||
// Close shuts the running server down
|
||||
func (s *Server) Close() {
|
||||
err := s.httpServer.Close()
|
||||
if err != nil {
|
||||
log.Printf("Error on closing HTTP server: %v", err)
|
||||
return
|
||||
}
|
||||
close(s.waitChan)
|
||||
}
|
||||
|
||||
// URL returns the serving address of this server
|
||||
func (s *Server) URL() string {
|
||||
proto := "http"
|
||||
if s.useSSL {
|
||||
proto = "https"
|
||||
}
|
||||
addr := s.Opt.ListenAddr
|
||||
// prefer actual listener address if using ":port" or "addr:0"
|
||||
useActualAddress := addr == "" || addr[0] == ':' || addr[len(addr)-1] == ':' || strings.HasSuffix(addr, ":0")
|
||||
if s.listener != nil && useActualAddress {
|
||||
// use actual listener address; required if using 0-port
|
||||
// (i.e. port assigned by operating system)
|
||||
addr = s.listener.Addr().String()
|
||||
}
|
||||
return fmt.Sprintf("%s://%s%s/", proto, addr, s.Opt.BaseURL)
|
||||
}
|
||||
|
||||
// UsingAuth returns true if authentication is required
|
||||
func (s *Server) UsingAuth() bool {
|
||||
return s.usingAuth
|
||||
}
|
||||
|
||||
// Path returns the current path with the Prefix stripped
|
||||
//
|
||||
// If it returns false, then the path was invalid and the handler
|
||||
// should exit as the error response has already been sent
|
||||
func (s *Server) Path(w http.ResponseWriter, r *http.Request) (Path string, ok bool) {
|
||||
Path = r.URL.Path
|
||||
if s.Opt.BaseURL == "" {
|
||||
return Path, true
|
||||
}
|
||||
if !strings.HasPrefix(Path, s.Opt.BaseURL+"/") {
|
||||
// Send a redirect if the BaseURL was requested without a /
|
||||
if Path == s.Opt.BaseURL {
|
||||
http.Redirect(w, r, s.Opt.BaseURL+"/", http.StatusPermanentRedirect)
|
||||
return Path, false
|
||||
}
|
||||
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
|
||||
return Path, false
|
||||
}
|
||||
Path = Path[len(s.Opt.BaseURL):]
|
||||
return Path, true
|
||||
}
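The Path helper above strips the configured --baseurl prefix by hand and issues a redirect when the prefix is requested without a trailing slash. For the plain prefix-stripping part, the standard library's http.StripPrefix is the usual building block; a minimal sketch with an illustrative prefix:

package main

import "net/http"

func main() {
	files := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// r.URL.Path arrives here with the "/rclone" prefix already removed.
		_, _ = w.Write([]byte("path: " + r.URL.Path + "\n"))
	})

	mux := http.NewServeMux()
	mux.Handle("/rclone/", http.StripPrefix("/rclone", files))
	_ = http.ListenAndServe("127.0.0.1:8080", mux)
}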
@@ -9,20 +9,22 @@ import (
|
||||
|
||||
// cache implements a simple object cache
|
||||
type cache struct {
|
||||
mu sync.RWMutex // protects the cache
|
||||
items map[string]fs.Object // cache of objects
|
||||
mu sync.RWMutex // protects the cache
|
||||
items map[string]fs.Object // cache of objects
|
||||
cacheObjects bool // whether we are actually caching
|
||||
}
|
||||
|
||||
// create a new cache
|
||||
func newCache() *cache {
|
||||
func newCache(cacheObjects bool) *cache {
|
||||
return &cache{
|
||||
items: map[string]fs.Object{},
|
||||
items: map[string]fs.Object{},
|
||||
cacheObjects: cacheObjects,
|
||||
}
|
||||
}
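The change above moves the package-level cacheObjects flag into the cache struct so each server instance can hold an independently configured cache. A condensed, self-contained version of the resulting pattern, with string values standing in for fs.Object:

package main

import (
	"fmt"
	"sync"
)

// cache is a minimal stand-in for the object cache above: a mutex-protected
// map that can be disabled wholesale via the enabled field.
type cache struct {
	mu      sync.RWMutex
	items   map[string]string
	enabled bool
}

func newCache(enabled bool) *cache {
	return &cache{items: map[string]string{}, enabled: enabled}
}

func (c *cache) find(key string) (string, bool) {
	if !c.enabled {
		return "", false
	}
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.items[key]
	return v, ok
}

func (c *cache) add(key, value string) {
	if !c.enabled {
		return
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items[key] = value
}

func main() {
	c := newCache(true)
	c.add("potato", "object")
	fmt.Println(c.find("potato"))
}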
|
||||
// find the object at remote or return nil
|
||||
func (c *cache) find(remote string) fs.Object {
|
||||
if !cacheObjects {
|
||||
if !c.cacheObjects {
|
||||
return nil
|
||||
}
|
||||
c.mu.RLock()
|
||||
@@ -33,7 +35,7 @@ func (c *cache) find(remote string) fs.Object {
|
||||
|
||||
// add the object to the cache
|
||||
func (c *cache) add(remote string, o fs.Object) {
|
||||
if !cacheObjects {
|
||||
if !c.cacheObjects {
|
||||
return
|
||||
}
|
||||
c.mu.Lock()
|
||||
@@ -43,7 +45,7 @@ func (c *cache) add(remote string, o fs.Object) {
|
||||
|
||||
// remove the object from the cache
|
||||
func (c *cache) remove(remote string) {
|
||||
if !cacheObjects {
|
||||
if !c.cacheObjects {
|
||||
return
|
||||
}
|
||||
c.mu.Lock()
|
||||
@@ -53,7 +55,7 @@ func (c *cache) remove(remote string) {
|
||||
|
||||
// remove all the items with prefix from the cache
|
||||
func (c *cache) removePrefix(prefix string) {
|
||||
if !cacheObjects {
|
||||
if !c.cacheObjects {
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -21,7 +21,7 @@ func (c *cache) String() string {
|
||||
}
|
||||
|
||||
func TestCacheCRUD(t *testing.T) {
|
||||
c := newCache()
|
||||
c := newCache(true)
|
||||
assert.Equal(t, "", c.String())
|
||||
assert.Nil(t, c.find("potato"))
|
||||
o := mockobject.New("potato")
|
||||
@@ -35,7 +35,7 @@ func TestCacheCRUD(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCacheRemovePrefix(t *testing.T) {
|
||||
c := newCache()
|
||||
c := newCache(true)
|
||||
for _, remote := range []string{
|
||||
"a",
|
||||
"b",
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
@@ -12,41 +13,55 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/chi/v5/middleware"
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/cmd/serve/httplib"
|
||||
"github.com/rclone/rclone/cmd/serve/httplib/httpflags"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
libhttp "github.com/rclone/rclone/lib/http"
|
||||
"github.com/rclone/rclone/lib/http/serve"
|
||||
"github.com/rclone/rclone/lib/terminal"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/net/http2"
|
||||
)
|
||||
|
||||
var (
|
||||
stdio bool
|
||||
appendOnly bool
|
||||
privateRepos bool
|
||||
cacheObjects bool
|
||||
)
|
||||
// Options required for http server
|
||||
type Options struct {
|
||||
Auth libhttp.AuthConfig
|
||||
HTTP libhttp.Config
|
||||
Stdio bool
|
||||
AppendOnly bool
|
||||
PrivateRepos bool
|
||||
CacheObjects bool
|
||||
}
|
||||
|
||||
// DefaultOpt is the default values used for Options
|
||||
var DefaultOpt = Options{
|
||||
Auth: libhttp.DefaultAuthCfg(),
|
||||
HTTP: libhttp.DefaultCfg(),
|
||||
}
|
||||
|
||||
// Opt is options set by command line flags
|
||||
var Opt = DefaultOpt
|
||||
|
||||
func init() {
|
||||
httpflags.AddFlags(Command.Flags())
|
||||
flagSet := Command.Flags()
|
||||
flags.BoolVarP(flagSet, &stdio, "stdio", "", false, "Run an HTTP2 server on stdin/stdout")
|
||||
flags.BoolVarP(flagSet, &appendOnly, "append-only", "", false, "Disallow deletion of repository data")
|
||||
flags.BoolVarP(flagSet, &privateRepos, "private-repos", "", false, "Users can only access their private repo")
|
||||
flags.BoolVarP(flagSet, &cacheObjects, "cache-objects", "", true, "Cache listed objects")
|
||||
libhttp.AddAuthFlagsPrefix(flagSet, "", &Opt.Auth)
|
||||
libhttp.AddHTTPFlagsPrefix(flagSet, "", &Opt.HTTP)
|
||||
flags.BoolVarP(flagSet, &Opt.Stdio, "stdio", "", false, "Run an HTTP2 server on stdin/stdout")
|
||||
flags.BoolVarP(flagSet, &Opt.AppendOnly, "append-only", "", false, "Disallow deletion of repository data")
|
||||
flags.BoolVarP(flagSet, &Opt.PrivateRepos, "private-repos", "", false, "Users can only access their private repo")
|
||||
flags.BoolVarP(flagSet, &Opt.CacheObjects, "cache-objects", "", true, "Cache listed objects")
|
||||
}
|
||||
|
||||
// Command definition for cobra
|
||||
var Command = &cobra.Command{
|
||||
Use: "restic remote:path",
|
||||
Short: `Serve the remote for restic's REST API.`,
|
||||
Long: `Run a basic web server to serve a remove over restic's REST backend
|
||||
Long: `Run a basic web server to serve a remote over restic's REST backend
|
||||
API over HTTP. This allows restic to use rclone as a data storage
|
||||
mechanism for cloud providers that restic does not support directly.
|
||||
|
||||
@@ -127,13 +142,20 @@ these **must** end with /. Eg
|
||||
|
||||
The` + "`--private-repos`" + ` flag can be used to limit users to repositories starting
|
||||
with a path of ` + "`/<username>/`" + `.
|
||||
` + httplib.Help,
|
||||
` + libhttp.Help + libhttp.AuthHelp,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.40",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
ctx := context.Background()
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
f := cmd.NewFsSrc(args)
|
||||
cmd.Run(false, true, command, func() error {
|
||||
s := NewServer(f, &httpflags.Opt)
|
||||
if stdio {
|
||||
s, err := newServer(ctx, f, &Opt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if s.opt.Stdio {
|
||||
if terminal.IsTerminal(int(os.Stdout.Fd())) {
|
||||
return errors.New("refusing to run HTTP2 server directly on a terminal, please let restic start rclone")
|
||||
}
|
||||
@@ -145,15 +167,12 @@ with a path of ` + "`/<username>/`" + `.
|
||||
|
||||
httpSrv := &http2.Server{}
|
||||
opts := &http2.ServeConnOpts{
|
||||
Handler: s,
|
||||
Handler: s.Server.Router(),
|
||||
}
|
||||
httpSrv.ServeConn(conn, opts)
|
||||
return nil
|
||||
}
|
||||
err := s.Serve()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fs.Logf(s.f, "Serving restic REST API on %s", s.URLs())
|
||||
s.Wait()
|
||||
return nil
|
||||
})
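With --stdio the restic server speaks HTTP/2 over a single pre-existing connection instead of listening on a socket. The sketch below shows that ServeConn mechanism from golang.org/x/net/http2; the connection here comes from a throwaway TCP listener purely so the example runs, whereas rclone builds it from stdin/stdout:

package main

import (
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("served over a single connection\n"))
	})

	ln, err := net.Listen("tcp", "127.0.0.1:8080")
	if err != nil {
		panic(err)
	}
	conn, err := ln.Accept()
	if err != nil {
		panic(err)
	}

	srv := &http2.Server{}
	// ServeConn blocks, handling HTTP/2 frames on this one connection.
	srv.ServeConn(conn, &http2.ServeConnOpts{Handler: handler})
}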
@@ -164,101 +183,134 @@ const (
|
||||
resticAPIV2 = "application/vnd.x.restic.rest.v2"
|
||||
)
|
||||
|
||||
// Server contains everything to run the Server
|
||||
type Server struct {
|
||||
*httplib.Server
|
||||
type contextRemoteType struct{}
|
||||
|
||||
// ContextRemoteKey is a simple context key for storing the username of the request
|
||||
var ContextRemoteKey = &contextRemoteType{}
|
||||
|
||||
// WithRemote makes a remote from a URL path. This implements the backend layout
|
||||
// required by restic.
|
||||
func WithRemote(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
var urlpath string
|
||||
rctx := chi.RouteContext(r.Context())
|
||||
if rctx != nil && rctx.RoutePath != "" {
|
||||
urlpath = rctx.RoutePath
|
||||
} else {
|
||||
urlpath = r.URL.Path
|
||||
}
|
||||
urlpath = strings.Trim(urlpath, "/")
|
||||
parts := matchData.FindStringSubmatch(urlpath)
|
||||
// if no data directory, layout is flat
|
||||
if parts != nil {
|
||||
// otherwise map
|
||||
// data/2159dd48 to
|
||||
// data/21/2159dd48
|
||||
fileName := parts[1]
|
||||
prefix := urlpath[:len(urlpath)-len(fileName)]
|
||||
urlpath = prefix + fileName[:2] + "/" + fileName
|
||||
}
|
||||
ctx := context.WithValue(r.Context(), ContextRemoteKey, urlpath)
|
||||
next.ServeHTTP(w, r.WithContext(ctx))
|
||||
})
|
||||
}
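The WithRemote middleware above applies restic's on-disk layout rule: anything directly under data/ is sharded into a two-character subdirectory. The same mapping, lifted out of the handler (it mirrors the makeRemote helper removed further down) so it can be run on sample paths:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var matchData = regexp.MustCompile("(?:^|/)data/([^/]{2,})$")

// makeRemote maps data/2159dd48 to data/21/2159dd48; other paths pass through.
func makeRemote(path string) string {
	path = strings.Trim(path, "/")
	parts := matchData.FindStringSubmatch(path)
	if parts == nil {
		return path // no data directory, layout is flat
	}
	fileName := parts[1]
	prefix := path[:len(path)-len(fileName)]
	return prefix + fileName[:2] + "/" + fileName
}

func main() {
	fmt.Println(makeRemote("/data/2159dd48")) // data/21/2159dd48
	fmt.Println(makeRemote("/keys/123"))      // keys/123
}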
|
||||
// Middleware to ensure authenticated user is accessing their own private folder
|
||||
func checkPrivate(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
user := chi.URLParam(r, "userID")
|
||||
userID, ok := libhttp.CtxGetUser(r.Context())
|
||||
if ok && user != "" && user == userID {
|
||||
next.ServeHTTP(w, r)
|
||||
} else {
|
||||
http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// server contains everything to run the server
|
||||
type server struct {
|
||||
*libhttp.Server
|
||||
f fs.Fs
|
||||
cache *cache
|
||||
opt Options
|
||||
}
|
||||
|
||||
// NewServer returns an HTTP server that speaks the rest protocol
|
||||
func NewServer(f fs.Fs, opt *httplib.Options) *Server {
|
||||
mux := http.NewServeMux()
|
||||
s := &Server{
|
||||
Server: httplib.NewServer(mux, opt),
|
||||
f: f,
|
||||
cache: newCache(),
|
||||
func newServer(ctx context.Context, f fs.Fs, opt *Options) (s *server, err error) {
|
||||
s = &server{
|
||||
f: f,
|
||||
cache: newCache(opt.CacheObjects),
|
||||
opt: *opt,
|
||||
}
|
||||
mux.HandleFunc(s.Opt.BaseURL+"/", s.ServeHTTP)
|
||||
return s
|
||||
}
|
||||
|
||||
// Serve runs the http server in the background.
|
||||
//
|
||||
// Use s.Close() and s.Wait() to shutdown server
|
||||
func (s *Server) Serve() error {
|
||||
err := s.Server.Serve()
|
||||
// Don't bind any HTTP listeners if running with --stdio
|
||||
if opt.Stdio {
|
||||
opt.HTTP.ListenAddr = nil
|
||||
}
|
||||
s.Server, err = libhttp.NewServer(ctx,
|
||||
libhttp.WithConfig(opt.HTTP),
|
||||
libhttp.WithAuth(opt.Auth),
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, fmt.Errorf("failed to init server: %w", err)
|
||||
}
|
||||
router := s.Router()
|
||||
s.Bind(router)
|
||||
s.Server.Serve()
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// bind helper for main Bind method
|
||||
func (s *server) bind(router chi.Router) {
|
||||
router.MethodFunc("GET", "/*", func(w http.ResponseWriter, r *http.Request) {
|
||||
urlpath := chi.URLParam(r, "*")
|
||||
if urlpath == "" || strings.HasSuffix(urlpath, "/") {
|
||||
s.listObjects(w, r)
|
||||
} else {
|
||||
s.serveObject(w, r)
|
||||
}
|
||||
})
|
||||
router.MethodFunc("POST", "/*", func(w http.ResponseWriter, r *http.Request) {
|
||||
urlpath := chi.URLParam(r, "*")
|
||||
if urlpath == "" || strings.HasSuffix(urlpath, "/") {
|
||||
s.createRepo(w, r)
|
||||
} else {
|
||||
s.postObject(w, r)
|
||||
}
|
||||
})
|
||||
router.MethodFunc("HEAD", "/*", s.serveObject)
|
||||
router.MethodFunc("DELETE", "/*", s.deleteObject)
|
||||
}
|
||||
|
||||
// Bind restic server routes to passed router
|
||||
func (s *server) Bind(router chi.Router) {
|
||||
// FIXME
|
||||
// if m := authX.Auth(authX.Opt); m != nil {
|
||||
// router.Use(m)
|
||||
// }
|
||||
router.Use(
|
||||
middleware.SetHeader("Accept-Ranges", "bytes"),
|
||||
middleware.SetHeader("Server", "rclone/"+fs.Version),
|
||||
WithRemote,
|
||||
)
|
||||
|
||||
if s.opt.PrivateRepos {
|
||||
router.Route("/{userID}", func(r chi.Router) {
|
||||
r.Use(checkPrivate)
|
||||
s.bind(r)
|
||||
})
|
||||
router.NotFound(func(w http.ResponseWriter, _ *http.Request) {
|
||||
http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)
|
||||
})
|
||||
} else {
|
||||
s.bind(router)
|
||||
}
|
||||
fs.Logf(s.f, "Serving restic REST API on %s", s.URL())
|
||||
return nil
|
||||
}
|
||||
|
||||
var matchData = regexp.MustCompile("(?:^|/)data/([^/]{2,})$")
|
||||
|
||||
// Makes a remote from a URL path. This implements the backend layout
|
||||
// required by restic.
|
||||
func makeRemote(path string) string {
|
||||
path = strings.Trim(path, "/")
|
||||
parts := matchData.FindStringSubmatch(path)
|
||||
// if no data directory, layout is flat
|
||||
if parts == nil {
|
||||
return path
|
||||
}
|
||||
// otherwise map
|
||||
// data/2159dd48 to
|
||||
// data/21/2159dd48
|
||||
fileName := parts[1]
|
||||
prefix := path[:len(path)-len(fileName)]
|
||||
return prefix + fileName[:2] + "/" + fileName
|
||||
}
|
||||
|
||||
// ServeHTTP reads incoming requests and dispatches them
|
||||
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Accept-Ranges", "bytes")
|
||||
w.Header().Set("Server", "rclone/"+fs.Version)
|
||||
|
||||
path, ok := s.Path(w, r)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
remote := makeRemote(path)
|
||||
fs.Debugf(s.f, "%s %s", r.Method, path)
|
||||
|
||||
v := r.Context().Value(httplib.ContextUserKey)
|
||||
if privateRepos && (v == nil || !strings.HasPrefix(path, "/"+v.(string)+"/")) {
|
||||
http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
|
||||
// Dispatch on path then method
|
||||
if strings.HasSuffix(path, "/") {
|
||||
switch r.Method {
|
||||
case "GET":
|
||||
s.listObjects(w, r, remote)
|
||||
case "POST":
|
||||
s.createRepo(w, r, remote)
|
||||
default:
|
||||
http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
|
||||
}
|
||||
} else {
|
||||
switch r.Method {
|
||||
case "GET", "HEAD":
|
||||
s.serveObject(w, r, remote)
|
||||
case "POST":
|
||||
s.postObject(w, r, remote)
|
||||
case "DELETE":
|
||||
s.deleteObject(w, r, remote)
|
||||
default:
|
||||
http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// newObject returns an object with the remote given either from the
|
||||
// cache or directly
|
||||
func (s *Server) newObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
func (s *server) newObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
o := s.cache.find(remote)
|
||||
if o != nil {
|
||||
return o, nil
|
||||
@@ -272,7 +324,12 @@ func (s *Server) newObject(ctx context.Context, remote string) (fs.Object, error
|
||||
}
|
||||
|
||||
// get the remote
|
||||
func (s *Server) serveObject(w http.ResponseWriter, r *http.Request, remote string) {
|
||||
func (s *server) serveObject(w http.ResponseWriter, r *http.Request) {
|
||||
remote, ok := r.Context().Value(ContextRemoteKey).(string)
|
||||
if !ok {
|
||||
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
o, err := s.newObject(r.Context(), remote)
|
||||
if err != nil {
|
||||
fs.Debugf(remote, "%s request error: %v", r.Method, err)
|
||||
@@ -283,8 +340,13 @@ func (s *Server) serveObject(w http.ResponseWriter, r *http.Request, remote stri
|
||||
}
|
||||
|
||||
// postObject posts an object to the repository
|
||||
func (s *Server) postObject(w http.ResponseWriter, r *http.Request, remote string) {
|
||||
if appendOnly {
|
||||
func (s *server) postObject(w http.ResponseWriter, r *http.Request) {
|
||||
remote, ok := r.Context().Value(ContextRemoteKey).(string)
|
||||
if !ok {
|
||||
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if s.opt.AppendOnly {
|
||||
// make sure the file does not exist yet
|
||||
_, err := s.newObject(r.Context(), remote)
|
||||
if err == nil {
|
||||
@@ -309,8 +371,13 @@ func (s *Server) postObject(w http.ResponseWriter, r *http.Request, remote strin
|
||||
}
|
||||
|
||||
// delete the remote
|
||||
func (s *Server) deleteObject(w http.ResponseWriter, r *http.Request, remote string) {
|
||||
if appendOnly {
|
||||
func (s *server) deleteObject(w http.ResponseWriter, r *http.Request) {
|
||||
remote, ok := r.Context().Value(ContextRemoteKey).(string)
|
||||
if !ok {
|
||||
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if s.opt.AppendOnly {
|
||||
parts := strings.Split(r.URL.Path, "/")
|
||||
|
||||
// if path doesn't end in "/locks/:name", disallow the operation
|
||||
@@ -359,14 +426,18 @@ func (ls *listItems) add(o fs.Object) {
|
||||
}
|
||||
|
||||
// listObjects lists all Objects of a given type in an arbitrary order.
|
||||
func (s *Server) listObjects(w http.ResponseWriter, r *http.Request, remote string) {
|
||||
fs.Debugf(remote, "list request")
|
||||
|
||||
if r.Header.Get("Accept") != resticAPIV2 {
|
||||
fs.Errorf(remote, "Restic v2 API required")
|
||||
http.Error(w, "Restic v2 API required", http.StatusBadRequest)
|
||||
func (s *server) listObjects(w http.ResponseWriter, r *http.Request) {
|
||||
remote, ok := r.Context().Value(ContextRemoteKey).(string)
|
||||
if !ok {
|
||||
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if r.Header.Get("Accept") != resticAPIV2 {
|
||||
fs.Errorf(remote, "Restic v2 API required for List Objects")
|
||||
http.Error(w, "Restic v2 API required for List Objects", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
fs.Debugf(remote, "list request")
|
||||
|
||||
// make sure an empty list is returned, and not a 'nil' value
|
||||
ls := listItems{}
|
||||
@@ -405,7 +476,12 @@ func (s *Server) listObjects(w http.ResponseWriter, r *http.Request, remote stri
|
||||
// createRepo creates repository directories.
|
||||
//
|
||||
// We don't bother creating the data dirs as rclone will create them on the fly
|
||||
func (s *Server) createRepo(w http.ResponseWriter, r *http.Request, remote string) {
|
||||
func (s *server) createRepo(w http.ResponseWriter, r *http.Request) {
|
||||
remote, ok := r.Context().Value(ContextRemoteKey).(string)
|
||||
if !ok {
|
||||
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
fs.Infof(remote, "Creating repository")
|
||||
|
||||
if r.URL.Query().Get("create") != "true" {
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package restic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"io"
|
||||
@@ -9,7 +10,6 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/cmd/serve/httplib/httpflags"
|
||||
"github.com/rclone/rclone/fs/config/configfile"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
@@ -62,6 +62,7 @@ func createOverwriteDeleteSeq(t testing.TB, path string) []TestRequest {
|
||||
|
||||
// TestResticHandler runs tests on the restic handler code, especially in append-only mode.
|
||||
func TestResticHandler(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
configfile.Install()
|
||||
buf := make([]byte, 32)
|
||||
_, err := io.ReadFull(rand.Reader, buf)
|
||||
@@ -110,19 +111,18 @@ func TestResticHandler(t *testing.T) {
|
||||
// setup rclone with a local backend in a temporary directory
|
||||
tempdir := t.TempDir()
|
||||
|
||||
// globally set append-only mode
|
||||
prev := appendOnly
|
||||
appendOnly = true
|
||||
defer func() {
|
||||
appendOnly = prev // reset when done
|
||||
}()
|
||||
// set append-only mode
|
||||
opt := newOpt()
|
||||
opt.AppendOnly = true
|
||||
|
||||
// make a new file system in the temp dir
|
||||
f := cmd.NewFsSrc([]string{tempdir})
|
||||
srv := NewServer(f, &httpflags.Opt)
|
||||
s, err := newServer(ctx, f, &opt)
|
||||
require.NoError(t, err)
|
||||
router := s.Server.Router()
|
||||
|
||||
// create the repo
|
||||
checkRequest(t, srv.ServeHTTP,
|
||||
checkRequest(t, router.ServeHTTP,
|
||||
newRequest(t, "POST", "/?create=true", nil),
|
||||
[]wantFunc{wantCode(http.StatusOK)})
|
||||
|
||||
@@ -130,7 +130,7 @@ func TestResticHandler(t *testing.T) {
|
||||
t.Run("", func(t *testing.T) {
|
||||
for i, seq := range test.seq {
|
||||
t.Logf("request %v: %v %v", i, seq.req.Method, seq.req.URL.Path)
|
||||
checkRequest(t, srv.ServeHTTP, seq.req, seq.want)
|
||||
checkRequest(t, router.ServeHTTP, seq.req, seq.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -8,23 +8,21 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/cmd/serve/httplib"
|
||||
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/cmd/serve/httplib/httpflags"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// newAuthenticatedRequest returns a new HTTP request with the given params.
|
||||
func newAuthenticatedRequest(t testing.TB, method, path string, body io.Reader) *http.Request {
|
||||
func newAuthenticatedRequest(t testing.TB, method, path string, body io.Reader, user, pass string) *http.Request {
|
||||
req := newRequest(t, method, path, body)
|
||||
req = req.WithContext(context.WithValue(req.Context(), httplib.ContextUserKey, "test"))
|
||||
req.SetBasicAuth(user, pass)
|
||||
req.Header.Add("Accept", resticAPIV2)
|
||||
return req
|
||||
}
|
||||
|
||||
// TestResticPrivateRepositories runs tests on the restic handler code for private repositories
|
||||
func TestResticPrivateRepositories(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
buf := make([]byte, 32)
|
||||
_, err := io.ReadFull(rand.Reader, buf)
|
||||
require.NoError(t, err)
|
||||
@@ -32,42 +30,49 @@ func TestResticPrivateRepositories(t *testing.T) {
|
||||
// setup rclone with a local backend in a temporary directory
|
||||
tempdir := t.TempDir()
|
||||
|
||||
// globally set private-repos mode & test user
|
||||
prev := privateRepos
|
||||
prevUser := httpflags.Opt.BasicUser
|
||||
prevPassword := httpflags.Opt.BasicPass
|
||||
privateRepos = true
|
||||
httpflags.Opt.BasicUser = "test"
|
||||
httpflags.Opt.BasicPass = "password"
|
||||
// reset when done
|
||||
defer func() {
|
||||
privateRepos = prev
|
||||
httpflags.Opt.BasicUser = prevUser
|
||||
httpflags.Opt.BasicPass = prevPassword
|
||||
}()
|
||||
opt := newOpt()
|
||||
|
||||
// set private-repos mode & test user
|
||||
opt.PrivateRepos = true
|
||||
opt.Auth.BasicUser = "test"
|
||||
opt.Auth.BasicPass = "password"
|
||||
|
||||
// make a new file system in the temp dir
|
||||
f := cmd.NewFsSrc([]string{tempdir})
|
||||
srv := NewServer(f, &httpflags.Opt)
|
||||
s, err := newServer(ctx, f, &opt)
|
||||
require.NoError(t, err)
|
||||
router := s.Server.Router()
|
||||
|
||||
// Requesting /test/ should allow access
|
||||
reqs := []*http.Request{
|
||||
newAuthenticatedRequest(t, "POST", "/test/?create=true", nil),
|
||||
newAuthenticatedRequest(t, "POST", "/test/config", strings.NewReader("foobar test config")),
|
||||
newAuthenticatedRequest(t, "GET", "/test/config", nil),
|
||||
newAuthenticatedRequest(t, "POST", "/test/?create=true", nil, opt.Auth.BasicUser, opt.Auth.BasicPass),
|
||||
newAuthenticatedRequest(t, "POST", "/test/config", strings.NewReader("foobar test config"), opt.Auth.BasicUser, opt.Auth.BasicPass),
|
||||
newAuthenticatedRequest(t, "GET", "/test/config", nil, opt.Auth.BasicUser, opt.Auth.BasicPass),
|
||||
}
|
||||
for _, req := range reqs {
|
||||
checkRequest(t, srv.ServeHTTP, req, []wantFunc{wantCode(http.StatusOK)})
|
||||
checkRequest(t, router.ServeHTTP, req, []wantFunc{wantCode(http.StatusOK)})
|
||||
}
|
||||
|
||||
// Requesting with bad credentials should raise unauthorised errors
|
||||
reqs = []*http.Request{
|
||||
newRequest(t, "GET", "/test/config", nil),
|
||||
newAuthenticatedRequest(t, "GET", "/test/config", nil, opt.Auth.BasicUser, ""),
|
||||
newAuthenticatedRequest(t, "GET", "/test/config", nil, "", opt.Auth.BasicPass),
|
||||
newAuthenticatedRequest(t, "GET", "/test/config", nil, opt.Auth.BasicUser+"x", opt.Auth.BasicPass),
|
||||
newAuthenticatedRequest(t, "GET", "/test/config", nil, opt.Auth.BasicUser, opt.Auth.BasicPass+"x"),
|
||||
}
|
||||
for _, req := range reqs {
|
||||
checkRequest(t, router.ServeHTTP, req, []wantFunc{wantCode(http.StatusUnauthorized)})
|
||||
}
|
||||
|
||||
// Requesting everything else should raise forbidden errors
|
||||
reqs = []*http.Request{
|
||||
newAuthenticatedRequest(t, "GET", "/", nil),
|
||||
newAuthenticatedRequest(t, "POST", "/other_user", nil),
|
||||
newAuthenticatedRequest(t, "GET", "/other_user/config", nil),
|
||||
newAuthenticatedRequest(t, "GET", "/", nil, opt.Auth.BasicUser, opt.Auth.BasicPass),
|
||||
newAuthenticatedRequest(t, "POST", "/other_user", nil, opt.Auth.BasicUser, opt.Auth.BasicPass),
|
||||
newAuthenticatedRequest(t, "GET", "/other_user/config", nil, opt.Auth.BasicUser, opt.Auth.BasicPass),
|
||||
}
|
||||
for _, req := range reqs {
|
||||
checkRequest(t, srv.ServeHTTP, req, []wantFunc{wantCode(http.StatusForbidden)})
|
||||
checkRequest(t, router.ServeHTTP, req, []wantFunc{wantCode(http.StatusForbidden)})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -5,14 +5,16 @@ package restic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"os/exec"
|
||||
"testing"
|
||||
|
||||
_ "github.com/rclone/rclone/backend/all"
|
||||
"github.com/rclone/rclone/cmd/serve/httplib"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -20,16 +22,24 @@ const (
|
||||
resticSource = "../../../../../restic/restic"
|
||||
)
|
||||
|
||||
func newOpt() Options {
|
||||
opt := DefaultOpt
|
||||
opt.HTTP.ListenAddr = []string{testBindAddress}
|
||||
return opt
|
||||
}
|
||||
|
||||
// TestRestic runs the restic server then runs the unit tests for the
|
||||
// restic remote against it.
|
||||
func TestRestic(t *testing.T) {
|
||||
//
|
||||
// Requires the restic source code in the location indicated by resticSource.
|
||||
func TestResticIntegration(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
_, err := os.Stat(resticSource)
|
||||
if err != nil {
|
||||
t.Skipf("Skipping test as restic source not found: %v", err)
|
||||
}
|
||||
|
||||
opt := httplib.DefaultOpt
|
||||
opt.ListenAddr = testBindAddress
|
||||
opt := newOpt()
|
||||
|
||||
fstest.Initialise()
|
||||
|
||||
@@ -41,16 +51,16 @@ func TestRestic(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Start the server
|
||||
w := NewServer(fremote, &opt)
|
||||
assert.NoError(t, w.Serve())
|
||||
s, err := newServer(ctx, fremote, &opt)
|
||||
require.NoError(t, err)
|
||||
testURL := s.Server.URLs()[0]
|
||||
defer func() {
|
||||
w.Close()
|
||||
w.Wait()
|
||||
_ = s.Shutdown()
|
||||
}()
|
||||
|
||||
// Change directory to run the tests
|
||||
err = os.Chdir(resticSource)
|
||||
assert.NoError(t, err, "failed to cd to restic source code")
|
||||
require.NoError(t, err, "failed to cd to restic source code")
|
||||
|
||||
// Run the restic tests
|
||||
runTests := func(path string) {
|
||||
@@ -60,7 +70,7 @@ func TestRestic(t *testing.T) {
|
||||
}
|
||||
cmd := exec.Command("go", args...)
|
||||
cmd.Env = append(os.Environ(),
|
||||
"RESTIC_TEST_REST_REPOSITORY=rest:"+w.Server.URL()+path,
|
||||
"RESTIC_TEST_REST_REPOSITORY=rest:"+testURL+path,
|
||||
"GO111MODULE=on",
|
||||
)
|
||||
out, err := cmd.CombinedOutput()
|
||||
@@ -81,7 +91,6 @@ func TestMakeRemote(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in, want string
|
||||
}{
|
||||
{"", ""},
|
||||
{"/", ""},
|
||||
{"/data", "data"},
|
||||
{"/data/", "data"},
|
||||
@@ -94,7 +103,14 @@ func TestMakeRemote(t *testing.T) {
|
||||
{"/keys/12", "keys/12"},
|
||||
{"/keys/123", "keys/123"},
|
||||
} {
|
||||
got := makeRemote(test.in)
|
||||
assert.Equal(t, test.want, got, test.in)
|
||||
r := httptest.NewRequest("GET", test.in, nil)
|
||||
w := httptest.NewRecorder()
|
||||
next := http.HandlerFunc(func(_ http.ResponseWriter, request *http.Request) {
|
||||
remote, ok := request.Context().Value(ContextRemoteKey).(string)
|
||||
assert.True(t, ok, "Failed to get remote from context")
|
||||
assert.Equal(t, test.want, remote, test.in)
|
||||
})
|
||||
got := WithRemote(next)
|
||||
got.ServeHTTP(w, r)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// declare a few helper functions
|
||||
@@ -15,11 +14,10 @@ import (
|
||||
// wantFunc tests the HTTP response in res and marks the test as errored if something is incorrect.
|
||||
type wantFunc func(t testing.TB, res *httptest.ResponseRecorder)
|
||||
|
||||
// newRequest returns a new HTTP request with the given params. On error, the
|
||||
// test is marked as failed.
|
||||
// newRequest returns a new HTTP request with the given params
|
||||
func newRequest(t testing.TB, method, path string, body io.Reader) *http.Request {
|
||||
req, err := http.NewRequest(method, path, body)
|
||||
require.NoError(t, err)
|
||||
req := httptest.NewRequest(method, path, body)
|
||||
req.Header.Add("Accept", resticAPIV2)
|
||||
return req
|
||||
}
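The reworked tests above exercise the chi router directly through its ServeHTTP method instead of binding a real socket. The essence of that pattern with the standard httptest package (handler and path are made up for the example):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write([]byte("pong"))
	})

	req := httptest.NewRequest("GET", "/ping", nil) // no real network involved
	rec := httptest.NewRecorder()
	handler.ServeHTTP(rec, req)

	fmt.Println(rec.Code, rec.Body.String()) // 200 pong
}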
|
||||
|
||||
@@ -49,6 +49,9 @@ subcommand to specify the protocol, e.g.
|
||||
|
||||
Each subcommand has its own options which you can see in their help.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
if len(args) == 0 {
|
||||
return errors.New("serve requires a protocol, e.g. 'rclone serve http remote:'")
|
||||
|
||||
@@ -114,6 +114,9 @@ checksumming is possible but less secure and you could use the SFTP server
|
||||
provided by OpenSSH in this case.
|
||||
|
||||
` + vfs.Help + proxy.Help,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.48",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
var f fs.Fs
|
||||
if proxyflags.Opt.AuthProxy == "" {
|
||||
|
||||
@@ -10,14 +10,15 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
chi "github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/chi/v5/middleware"
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/cmd/serve/httplib"
|
||||
"github.com/rclone/rclone/cmd/serve/httplib/httpflags"
|
||||
"github.com/rclone/rclone/cmd/serve/proxy"
|
||||
"github.com/rclone/rclone/cmd/serve/proxy/proxyflags"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/flags"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
libhttp "github.com/rclone/rclone/lib/http"
|
||||
"github.com/rclone/rclone/lib/http/serve"
|
||||
"github.com/rclone/rclone/vfs"
|
||||
"github.com/rclone/rclone/vfs/vfsflags"
|
||||
@@ -25,19 +26,37 @@ import (
|
||||
"golang.org/x/net/webdav"
|
||||
)
|
||||
|
||||
var (
|
||||
hashName string
|
||||
hashType = hash.None
|
||||
disableGETDir = false
|
||||
)
|
||||
// Options required for http server
|
||||
type Options struct {
|
||||
Auth libhttp.AuthConfig
|
||||
HTTP libhttp.Config
|
||||
Template libhttp.TemplateConfig
|
||||
HashName string
|
||||
HashType hash.Type
|
||||
DisableGETDir bool
|
||||
}
|
||||
|
||||
// DefaultOpt is the default values used for Options
|
||||
var DefaultOpt = Options{
|
||||
Auth: libhttp.DefaultAuthCfg(),
|
||||
HTTP: libhttp.DefaultCfg(),
|
||||
Template: libhttp.DefaultTemplateCfg(),
|
||||
HashType: hash.None,
|
||||
DisableGETDir: false,
|
||||
}
|
||||
|
||||
// Opt is options set by command line flags
|
||||
var Opt = DefaultOpt
|
||||
|
||||
func init() {
|
||||
flagSet := Command.Flags()
|
||||
httpflags.AddFlags(flagSet)
|
||||
libhttp.AddAuthFlagsPrefix(flagSet, "", &Opt.Auth)
|
||||
libhttp.AddHTTPFlagsPrefix(flagSet, "", &Opt.HTTP)
|
||||
libhttp.AddTemplateFlagsPrefix(flagSet, "", &Opt.Template)
|
||||
vfsflags.AddFlags(flagSet)
|
||||
proxyflags.AddFlags(flagSet)
|
||||
flags.StringVarP(flagSet, &hashName, "etag-hash", "", "", "Which hash to use for the ETag, or auto or blank for off")
|
||||
flags.BoolVarP(flagSet, &disableGETDir, "disable-dir-list", "", false, "Disable HTML directory list on GET request for a directory")
|
||||
flags.StringVarP(flagSet, &Opt.HashName, "etag-hash", "", "", "Which hash to use for the ETag, or auto or blank for off")
|
||||
flags.BoolVarP(flagSet, &Opt.DisableGETDir, "disable-dir-list", "", false, "Disable HTML directory list on GET request for a directory")
|
||||
}
|
||||
|
||||
// Command definition for cobra
|
||||
@@ -60,7 +79,10 @@ supported hash on the backend or you can use a named hash such as
|
||||
"MD5" or "SHA-1". Use the [hashsum](/commands/rclone_hashsum/) command
|
||||
to see the full list.
|
||||
|
||||
` + httplib.Help + vfs.Help + proxy.Help,
|
||||
` + libhttp.Help + libhttp.TemplateHelp + libhttp.AuthHelp + vfs.Help + proxy.Help,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
var f fs.Fs
|
||||
if proxyflags.Opt.AuthProxy == "" {
|
||||
@@ -69,21 +91,24 @@ to see the full list.
|
||||
} else {
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
}
|
||||
hashType = hash.None
|
||||
if hashName == "auto" {
|
||||
hashType = f.Hashes().GetOne()
|
||||
} else if hashName != "" {
|
||||
err := hashType.Set(hashName)
|
||||
Opt.HashType = hash.None
|
||||
if Opt.HashName == "auto" {
|
||||
Opt.HashType = f.Hashes().GetOne()
|
||||
} else if Opt.HashName != "" {
|
||||
err := Opt.HashType.Set(Opt.HashName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if hashType != hash.None {
|
||||
fs.Debugf(f, "Using hash %v for ETag", hashType)
|
||||
if Opt.HashType != hash.None {
|
||||
fs.Debugf(f, "Using hash %v for ETag", Opt.HashType)
|
||||
}
|
||||
cmd.Run(false, false, command, func() error {
|
||||
s := newWebDAV(context.Background(), f, &httpflags.Opt)
|
||||
err := s.serve()
|
||||
s, err := newWebDAV(context.Background(), f, &Opt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = s.serve()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -107,7 +132,8 @@ to see the full list.
// might apply". In particular, whether or not renaming a file or directory
// overwriting another existing file or directory is an error is OS-dependent.
type WebDAV struct {
*httplib.Server
*libhttp.Server
opt Options
f fs.Fs
_vfs *vfs.VFS // don't use directly, use getVFS
webdavhandler *webdav.Handler
@@ -119,29 +145,61 @@ type WebDAV struct {
var _ webdav.FileSystem = (*WebDAV)(nil)

// Make a new WebDAV to serve the remote
func newWebDAV(ctx context.Context, f fs.Fs, opt *httplib.Options) *WebDAV {
w := &WebDAV{
func newWebDAV(ctx context.Context, f fs.Fs, opt *Options) (w *WebDAV, err error) {
w = &WebDAV{
f: f,
ctx: ctx,
opt: *opt,
}
if proxyflags.Opt.AuthProxy != "" {
w.proxy = proxy.New(ctx, &proxyflags.Opt)
// override auth
copyOpt := *opt
copyOpt.Auth = w.auth
opt = &copyOpt
w.opt.Auth.CustomAuthFn = w.auth
} else {
w._vfs = vfs.New(f, &vfsflags.Opt)
}
w.Server = httplib.NewServer(http.HandlerFunc(w.handler), opt)

w.Server, err = libhttp.NewServer(ctx,
libhttp.WithConfig(w.opt.HTTP),
libhttp.WithAuth(w.opt.Auth),
libhttp.WithTemplate(w.opt.Template),
)
if err != nil {
return nil, fmt.Errorf("failed to init server: %w", err)
}

webdavHandler := &webdav.Handler{
Prefix: w.Server.Opt.BaseURL,
Prefix: w.opt.HTTP.BaseURL,
FileSystem: w,
LockSystem: webdav.NewMemLS(),
Logger: w.logRequest, // FIXME
}
w.webdavhandler = webdavHandler
return w

router := w.Server.Router()
router.Use(
middleware.SetHeader("Accept-Ranges", "bytes"),
middleware.SetHeader("Server", "rclone/"+fs.Version),
)

router.Handle("/*", w)

// Webdav only methods not defined in chi
methods := []string{
"COPY", // Copies the resource.
"LOCK", // Locks the resource.
"MKCOL", // Creates the collection specified.
"MOVE", // Moves the resource.
"PROPFIND", // Performs a property find on the server.
"PROPPATCH", // Sets or removes properties on the server.
"UNLOCK", // Unlocks the resource.
}
for _, method := range methods {
chi.RegisterMethod(method)
router.Method(method, "/*", w)
}

return w, nil
}

// Gets the VFS in use for this request
@@ -149,7 +207,7 @@ func (w *WebDAV) getVFS(ctx context.Context) (VFS *vfs.VFS, err error) {
if w._vfs != nil {
return w._vfs, nil
}
value := ctx.Value(httplib.ContextAuthKey)
value := libhttp.CtxGetAuth(ctx)
if value == nil {
return nil, errors.New("no VFS found in context")
}
@@ -169,17 +227,17 @@ func (w *WebDAV) auth(user, pass string) (value interface{}, err error) {
return VFS, err
}

func (w *WebDAV) handler(rw http.ResponseWriter, r *http.Request) {
urlPath, ok := w.Path(rw, r)
if !ok {
return
}
func (w *WebDAV) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
urlPath := r.URL.Path
isDir := strings.HasSuffix(urlPath, "/")
remote := strings.Trim(urlPath, "/")
if !disableGETDir && (r.Method == "GET" || r.Method == "HEAD") && isDir {
if !w.opt.DisableGETDir && (r.Method == "GET" || r.Method == "HEAD") && isDir {
w.serveDir(rw, r, remote)
return
}
// Add URL Prefix back to path since webdavhandler needs to
// return absolute references.
r.URL.Path = w.opt.HTTP.BaseURL + r.URL.Path
w.webdavhandler.ServeHTTP(rw, r)
}

@@ -214,7 +272,7 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str
}

// Make the entries for display
directory := serve.NewDirectory(dirRemote, w.HTMLTemplate)
directory := serve.NewDirectory(dirRemote, w.Server.HTMLTemplate())
for _, node := range dirEntries {
if vfsflags.Opt.NoModTime {
directory.AddHTMLEntry(node.Path(), node.IsDir(), node.Size(), time.Time{})
@@ -234,11 +292,8 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str
//
// Use s.Close() and s.Wait() to shutdown server
func (w *WebDAV) serve() error {
err := w.Serve()
if err != nil {
return err
}
fs.Logf(w.f, "WebDav Server started on %s", w.URL())
w.Serve()
fs.Logf(w.f, "WebDav Server started on %s", w.URLs())
return nil
}

@@ -273,7 +328,7 @@ func (w *WebDAV) OpenFile(ctx context.Context, name string, flags int, perm os.F
if err != nil {
return nil, err
}
return Handle{f}, nil
return Handle{Handle: f, w: w}, nil
}

// RemoveAll removes a file or a directory and its contents
@@ -315,12 +370,13 @@ func (w *WebDAV) Stat(ctx context.Context, name string) (fi os.FileInfo, err err
if err != nil {
return nil, err
}
return FileInfo{fi}, nil
return FileInfo{FileInfo: fi, w: w}, nil
}

// Handle represents an open file
type Handle struct {
vfs.Handle
w *WebDAV
}

// Readdir reads directory entries from the handle
@@ -331,7 +387,7 @@ func (h Handle) Readdir(count int) (fis []os.FileInfo, err error) {
}
// Wrap each FileInfo
for i := range fis {
fis[i] = FileInfo{fis[i]}
fis[i] = FileInfo{FileInfo: fis[i], w: h.w}
}
return fis, nil
}
@@ -342,19 +398,20 @@ func (h Handle) Stat() (fi os.FileInfo, err error) {
if err != nil {
return nil, err
}
return FileInfo{fi}, nil
return FileInfo{FileInfo: fi, w: h.w}, nil
}

// FileInfo represents info about a file satisfying os.FileInfo and
// also some additional interfaces for webdav for ETag and ContentType
type FileInfo struct {
os.FileInfo
w *WebDAV
}

// ETag returns an ETag for the FileInfo
func (fi FileInfo) ETag(ctx context.Context) (etag string, err error) {
// defer log.Trace(fi, "")("etag=%q, err=%v", &etag, &err)
if hashType == hash.None {
if fi.w.opt.HashType == hash.None {
return "", webdav.ErrNotImplemented
}
node, ok := (fi.FileInfo).(vfs.Node)
@@ -367,7 +424,7 @@ func (fi FileInfo) ETag(ctx context.Context) (etag string, err error) {
if !ok {
return "", webdav.ErrNotImplemented
}
hash, err := o.Hash(ctx, hashType)
hash, err := o.Hash(ctx, fi.w.opt.HashType)
if err != nil || hash == "" {
return "", webdav.ErrNotImplemented
}

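Taken together, the webdav.go hunks above migrate the server from cmd/serve/httplib to lib/http: options are grouped, the server is built with functional options, and the WebDAV verbs are registered on the chi router before serving. A condensed sketch of that pattern, using only calls that appear in the diff; it is not a complete program, and handler stands in for any http.Handler (such as the WebDAV struct above):

    // Condensed from the added lines above; ctx, opt and handler are assumed to be in scope.
    srv, err := libhttp.NewServer(ctx,
        libhttp.WithConfig(opt.HTTP),       // listen addresses, base URL, TLS
        libhttp.WithAuth(opt.Auth),         // basic auth or a custom auth function
        libhttp.WithTemplate(opt.Template), // directory listing template
    )
    if err != nil {
        return fmt.Errorf("failed to init server: %w", err)
    }
    router := srv.Router()
    router.Use(middleware.SetHeader("Accept-Ranges", "bytes"))
    router.Handle("/*", handler)
    // chi only knows the standard verbs, so the WebDAV-only methods are registered explicitly.
    for _, method := range []string{"COPY", "LOCK", "MKCOL", "MOVE", "PROPFIND", "PROPPATCH", "UNLOCK"} {
        chi.RegisterMethod(method)
        router.Method(method, "/*", handler)
    }
    srv.Serve()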
@@ -19,7 +19,6 @@ import (
"time"

_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/cmd/serve/httplib"
"github.com/rclone/rclone/cmd/serve/servetest"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
@@ -40,9 +39,9 @@ const (

// check interfaces
var (
_ os.FileInfo = FileInfo{nil}
_ webdav.ETager = FileInfo{nil}
_ webdav.ContentTyper = FileInfo{nil}
_ os.FileInfo = FileInfo{nil, nil}
_ webdav.ETager = FileInfo{nil, nil}
_ webdav.ContentTyper = FileInfo{nil, nil}
)

// TestWebDav runs the webdav server then runs the unit tests for the
@@ -50,28 +49,30 @@ var (
func TestWebDav(t *testing.T) {
// Configure and start the server
start := func(f fs.Fs) (configmap.Simple, func()) {
opt := httplib.DefaultOpt
opt.ListenAddr = testBindAddress
opt.BasicUser = testUser
opt.BasicPass = testPass
opt.Template = testTemplate
hashType = hash.MD5
opt := DefaultOpt
opt.HTTP.ListenAddr = []string{testBindAddress}
opt.HTTP.BaseURL = "/prefix"
opt.Auth.BasicUser = testUser
opt.Auth.BasicPass = testPass
opt.Template.Path = testTemplate
opt.HashType = hash.MD5

// Start the server
w := newWebDAV(context.Background(), f, &opt)
assert.NoError(t, w.serve())
w, err := newWebDAV(context.Background(), f, &opt)
require.NoError(t, err)
require.NoError(t, w.serve())

// Config for the backend we'll use to connect to the server
config := configmap.Simple{
"type": "webdav",
"vendor": "other",
"url": w.Server.URL(),
"url": w.Server.URLs()[0],
"user": testUser,
"pass": obscure.MustObscure(testPass),
}

return config, func() {
w.Close()
assert.NoError(t, w.Shutdown())
w.Wait()
}
}
@@ -98,18 +99,19 @@ func TestHTTPFunction(t *testing.T) {
f, err := fs.NewFs(context.Background(), "../http/testdata/files")
assert.NoError(t, err)

opt := httplib.DefaultOpt
opt.ListenAddr = testBindAddress
opt.Template = testTemplate
opt := DefaultOpt
opt.HTTP.ListenAddr = []string{testBindAddress}
opt.Template.Path = testTemplate

// Start the server
w := newWebDAV(context.Background(), f, &opt)
assert.NoError(t, w.serve())
w, err := newWebDAV(context.Background(), f, &opt)
assert.NoError(t, err)
require.NoError(t, w.serve())
defer func() {
w.Close()
assert.NoError(t, w.Shutdown())
w.Wait()
}()
testURL := w.Server.URL()
testURL := w.Server.URLs()[0]
pause := time.Millisecond
i := 0
for ; i < 10; i++ {

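The test hunks above mirror the same migration from the caller's side: configuration now starts from DefaultOpt, the listen address is a slice because lib/http can bind several listeners, shutdown is Shutdown() plus Wait() instead of Close(), and the bound address is read back with URLs(). A hedged sketch of that start/stop shape; the literal values are illustrative and f is assumed to be an fs.Fs created by the test:

    opt := DefaultOpt
    opt.HTTP.ListenAddr = []string{"localhost:0"} // one entry per listener; port 0 lets the OS pick a port
    opt.Auth.BasicUser = "user"
    opt.Auth.BasicPass = "pass"
    w, err := newWebDAV(context.Background(), f, &opt)
    require.NoError(t, err)
    require.NoError(t, w.serve())
    defer func() {
        assert.NoError(t, w.Shutdown()) // Shutdown replaces the old Close()
        w.Wait()
    }()
    testURL := w.Server.URLs()[0] // URL of the first (and here only) listener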
@@ -40,6 +40,9 @@ Or just provide remote directory and all files in directory will be tiered

rclone settier tier remote:path/dir
`,
Annotations: map[string]string{
"versionIntroduced": "v1.44",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(2, 2, command, args)
tier := args[0]

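This hunk, and the ones that follow for the other commands, all add the same thing: a cobra Annotations entry recording the rclone version in which the command first appeared, presumably so the documentation generator can surface it. cobra.Command.Annotations is a plain map[string]string, so reading the value back is just a map lookup; a minimal hedged sketch with the command fields abbreviated and the printing purely illustrative:

    cmd := &cobra.Command{
        Use: "settier tier remote:path",
        Annotations: map[string]string{
            "versionIntroduced": "v1.44",
        },
    }
    // Anything walking the command tree can pick the value up again:
    if v := cmd.Annotations["versionIntroduced"]; v != "" {
        fmt.Printf("%s (introduced in %s)\n", cmd.Use, v)
    }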
@@ -41,6 +41,9 @@ as a relative path).
This command can also hash data received on STDIN, if not passing
a remote:path.
`,
Annotations: map[string]string{
"versionIntroduced": "v1.27",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 1, command, args)
if found, err := hashsum.CreateFromStdinArg(hash.SHA1, args, 0); found {

@@ -44,6 +44,9 @@ Rclone will then show a notice in the log indicating how many such
files were encountered, and count them in as empty files in the output
of the size command.
`,
Annotations: map[string]string{
"versionIntroduced": "v1.23",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)

@@ -26,6 +26,9 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "changenotify remote:",
Short: `Log any change notify requests for the remote passed in.`,
Annotations: map[string]string{
"versionIntroduced": "v1.56",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 1, command, args)
f := cmd.NewFsSrc(args)

@@ -28,6 +28,9 @@ in filenames in the remote:path specified.
The data doesn't contain any identifying information but is useful for
the rclone developers when developing filename compression.
`,
Annotations: map[string]string{
"versionIntroduced": "v1.55",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
f := cmd.NewFsDir(args)

@@ -66,6 +66,9 @@ a bit of go code for each one.

**NB** this can create undeletable files and other hazards - use with care
`,
Annotations: map[string]string{
"versionIntroduced": "v1.55",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1e6, command, args)
if !checkNormalization && !checkControl && !checkLength && !checkStreaming && !all {

@@ -74,6 +74,9 @@ func init() {
var makefilesCmd = &cobra.Command{
Use: "makefiles <dir>",
Short: `Make a random file hierarchy in a directory`,
Annotations: map[string]string{
"versionIntroduced": "v1.55",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
commonInit()
@@ -105,6 +108,9 @@ var makefilesCmd = &cobra.Command{
var makefileCmd = &cobra.Command{
Use: "makefile <size> [<file>]+ [flags]",
Short: `Make files with random contents of the size given`,
Annotations: map[string]string{
"versionIntroduced": "v1.59",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1e6, command, args)
commonInit()

@@ -20,12 +20,16 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "memory remote:path",
Short: `Load all the objects at remote:path into memory and report memory stats.`,
Annotations: map[string]string{
"versionIntroduced": "v1.55",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
ctx := context.Background()
ci := fs.GetConfig(context.Background())
metadata := ci.Metadata && fsrc.Features().ReadMetadata
objects, _, _, err := operations.Count(ctx, fsrc)
if err != nil {
return err
@@ -36,6 +40,13 @@ var commandDefinition = &cobra.Command{
runtime.ReadMemStats(&before)
var mu sync.Mutex
err = operations.ListFn(ctx, fsrc, func(o fs.Object) {
// Read the metadata so it gets cached in the object
if metadata {
_, err := fs.GetMetadata(ctx, o)
if err != nil {
fs.Errorf(o, "Failed to read metadata: %v", err)
}
}
mu.Lock()
objs = append(objs, o)
mu.Unlock()

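The memory hunks above make rclone test memory account for object metadata as well: when the global metadata option is on and the backend reports ReadMetadata in its feature flags, each object's metadata is fetched once inside the listing callback so it is cached on the object before memory is measured. The guard is the usual rclone feature-detection pattern; condensed from the added lines, with mu and objs assumed to be declared as in the surrounding code:

    metadata := ci.Metadata && fsrc.Features().ReadMetadata // only if requested and supported
    err = operations.ListFn(ctx, fsrc, func(o fs.Object) {
        if metadata {
            if _, err := fs.GetMetadata(ctx, o); err != nil {
                fs.Errorf(o, "Failed to read metadata: %v", err)
            }
        }
        mu.Lock()
        objs = append(objs, o)
        mu.Unlock()
    })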
@@ -25,4 +25,7 @@ Each subcommand has its own options which you can see in their help.
**NB** Be careful running these commands, they may do strange things
so reading their documentation first is recommended.
`,
Annotations: map[string]string{
"versionIntroduced": "v1.55",
},
}

@@ -64,6 +64,9 @@ time instead of the current time. Times may be specified as one of:
Note that value of ` + "`--timestamp`" + ` is in UTC. If you want local time
then add the ` + "`--localtime`" + ` flag.
`,
Annotations: map[string]string{
"versionIntroduced": "v1.39",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
f, remote := newFsDst(args)

@@ -28,7 +28,6 @@ func TestMain(m *testing.M) {

func TestTouchOneFile(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()

err := Touch(context.Background(), r.Fremote, "newFile")
require.NoError(t, err)
@@ -38,7 +37,6 @@ func TestTouchOneFile(t *testing.T) {

func TestTouchWithNoCreateFlag(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()

notCreateNewFile = true
err := Touch(context.Background(), r.Fremote, "newFile")
@@ -50,7 +48,6 @@ func TestTouchWithNoCreateFlag(t *testing.T) {

func TestTouchWithTimestamp(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()

timeAsArgument = "060102"
srcFileName := "oldFile"
@@ -61,7 +58,6 @@ func TestTouchWithLongerTimestamp(t *testing.T) {

func TestTouchWithLongerTimestamp(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()

timeAsArgument = "2006-01-02T15:04:05"
srcFileName := "oldFile"
@@ -72,7 +68,6 @@ func TestTouchWithLongerTimestamp(t *testing.T) {

func TestTouchUpdateTimestamp(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()

srcFileName := "a"
content := "aaa"
@@ -87,7 +82,6 @@ func TestTouchUpdateTimestamp(t *testing.T) {

func TestTouchUpdateTimestampWithCFlag(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()

srcFileName := "a"
content := "aaa"
@@ -104,7 +98,6 @@ func TestTouchUpdateTimestampWithCFlag(t *testing.T) {

func TestTouchCreateMultipleDirAndFile(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()

longPath := "a/b/c.txt"
err := Touch(context.Background(), r.Fremote, longPath)
@@ -115,7 +108,6 @@ func TestTouchCreateMultipleDirAndFile(t *testing.T) {

func TestTouchEmptyName(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()

err := Touch(context.Background(), r.Fremote, "")
require.NoError(t, err)
@@ -124,7 +116,6 @@ func TestTouchEmptyName(t *testing.T) {

func TestTouchEmptyDir(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()

err := r.Fremote.Mkdir(context.Background(), "a")
require.NoError(t, err)
@@ -135,7 +126,6 @@ func TestTouchEmptyDir(t *testing.T) {

func TestTouchDirWithFiles(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()

err := r.Fremote.Mkdir(context.Background(), "a")
require.NoError(t, err)
@@ -148,7 +138,6 @@ func TestTouchDirWithFiles(t *testing.T) {

func TestRecursiveTouchDirWithFiles(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()

err := r.Fremote.Mkdir(context.Background(), "a/b/c")
require.NoError(t, err)

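Every touch test hunk above drops its defer r.Finalise() line, presumably because fstest.NewRun(t) now registers its own cleanup with the testing framework, making the explicit call redundant. The resulting test shape, condensed from the diff (the helper names are the test's own; the cleanup behaviour of NewRun is an assumption, not shown in these hunks):

    func TestTouchOneFile(t *testing.T) {
        r := fstest.NewRun(t) // cleanup assumed to be registered by NewRun itself

        err := Touch(context.Background(), r.Fremote, "newFile")
        require.NoError(t, err)
    }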
@@ -61,7 +61,6 @@ func init() {
flags.StringVarP(cmdFlags, &sort, "sort", "", "", "Select sort: name,version,size,mtime,ctime")
// Graphics
flags.BoolVarP(cmdFlags, &opts.NoIndent, "noindent", "", false, "Don't print indentation lines")
flags.BoolVarP(cmdFlags, &opts.Colorize, "color", "C", false, "Turn colorization on always")
}

var commandDefinition = &cobra.Command{
@@ -95,6 +94,9 @@ short options as they conflict with rclone's short options.

For a more interactive navigation of the remote see the
[ncdu](/commands/rclone_ncdu/) command.
`,
Annotations: map[string]string{
"versionIntroduced": "v1.38",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
@@ -113,6 +115,7 @@ For a more interactive navigation of the remote see the
opts.SizeSort = sort == "size"
ci := fs.GetConfig(context.Background())
opts.UnitSize = ci.HumanReadable
opts.Colorize = ci.TerminalColorMode != fs.TerminalColorModeNever
if opts.DeepLevel == 0 {
opts.DeepLevel = ci.MaxDepth
}

@@ -67,6 +67,9 @@ Or
upgrade: https://beta.rclone.org/v1.42-005-g56e1e820

`,
Annotations: map[string]string{
"versionIntroduced": "v1.33",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 0, command, args)
if check {

@@ -111,13 +111,6 @@ func createTestEnvironment(t *testing.T) {
var testFolder string
var testConfig string

// removeTestEnvironment removes the test environment created by createTestEnvironment
func removeTestEnvironment(t *testing.T) {
// Remove temporary folder with all contents
err := os.RemoveAll(testFolder)
require.NoError(t, err)
}

// createTestFile creates the file testFolder/name
func createTestFile(name string, t *testing.T) string {
err := os.WriteFile(testFolder+"/"+name, []byte("content_of_"+name), 0666)
@@ -146,19 +139,18 @@ func createSimpleTestData(t *testing.T) string {
createTestFolder("testdata/folderB", t)
createTestFile("testdata/folderB/fileB1.txt", t)
createTestFile("testdata/folderB/fileB2.txt", t)
return testFolder + "/testdata"
}

// removeSimpleTestData removes the test data created by createSimpleTestData
func removeSimpleTestData(t *testing.T) {
err := os.RemoveAll(testFolder + "/testdata")
require.NoError(t, err)
t.Cleanup(func() {
err := os.RemoveAll(testFolder + "/testdata")
require.NoError(t, err)
})

return testFolder + "/testdata"
}

// TestCmdTest demonstrates and verifies the test functions for end-to-end testing of rclone
func TestCmdTest(t *testing.T) {
createTestEnvironment(t)
defer removeTestEnvironment(t)

// Test simple call and output from rclone
out, err := rclone("version")
@@ -213,7 +205,6 @@ func TestCmdTest(t *testing.T) {

// Test creation of simple test data
createSimpleTestData(t)
defer removeSimpleTestData(t)

// Test access to config file and simple test data
out, err = rclone("lsl", "myLocal:"+testFolder)

@@ -17,10 +17,8 @@ import (
func TestEnvironmentVariables(t *testing.T) {

createTestEnvironment(t)
defer removeTestEnvironment(t)

testdataPath := createSimpleTestData(t)
defer removeSimpleTestData(t)

// Non backend flags
// =================
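The cmdtest hunks above replace the paired remove helpers (removeTestEnvironment, removeSimpleTestData) with cleanup registered inside the create helpers, so callers no longer need a matching defer. t.Cleanup is part of the standard testing package and runs the registered functions when the test finishes, in last-in-first-out order; condensed from the createSimpleTestData change above (the elided body is whatever the helper already does):

    func createSimpleTestData(t *testing.T) string {
        // ... create the testdata folders and files ...
        t.Cleanup(func() {
            err := os.RemoveAll(testFolder + "/testdata")
            require.NoError(t, err)
        })
        return testFolder + "/testdata"
    }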