Mirror of https://github.com/rclone/rclone.git, synced 2025-12-24 12:13:19 +00:00

Compare commits: pr-6811-dr ... fix-vfs-la (1 commit)

Commit: b7cba2835d
36	.github/workflows/build.yml (vendored)
@@ -15,24 +15,22 @@ on:
   workflow_dispatch:
     inputs:
       manual:
-        description: Manual run (bypass default conditions)
-        type: boolean
         required: true
        default: true

 jobs:
   build:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
     timeout-minutes: 60
     strategy:
       fail-fast: false
       matrix:
-        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.18', 'go1.19']
+        job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.17', 'go1.18']

         include:
           - job_name: linux
             os: ubuntu-latest
-            go: '1.20'
+            go: '1.19'
             gotags: cmount
             build_flags: '-include "^linux/"'
             check: true
@@ -43,14 +41,14 @@ jobs:

           - job_name: linux_386
             os: ubuntu-latest
-            go: '1.20'
+            go: '1.19'
             goarch: 386
             gotags: cmount
             quicktest: true

           - job_name: mac_amd64
             os: macos-11
-            go: '1.20'
+            go: '1.19'
             gotags: 'cmount'
             build_flags: '-include "^darwin/amd64" -cgo'
             quicktest: true
@@ -59,14 +57,14 @@

           - job_name: mac_arm64
             os: macos-11
-            go: '1.20'
+            go: '1.19'
             gotags: 'cmount'
             build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
             deploy: true

           - job_name: windows
             os: windows-latest
-            go: '1.20'
+            go: '1.19'
             gotags: cmount
             cgo: '0'
             build_flags: '-include "^windows/"'
@@ -76,20 +74,20 @@

           - job_name: other_os
             os: ubuntu-latest
-            go: '1.20'
+            go: '1.19'
             build_flags: '-exclude "^(windows/|darwin/|linux/)"'
             compile_all: true
             deploy: true

-          - job_name: go1.18
+          - job_name: go1.17
             os: ubuntu-latest
-            go: '1.18'
+            go: '1.17'
             quicktest: true
             racequicktest: true

-          - job_name: go1.19
+          - job_name: go1.18
             os: ubuntu-latest
-            go: '1.19'
+            go: '1.18'
             quicktest: true
             racequicktest: true

@@ -124,7 +122,7 @@ jobs:
           sudo modprobe fuse
           sudo chmod 666 /dev/fuse
           sudo chown root:$USER /etc/fuse.conf
-          sudo apt-get install fuse3 libfuse-dev rpm pkg-config
+          sudo apt-get install fuse libfuse-dev rpm pkg-config
         if: matrix.os == 'ubuntu-latest'

       - name: Install Libraries on macOS
@@ -220,7 +218,7 @@
         if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'

   lint:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
     timeout-minutes: 30
     name: "lint"
     runs-on: ubuntu-latest
@@ -239,7 +237,7 @@
       - name: Install Go
        uses: actions/setup-go@v3
        with:
-          go-version: '1.20'
+          go-version: 1.19
          check-latest: true

      - name: Install govulncheck
@@ -249,7 +247,7 @@
         run: govulncheck ./...

   android:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: ${{ github.repository == 'rclone/rclone' || github.event.inputs.manual }}
     timeout-minutes: 30
     name: "android-all"
     runs-on: ubuntu-latest
@@ -264,7 +262,7 @@
       - name: Set up Go
        uses: actions/setup-go@v3
        with:
-          go-version: '1.20'
+          go-version: 1.19

      - name: Go module cache
        uses: actions/cache@v3
2580	MANUAL.html (generated): file diff suppressed because it is too large
2883	MANUAL.txt (generated): file diff suppressed because it is too large
3	Makefile
@@ -81,9 +81,6 @@ quicktest:
 racequicktest:
 	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race ./...

-compiletest:
-	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -run XXX ./...
-
 # Do source code quality checks
 check: rclone
 	@echo "-- START CODE QUALITY REPORT -------------------------------"
@@ -50,7 +50,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
 * IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
 * Koofr [:page_facing_up:](https://rclone.org/koofr/)
-* Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
 * Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
 * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
 * Mega [:page_facing_up:](https://rclone.org/mega/)
@@ -74,7 +74,8 @@ Set vars
 First make the release branch. If this is a second point release then
 this will be done already.

-* git co -b ${BASE_TAG}-stable ${BASE_TAG}.0
+* git branch ${BASE_TAG} ${BASE_TAG}-stable
+* git co ${BASE_TAG}-stable
 * make startstable

 Now
File diff suppressed because it is too large
@@ -1,5 +1,5 @@
-//go:build !plan9 && !solaris && !js && go1.18
-// +build !plan9,!solaris,!js,go1.18
+//go:build !plan9 && !solaris && !js
+// +build !plan9,!solaris,!js

 package azureblob
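These build-constraint edits gate the whole package on platform and, in the newer line, on a minimum Go version as well. As a minimal illustration of the same technique (the package name and version here are illustrative, not from the diff), a constrained file looks like this, with a sibling file carrying the inverted constraint so that `go build ./...` still succeeds everywhere:

//go:build !plan9 && !solaris && !js && go1.18
// +build !plan9,!solaris,!js,go1.18

// This file only compiles on supported platforms AND on Go 1.18 or newer;
// the matching stub file uses the negated constraint.
package example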
@@ -1,11 +1,12 @@
 // Test AzureBlob filesystem interface

-//go:build !plan9 && !solaris && !js && go1.18
-// +build !plan9,!solaris,!js,go1.18
+//go:build !plan9 && !solaris && !js
+// +build !plan9,!solaris,!js

 package azureblob

 import (
+	"context"
 	"testing"

 	"github.com/rclone/rclone/fs"
@@ -16,12 +17,10 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
-		RemoteName: "TestAzureBlob:",
-		NilObject:  (*Object)(nil),
-		TiersToTest: []string{"Hot", "Cool"},
-		ChunkedUpload: fstests.ChunkedUploadConfig{
-			MinChunkSize: defaultChunkSize,
-		},
+		RemoteName:    "TestAzureBlob:",
+		NilObject:     (*Object)(nil),
+		TiersToTest:   []string{"Hot", "Cool"},
+		ChunkedUpload: fstests.ChunkedUploadConfig{},
 	})
 }
@@ -33,6 +32,36 @@ var (
 	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
 )

+// TestServicePrincipalFileSuccess checks that, given a proper JSON file, we can create a token.
+func TestServicePrincipalFileSuccess(t *testing.T) {
+	ctx := context.TODO()
+	credentials := `
+{
+	"appId": "my application (client) ID",
+	"password": "my secret",
+	"tenant": "my active directory tenant ID"
+}
+`
+	tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
+	if assert.NoError(t, err) {
+		assert.NotNil(t, tokenRefresher)
+	}
+}
+
+// TestServicePrincipalFileFailure checks that, given a JSON file with a missing secret, it returns an error.
+func TestServicePrincipalFileFailure(t *testing.T) {
+	ctx := context.TODO()
+	credentials := `
+{
+	"appId": "my application (client) ID",
+	"tenant": "my active directory tenant ID"
+}
+`
+	_, err := newServicePrincipalTokenRefresher(ctx, []byte(credentials))
+	assert.Error(t, err)
+	assert.EqualError(t, err, "error creating service principal token: parameter 'secret' cannot be empty")
+}
+
 func TestValidateAccessTier(t *testing.T) {
 	tests := map[string]struct {
 		accessTier string
@@ -1,7 +1,7 @@
 // Build for azureblob for unsupported platforms to stop go complaining
 // about "no buildable Go source files "

-//go:build plan9 || solaris || js || !go1.18
-// +build plan9 solaris js !go1.18
+//go:build plan9 || solaris || js
+// +build plan9 solaris js

 package azureblob
136	backend/azureblob/imds.go (Normal file)
@@ -0,0 +1,136 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package azureblob

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"

	"github.com/Azure/go-autorest/autorest/adal"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fshttp"
)

const (
	azureResource      = "https://storage.azure.com"
	imdsAPIVersion     = "2018-02-01"
	msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token"
)

// This custom type is used to add the port the test server has bound to
// to the request context.
type testPortKey string

type msiIdentifierType int

const (
	msiClientID msiIdentifierType = iota
	msiObjectID
	msiResourceID
)

type userMSI struct {
	Type  msiIdentifierType
	Value string
}

type httpError struct {
	Response *http.Response
}

func (e httpError) Error() string {
	return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status)
}

// GetMSIToken attempts to obtain an MSI token from the Azure Instance
// Metadata Service.
func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) {
	// Attempt to get an MSI token; silently continue if unsuccessful.
	// This code has been lovingly stolen from azcopy's OAuthTokenManager.
	result := adal.Token{}
	req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil)
	if err != nil {
		fs.Debugf(nil, "Failed to create request: %v", err)
		return result, err
	}
	params := req.URL.Query()
	params.Set("resource", azureResource)
	params.Set("api-version", imdsAPIVersion)

	// Specify user-assigned identity if requested.
	if identity != nil {
		switch identity.Type {
		case msiClientID:
			params.Set("client_id", identity.Value)
		case msiObjectID:
			params.Set("object_id", identity.Value)
		case msiResourceID:
			params.Set("mi_res_id", identity.Value)
		default:
			// If this happens, the calling function and this one don't agree on
			// what valid ID types exist.
			return result, fmt.Errorf("unknown MSI identity type specified")
		}
	}
	req.URL.RawQuery = params.Encode()

	// The Metadata header is required by all calls to IMDS.
	req.Header.Set("Metadata", "true")

	// If this function is run in a test, query the test server instead of IMDS.
	testPort, isTest := ctx.Value(testPortKey("testPort")).(int)
	if isTest {
		req.URL.Host = fmt.Sprintf("localhost:%d", testPort)
		req.Host = req.URL.Host
	}

	// Send request
	httpClient := fshttp.NewClient(ctx)
	resp, err := httpClient.Do(req)
	if err != nil {
		return result, fmt.Errorf("MSI is not enabled on this VM: %w", err)
	}
	defer func() { // resp and Body should not be nil
		_, err = io.Copy(io.Discard, resp.Body)
		if err != nil {
			fs.Debugf(nil, "Unable to drain IMDS response: %v", err)
		}
		err = resp.Body.Close()
		if err != nil {
			fs.Debugf(nil, "Unable to close IMDS response: %v", err)
		}
	}()
	// Check if the status code indicates success
	// The request returns 200 currently, add 201 and 202 as well for possible extension.
	switch resp.StatusCode {
	case 200, 201, 202:
		break
	default:
		body, _ := io.ReadAll(resp.Body)
		fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body))
		return result, httpError{Response: resp}
	}

	b, err := io.ReadAll(resp.Body)
	if err != nil {
		return result, fmt.Errorf("couldn't read IMDS response: %w", err)
	}
	// Remove BOM, if any. azcopy does this so I'm following along.
	b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))

	// This would be a good place to persist the token if a large number of rclone
	// invocations are being made in a short amount of time. If the token is
	// persisted, the azureblob code will need to check for expiry before every
	// storage API call.
	err = json.Unmarshal(b, &result)
	if err != nil {
		return result, fmt.Errorf("couldn't unmarshal IMDS response: %w", err)
	}

	return result, nil
}
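For orientation, a short sketch of how a caller might drive GetMSIToken; the client ID below is a placeholder and the wrapper function is hypothetical, not part of this file:

// Sketch (not from the diff): fetch an IMDS token for a user-assigned identity.
func exampleMSIToken(ctx context.Context) error {
	// Placeholder client ID; a real caller takes this from the rclone config.
	identity := &userMSI{Type: msiClientID, Value: "00000000-0000-0000-0000-000000000000"}
	token, err := GetMSIToken(ctx, identity)
	if err != nil {
		// Typical failure: not running on an Azure VM, or no identity assigned.
		return fmt.Errorf("MSI token not available: %w", err)
	}
	// token.AccessToken is a bearer token for https://storage.azure.com
	// until it expires.
	fmt.Println("token expires:", token.ExpiresOn)
	return nil
}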
118	backend/azureblob/imds_test.go (Normal file)
@@ -0,0 +1,118 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package azureblob

import (
	"context"
	"encoding/json"
	"net/http"
	"net/http/httptest"
	"strconv"
	"strings"
	"testing"

	"github.com/Azure/go-autorest/autorest/adal"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func handler(t *testing.T, actual *map[string]string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		err := r.ParseForm()
		require.NoError(t, err)
		parameters := r.URL.Query()
		(*actual)["path"] = r.URL.Path
		(*actual)["Metadata"] = r.Header.Get("Metadata")
		(*actual)["method"] = r.Method
		for paramName := range parameters {
			(*actual)[paramName] = parameters.Get(paramName)
		}
		// Make response.
		response := adal.Token{}
		responseBytes, err := json.Marshal(response)
		require.NoError(t, err)
		_, err = w.Write(responseBytes)
		require.NoError(t, err)
	}
}

func TestManagedIdentity(t *testing.T) {
	// test user-assigned identity specifiers to use
	testMSIClientID := "d859b29f-5c9c-42f8-a327-ec1bc6408d79"
	testMSIObjectID := "9ffeb650-3ca0-4278-962b-5a38d520591a"
	testMSIResourceID := "/subscriptions/fe714c49-b8a4-4d49-9388-96a20daa318f/resourceGroups/somerg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/someidentity"
	tests := []struct {
		identity              *userMSI
		identityParameterName string
		expectedAbsent        []string
	}{
		{&userMSI{msiClientID, testMSIClientID}, "client_id", []string{"object_id", "mi_res_id"}},
		{&userMSI{msiObjectID, testMSIObjectID}, "object_id", []string{"client_id", "mi_res_id"}},
		{&userMSI{msiResourceID, testMSIResourceID}, "mi_res_id", []string{"object_id", "client_id"}},
		{nil, "(default)", []string{"object_id", "client_id", "mi_res_id"}},
	}
	alwaysExpected := map[string]string{
		"path":        "/metadata/identity/oauth2/token",
		"resource":    "https://storage.azure.com",
		"Metadata":    "true",
		"api-version": "2018-02-01",
		"method":      "GET",
	}
	for _, test := range tests {
		actual := make(map[string]string, 10)
		testServer := httptest.NewServer(handler(t, &actual))
		defer testServer.Close()
		testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
		require.NoError(t, err)
		ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
		_, err = GetMSIToken(ctx, test.identity)
		require.NoError(t, err)

		// Validate expected query parameters present
		expected := make(map[string]string)
		for k, v := range alwaysExpected {
			expected[k] = v
		}
		if test.identity != nil {
			expected[test.identityParameterName] = test.identity.Value
		}

		for key := range expected {
			value, exists := actual[key]
			if assert.Truef(t, exists, "test of %s: query parameter %s was not passed",
				test.identityParameterName, key) {
				assert.Equalf(t, expected[key], value,
					"test of %s: parameter %s has incorrect value", test.identityParameterName, key)
			}
		}

		// Validate unexpected query parameters absent
		for _, key := range test.expectedAbsent {
			_, exists := actual[key]
			assert.Falsef(t, exists, "query parameter %s was unexpectedly passed", key)
		}
	}
}

func errorHandler(resultCode int) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "Test error generated", resultCode)
	}
}

func TestIMDSErrors(t *testing.T) {
	errorCodes := []int{404, 429, 500}
	for _, code := range errorCodes {
		testServer := httptest.NewServer(errorHandler(code))
		defer testServer.Close()
		testServerPort, err := strconv.Atoi(strings.Split(testServer.URL, ":")[2])
		require.NoError(t, err)
		ctx := context.WithValue(context.TODO(), testPortKey("testPort"), testServerPort)
		_, err = GetMSIToken(ctx, nil)
		require.Error(t, err)
		httpErr, ok := err.(httpError)
		require.Truef(t, ok, "HTTP error %d did not result in an httpError object", code)
		assert.Equalf(t, httpErr.Response.StatusCode, code, "desired error %d but didn't get it", code)
	}
}
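Note how these tests redirect the hard-coded IMDS endpoint (169.254.169.254) to a local httptest server by smuggling the port through a context value keyed by testPortKey. A generic sketch of that seam, independent of rclone (names here are made up):

// Sketch: production code reads an optional test port from the context
// and rewrites the target host, so tests never touch the real endpoint.
type portKey string

func targetHost(ctx context.Context) string {
	if port, ok := ctx.Value(portKey("testPort")).(int); ok {
		return fmt.Sprintf("localhost:%d", port) // test server
	}
	return "169.254.169.254" // real metadata service
}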
@@ -1221,7 +1221,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
 					fs.Errorf(object.Name, "Can't create object %v", err)
 					continue
 				}
-				tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "deleting")
+				tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
 				err = f.deleteByID(ctx, object.ID, object.Name)
 				checkErr(err)
 				tr.Done(ctx, err)
@@ -1235,7 +1235,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
 			if err != nil {
 				fs.Errorf(object, "Can't create object %+v", err)
 			}
-			tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
+			tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
 			if oldOnly && last != remote {
 				// Check current version of the file
 				if object.Action == "hide" {
@@ -14,7 +14,6 @@ import (
 	"io"
 	"strings"
 	"sync"
-	"time"

 	"github.com/rclone/rclone/backend/b2/api"
 	"github.com/rclone/rclone/fs"
@@ -22,7 +21,6 @@ import (
 	"github.com/rclone/rclone/fs/chunksize"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/atexit"
-	"github.com/rclone/rclone/lib/pool"
 	"github.com/rclone/rclone/lib/rest"
 	"golang.org/x/sync/errgroup"
 )
@@ -430,47 +428,18 @@ func (up *largeUpload) Upload(ctx context.Context) (err error) {
 	defer atexit.OnError(&err, func() { _ = up.cancel(ctx) })()
 	fs.Debugf(up.o, "Starting %s of large file in %d chunks (id %q)", up.what, up.parts, up.id)
 	var (
-		g, gCtx    = errgroup.WithContext(ctx)
-		remaining  = up.size
-		uploadPool *pool.Pool
-		ci         = fs.GetConfig(ctx)
+		g, gCtx   = errgroup.WithContext(ctx)
+		remaining = up.size
 	)
-	// If using large chunk size then make a temporary pool
-	if up.chunkSize <= int64(up.f.opt.ChunkSize) {
-		uploadPool = up.f.pool
-	} else {
-		uploadPool = pool.New(
-			time.Duration(up.f.opt.MemoryPoolFlushTime),
-			int(up.chunkSize),
-			ci.Transfers,
-			up.f.opt.MemoryPoolUseMmap,
-		)
-		defer uploadPool.Flush()
-	}
-	// Get an upload token and a buffer
-	getBuf := func() (buf []byte) {
-		up.f.getBuf(true)
-		if !up.doCopy {
-			buf = uploadPool.Get()
-		}
-		return buf
-	}
-	// Put an upload token and a buffer
-	putBuf := func(buf []byte) {
-		if !up.doCopy {
-			uploadPool.Put(buf)
-		}
-		up.f.putBuf(nil, true)
-	}
 	g.Go(func() error {
 		for part := int64(1); part <= up.parts; part++ {
 			// Get a block of memory from the pool and token which limits concurrency.
-			buf := getBuf()
+			buf := up.f.getBuf(up.doCopy)

 			// Fail fast, in case an errgroup managed function returns an error
 			// gCtx is cancelled. There is no point in uploading all the other parts.
 			if gCtx.Err() != nil {
-				putBuf(buf)
+				up.f.putBuf(buf, up.doCopy)
 				return nil
 			}

@@ -484,14 +453,14 @@ func (up *largeUpload) Upload(ctx context.Context) (err error) {
 				buf = buf[:reqSize]
 				_, err = io.ReadFull(up.in, buf)
 				if err != nil {
-					putBuf(buf)
+					up.f.putBuf(buf, up.doCopy)
 					return err
 				}
 			}

 			part := part // for the closure
 			g.Go(func() (err error) {
-				defer putBuf(buf)
+				defer up.f.putBuf(buf, up.doCopy)
 				if !up.doCopy {
 					err = up.transferChunk(gCtx, part, buf)
 				} else {
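The key idea on both sides of this hunk is that acquiring a buffer also acquires a concurrency token, so the number of in-flight chunk uploads stays bounded. A hedged, self-contained sketch of that pairing with hypothetical names (this is not rclone's implementation, just the general pattern):

package main

import "sync"

// bufPool pairs a token channel (capacity = max concurrency) with a buffer
// pool: taking a buffer also takes the right to run one more upload.
type bufPool struct {
	tokens chan struct{}
	pool   sync.Pool
}

func newBufPool(concurrency, bufSize int) *bufPool {
	return &bufPool{
		tokens: make(chan struct{}, concurrency),
		pool:   sync.Pool{New: func() any { return make([]byte, bufSize) }},
	}
}

// getBuf blocks until a token is free, then returns a buffer. Pass
// noBuf=true (as server-side copies do) to take only the token.
func (p *bufPool) getBuf(noBuf bool) []byte {
	p.tokens <- struct{}{}
	if noBuf {
		return nil
	}
	return p.pool.Get().([]byte)
}

// putBuf returns the buffer (if any) and releases the token.
func (p *bufPool) putBuf(buf []byte, noBuf bool) {
	if !noBuf {
		p.pool.Put(buf) // sketch: pooling a slice directly is fine here
	}
	<-p.tokens
}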
2	backend/cache/cache.go (vendored)
@@ -1038,7 +1038,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		}
 		fs.Debugf(dir, "list: remove entry: %v", entryRemote)
 	}
-	entries = nil //nolint:ineffassign
+	entries = nil

 	// and then iterate over the ones from source (temp Objects will override source ones)
 	var batchDirectories []*Directory
61	backend/cache/cache_internal_test.go (vendored)
@@ -101,12 +101,14 @@ func TestMain(m *testing.M) {

 func TestInternalListRootAndInnerRemotes(t *testing.T) {
 	id := fmt.Sprintf("tilrair%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	// Instantiate inner fs
 	innerFolder := "inner"
 	runInstance.mkdir(t, rootFs, innerFolder)
-	rootFs2, _ := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil)
+	rootFs2, boltDb2 := runInstance.newCacheFs(t, remoteName, id+"/"+innerFolder, true, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs2, boltDb2)

 	runInstance.writeObjectString(t, rootFs2, "one", "content")
 	listRoot, err := runInstance.list(t, rootFs, "")
@@ -223,7 +225,8 @@ func TestInternalVfsCache(t *testing.T) {

 func TestInternalObjWrapFsFound(t *testing.T) {
 	id := fmt.Sprintf("tiowff%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
@@ -255,7 +258,8 @@ func TestInternalObjWrapFsFound(t *testing.T) {

 func TestInternalObjNotFound(t *testing.T) {
 	id := fmt.Sprintf("tionf%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	obj, err := rootFs.NewObject(context.Background(), "404")
 	require.Error(t, err)
@@ -265,7 +269,8 @@
 func TestInternalCachedWrittenContentMatches(t *testing.T) {
 	testy.SkipUnreliable(t)
 	id := fmt.Sprintf("ticwcm%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
@@ -292,7 +297,8 @@ func TestInternalDoubleWrittenContentMatches(t *testing.T) {
 		t.Skip("Skip test on windows/386")
 	}
 	id := fmt.Sprintf("tidwcm%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	// write the object
 	runInstance.writeRemoteString(t, rootFs, "one", "one content")
@@ -310,7 +316,8 @@
 func TestInternalCachedUpdatedContentMatches(t *testing.T) {
 	testy.SkipUnreliable(t)
 	id := fmt.Sprintf("ticucm%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
 	var err error

 	// create some rand test data
@@ -339,7 +346,8 @@
 func TestInternalWrappedWrittenContentMatches(t *testing.T) {
 	id := fmt.Sprintf("tiwwcm%v", time.Now().Unix())
 	vfsflags.Opt.DirCacheTime = time.Second
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
 	if runInstance.rootIsCrypt {
 		t.Skip("test skipped with crypt remote")
 	}
@@ -369,7 +377,8 @@
 func TestInternalLargeWrittenContentMatches(t *testing.T) {
 	id := fmt.Sprintf("tilwcm%v", time.Now().Unix())
 	vfsflags.Opt.DirCacheTime = time.Second
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
 	if runInstance.rootIsCrypt {
 		t.Skip("test skipped with crypt remote")
 	}
@@ -395,7 +404,8 @@

 func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 	id := fmt.Sprintf("tiwfcns%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
@@ -449,7 +459,8 @@

 func TestInternalMoveWithNotify(t *testing.T) {
 	id := fmt.Sprintf("timwn%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
 	if !runInstance.wrappedIsExternal {
 		t.Skipf("Not external")
 	}
@@ -535,7 +546,8 @@

 func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 	id := fmt.Sprintf("tincep%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
 	if !runInstance.wrappedIsExternal {
 		t.Skipf("Not external")
 	}
@@ -621,7 +633,8 @@

 func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
 	id := fmt.Sprintf("ticsadcf%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, nil)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
@@ -653,7 +666,8 @@

 func TestInternalCacheWrites(t *testing.T) {
 	id := "ticw"
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"writes": "true"})
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
@@ -674,7 +688,8 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
 		t.Skip("Skip test on windows/386")
 	}
 	id := fmt.Sprintf("timcsr%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"workers": "1"})
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
@@ -709,7 +724,8 @@
 func TestInternalExpiredEntriesRemoved(t *testing.T) {
 	id := fmt.Sprintf("tieer%v", time.Now().Unix())
 	vfsflags.Opt.DirCacheTime = time.Second * 4 // needs to be lower than the defined
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true, nil)
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, map[string]string{"info_age": "5s"}, nil)
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
 	cfs, err := runInstance.getCacheFs(rootFs)
 	require.NoError(t, err)
@@ -746,7 +762,9 @@ func TestInternalBug2117(t *testing.T) {
 	vfsflags.Opt.DirCacheTime = time.Second * 10

 	id := fmt.Sprintf("tib2117%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, false, true, map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
+		map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	if runInstance.rootIsCrypt {
 		t.Skipf("skipping crypt")
@@ -847,7 +865,7 @@ func (r *run) encryptRemoteIfNeeded(t *testing.T, remote string) string {
 	return enc
 }

-func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, flags map[string]string) (fs.Fs, *cache.Persistent) {
+func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool, cfg map[string]string, flags map[string]string) (fs.Fs, *cache.Persistent) {
 	fstest.Initialise()
 	remoteExists := false
 	for _, s := range config.FileSections() {
@@ -940,15 +958,10 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 	}
 	err = f.Mkdir(context.Background(), "")
 	require.NoError(t, err)
-
-	t.Cleanup(func() {
-		runInstance.cleanupFs(t, f)
-	})
-
 	return f, boltDb
 }

-func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
+func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
 	err := f.Features().Purge(context.Background(), "")
 	require.NoError(t, err)
 	cfs, err := r.getCacheFs(f)
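A sketch of how a test reads after this widening of newCacheFs, assuming the argument order stays cfg (cache backend options such as info_age) followed by flags (flag-style overrides), and that callers now clean up explicitly instead of relying on t.Cleanup; the test name is hypothetical:

func TestExampleCacheFs(t *testing.T) {
	id := fmt.Sprintf("example%v", time.Now().Unix())
	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
		map[string]string{"info_age": "5s"}, // cfg: cache backend options
		map[string]string{"workers": "1"})  // flags: flag-style overrides
	defer runInstance.cleanupFs(t, rootFs, boltDb) // explicit cleanup replaces t.Cleanup
	_ = rootFs
}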
24	backend/cache/cache_upload_test.go (vendored)
@@ -21,8 +21,10 @@ import (

 func TestInternalUploadTempDirCreated(t *testing.T) {
 	id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
-	runInstance.newCacheFs(t, remoteName, id, false, true,
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
+		nil,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
 	require.NoError(t, err)
@@ -61,7 +63,9 @@ func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltD
 func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
 	id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+		nil,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
 }
@@ -69,15 +73,19 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
 func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
 	id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+		nil,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
 }

 func TestInternalUploadMoveExistingFile(t *testing.T) {
 	id := fmt.Sprintf("tiumef%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+		nil,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	err := rootFs.Mkdir(context.Background(), "one")
 	require.NoError(t, err)
@@ -111,8 +119,10 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {

 func TestInternalUploadTempPathCleaned(t *testing.T) {
 	id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+		nil,
 		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	err := rootFs.Mkdir(context.Background(), "one")
 	require.NoError(t, err)
@@ -152,8 +162,10 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {

 func TestInternalUploadQueueMoreFiles(t *testing.T) {
 	id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
-	rootFs, _ := runInstance.newCacheFs(t, remoteName, id, true, true,
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+		nil,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	err := rootFs.Mkdir(context.Background(), "test")
 	require.NoError(t, err)
@@ -201,7 +213,9 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
 func TestInternalUploadTempFileOperations(t *testing.T) {
 	id := "tiutfo"
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+		nil,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	boltDb.PurgeTempUploads()

@@ -329,7 +343,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	id := "tiuufo"
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+		nil,
 		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)

 	boltDb.PurgeTempUploads()

@@ -631,7 +631,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
 	if err != nil {
 		return nil, err
 	}
-	uSrc := fs.NewOverrideRemote(src, uRemote)
+	uSrc := operations.NewOverrideRemote(src, uRemote)
 	var o fs.Object
 	if stream {
 		o, err = u.f.Features().PutStream(ctx, in, uSrc, options...)
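This hunk only swaps the package that provides NewOverrideRemote (the helper moved between the operations and fs packages across rclone versions). A hedged sketch of what the wrapper is for, with a hypothetical path, is:

// Sketch (not from the diff): wrap an ObjectInfo so it reports a new remote
// path while size, modtime and hashes pass through unchanged, letting Put
// upload the source data under a different name.
func renameForUpload(src fs.ObjectInfo) fs.ObjectInfo {
	return fs.NewOverrideRemote(src, "renamed/path.bin") // fs.* in newer rclone, operations.* in older
}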
@@ -235,7 +235,7 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
 	// the features here are ones we could support, and they are
 	// ANDed with the ones from wrappedFs
 	f.features = (&fs.Features{
-		CaseInsensitive:         !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
+		CaseInsensitive:         cipher.NameEncryptionMode() == NameEncryptionOff,
 		DuplicateFiles:          true,
 		ReadMimeType:            false, // MimeTypes not supported with crypt
 		WriteMimeType:           false,
@@ -396,8 +396,6 @@ type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ..

 // put implements Put or PutStream
 func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
-	ci := fs.GetConfig(ctx)
-
 	if f.opt.NoDataEncryption {
 		o, err := put(ctx, in, f.newObjectInfo(src, nonce{}), options...)
 		if err == nil && o != nil {
@@ -415,9 +413,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
 	// Find a hash the destination supports to compute a hash of
 	// the encrypted data
 	ht := f.Fs.Hashes().GetOne()
-	if ci.IgnoreChecksum {
-		ht = hash.None
-	}
 	var hasher *hash.MultiHasher
 	if ht != hash.None {
 		hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
@@ -1052,11 +1047,10 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
 	// Get the underlying object if there is one
 	if srcObj, ok = o.ObjectInfo.(fs.Object); ok {
 		// Prefer direct interface assertion
-	} else if do, ok := o.ObjectInfo.(*fs.OverrideRemote); ok {
-		// Unwrap if it is an operations.OverrideRemote
+	} else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok {
+		// Otherwise likely is an operations.OverrideRemote
 		srcObj = do.UnWrap()
 	} else {
 		// Otherwise don't unwrap any further
 		return "", nil
 	}
 	// if this is wrapping a local object then we work out the hash
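The last hunk changes how the crypt ObjectInfo digs out an underlying object: instead of asserting the concrete *fs.OverrideRemote type, the replacement code asserts the fs.ObjectUnWrapper interface, which any wrapper can satisfy. A minimal sketch of that interface-based idiom (the helper function is made up; the interface names are rclone's):

// Sketch: any wrapper exposing UnWrap() can be peeled without the caller
// knowing its concrete type.
func underlying(oi fs.ObjectInfo) fs.Object {
	if o, ok := oi.(fs.Object); ok {
		return o // already a full object
	}
	if w, ok := oi.(fs.ObjectUnWrapper); ok {
		return w.UnWrap() // e.g. an OverrideRemote wrapper
	}
	return nil // nothing to unwrap
}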
@@ -17,28 +17,41 @@ import (
 	"github.com/stretchr/testify/require"
 )

+type testWrapper struct {
+	fs.ObjectInfo
+}
+
+// UnWrap returns the Object that this Object is wrapping or nil if it
+// isn't wrapping anything
+func (o testWrapper) UnWrap() fs.Object {
+	if o, ok := o.ObjectInfo.(fs.Object); ok {
+		return o
+	}
+	return nil
+}
+
 // Create a temporary local fs to upload things from

-func makeTempLocalFs(t *testing.T) (localFs fs.Fs) {
+func makeTempLocalFs(t *testing.T) (localFs fs.Fs, cleanup func()) {
 	localFs, err := fs.TemporaryLocalFs(context.Background())
 	require.NoError(t, err)
-	t.Cleanup(func() {
+	cleanup = func() {
 		require.NoError(t, localFs.Rmdir(context.Background(), ""))
-	})
-	return localFs
+	}
+	return localFs, cleanup
 }

 // Upload a file to a remote
-func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object) {
+func uploadFile(t *testing.T, f fs.Fs, remote, contents string) (obj fs.Object, cleanup func()) {
 	inBuf := bytes.NewBufferString(contents)
 	t1 := time.Date(2012, time.December, 17, 18, 32, 31, 0, time.UTC)
 	upSrc := object.NewStaticObjectInfo(remote, t1, int64(len(contents)), true, nil, nil)
 	obj, err := f.Put(context.Background(), inBuf, upSrc)
 	require.NoError(t, err)
-	t.Cleanup(func() {
+	cleanup = func() {
 		require.NoError(t, obj.Remove(context.Background()))
-	})
-	return obj
+	}
+	return obj, cleanup
 }

 // Test the ObjectInfo
@@ -52,9 +65,11 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
 		path = "_wrap"
 	}

-	localFs := makeTempLocalFs(t)
+	localFs, cleanupLocalFs := makeTempLocalFs(t)
+	defer cleanupLocalFs()

-	obj := uploadFile(t, localFs, path, contents)
+	obj, cleanupObj := uploadFile(t, localFs, path, contents)
+	defer cleanupObj()

 	// encrypt the data
 	inBuf := bytes.NewBufferString(contents)
@@ -68,7 +83,7 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
 	var oi fs.ObjectInfo = obj
 	if wrap {
 		// wrap the object in an fs.ObjectUnwrapper if required
-		oi = fs.NewOverrideRemote(oi, "new_remote")
+		oi = testWrapper{oi}
 	}

 	// wrap the object in a crypt for upload using the nonce we
@@ -101,13 +116,16 @@ func testComputeHash(t *testing.T, f *Fs) {
 		t.Skipf("%v: does not support hashes", f.Fs)
 	}

-	localFs := makeTempLocalFs(t)
+	localFs, cleanupLocalFs := makeTempLocalFs(t)
+	defer cleanupLocalFs()

 	// Upload a file to localFs as a test object
-	localObj := uploadFile(t, localFs, path, contents)
+	localObj, cleanupLocalObj := uploadFile(t, localFs, path, contents)
+	defer cleanupLocalObj()

 	// Upload the same data to the remote Fs also
-	remoteObj := uploadFile(t, f, path, contents)
+	remoteObj, cleanupRemoteObj := uploadFile(t, f, path, contents)
+	defer cleanupRemoteObj()

 	// Calculate the expected Hash of the remote object
 	computedHash, err := f.ComputeHash(ctx, remoteObj.(*Object), localObj, hashType)
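These helper hunks trade t.Cleanup registration for explicitly returned cleanup functions (the older style). A side-by-side sketch of the two idioms; the Thing type and helpers are made up:

// Newer style: the helper registers teardown itself.
func newThing(t *testing.T) *Thing {
	th := openThing()
	t.Cleanup(func() { th.Close() }) // runs automatically when the test ends
	return th
}

// Older style: the caller owns teardown and must `defer cleanup()`.
func newThingOld(t *testing.T) (*Thing, func()) {
	th := openThing()
	return th, func() { th.Close() }
}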
@@ -18,6 +18,7 @@ import (
 	"net/http"
 	"os"
 	"path"
+	"regexp"
 	"sort"
 	"strconv"
 	"strings"
@@ -202,7 +203,7 @@ func init() {
 			m.Set("root_folder_id", "appDataFolder")
 		}

-		if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" && !opt.EnvAuth {
+		if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
 			return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{
 				OAuth2Config: driveConfig,
 			})
@@ -451,11 +452,7 @@ If downloading a file returns the error "This file has been identified
 as malware or spam and cannot be downloaded" with the error code
 "cannotDownloadAbusiveFile" then supply this flag to rclone to
 indicate you acknowledge the risks of downloading the file and rclone
-will download it anyway.
-
-Note that if you are using service account it will need Manager
-permission (not Content Manager) to for this flag to work. If the SA
-does not have the right permission, Google will just ignore the flag.`,
+will download it anyway.`,
 			Advanced: true,
 		}, {
 			Name: "keep_revision_forever",
@@ -598,18 +595,6 @@ resource key is no needed.
 			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
 			// Don't encode / as it's a valid name character in drive.
 			Default: encoder.EncodeInvalidUtf8,
-		}, {
-			Name:     "env_auth",
-			Help:     "Get IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
-			Default:  false,
-			Advanced: true,
-			Examples: []fs.OptionExample{{
-				Value: "false",
-				Help:  "Enter AWS credentials in the next step.",
-			}, {
-				Value: "true",
-				Help:  "Get GCP IAM credentials from the environment (env vars or IAM).",
-			}},
 		}}...),
 	})
@@ -666,7 +651,6 @@ type Options struct {
 	SkipDanglingShortcuts bool                 `config:"skip_dangling_shortcuts"`
 	ResourceKey           string               `config:"resource_key"`
 	Enc                   encoder.MultiEncoder `config:"encoding"`
-	EnvAuth               bool                 `config:"env_auth"`
 }

 // Fs represents a remote drive server
@@ -1135,12 +1119,6 @@ func createOAuthClient(ctx context.Context, opt *Options, name string, m configm
 		if err != nil {
 			return nil, fmt.Errorf("failed to create oauth client from service account: %w", err)
 		}
-	} else if opt.EnvAuth {
-		scopes := driveScopes(opt.Scope)
-		oAuthClient, err = google.DefaultClient(ctx, scopes...)
-		if err != nil {
-			return nil, fmt.Errorf("failed to create client from environment: %w", err)
-		}
 	} else {
 		oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
 		if err != nil {
@@ -3345,9 +3323,9 @@ This takes an optional directory to trash which make this easier to
 use via the API.

     rclone backend untrash drive:directory
-    rclone backend --interactive untrash drive:directory subdir
+    rclone backend -i untrash drive:directory subdir

-Use the --interactive/-i or --dry-run flag to see what would be restored before restoring it.
+Use the -i flag to see what would be restored before restoring it.

 Result:
@@ -3377,7 +3355,7 @@ component will be used as the file name.
 If the destination is a drive backend then server-side copying will be
 attempted if possible.

-Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
+Use the -i flag to see what would be copied before copying.
 `,
 }, {
 	Name: "exportformats",
@@ -3453,12 +3431,13 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 	if err != nil {
 		return nil, err
 	}
+	re := regexp.MustCompile(`[^\w_. -]+`)
 	if _, ok := opt["config"]; ok {
 		lines := []string{}
 		upstreams := []string{}
 		names := make(map[string]struct{}, len(drives))
 		for i, drive := range drives {
-			name := fspath.MakeConfigName(drive.Name)
+			name := re.ReplaceAllString(drive.Name, "_")
 			for {
 				if _, found := names[name]; !found {
 					break
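The removed env_auth path leaned on Google's Application Default Credentials. For reference, a minimal standalone sketch of that mechanism outside rclone (the scope is illustrative):

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	// DefaultClient checks GOOGLE_APPLICATION_CREDENTIALS, then gcloud's
	// stored credentials, then the GCE metadata server.
	client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/drive")
	if err != nil {
		log.Fatalf("no default credentials: %v", err)
	}
	_ = client // hand this to the Drive SDK or use it for API calls directly
}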
@@ -70,7 +70,7 @@ func init() {

When using implicit FTP over TLS the client connects using TLS
right from the start which breaks compatibility with
non-TLS-aware servers. This is usually served over port 990 rather
-than port 21. Cannot be used in combination with explicit FTPS.`,
+than port 21. Cannot be used in combination with explicit FTP.`,
			Default: false,
		}, {
			Name: "explicit_tls",
@@ -78,7 +78,7 @@ than port 21. Cannot be used in combination with explicit FTPS.`,

When using explicit FTP over TLS the client explicitly requests
security from the server in order to upgrade a plain text connection
-to an encrypted one. Cannot be used in combination with implicit FTPS.`,
+to an encrypted one. Cannot be used in combination with implicit FTP.`,
			Default: false,
		}, {
			Name: "concurrency",
@@ -657,7 +657,8 @@ func (f *Fs) dirFromStandardPath(dir string) string {
 // findItem finds a directory entry for the name in its parent directory
 func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err error) {
 	// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
-	if remote == "" || remote == "." || remote == "/" {
+	fullPath := path.Join(f.root, remote)
+	if fullPath == "" || fullPath == "." || fullPath == "/" {
 		// if root, assume exists and synthesize an entry
 		return &ftp.Entry{
 			Name: "",
@@ -665,32 +666,13 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
 			Time: time.Now(),
 		}, nil
 	}
+	dir := path.Dir(fullPath)
+	base := path.Base(fullPath)
+
 	c, err := f.getFtpConnection(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("findItem: %w", err)
 	}
-
-	// returns TRUE if MLST is supported which is required to call GetEntry
-	if c.IsTimePreciseInList() {
-		entry, err := c.GetEntry(f.opt.Enc.FromStandardPath(remote))
-		f.putFtpConnection(&c, err)
-		if err != nil {
-			err = translateErrorFile(err)
-			if err == fs.ErrorObjectNotFound {
-				return nil, nil
-			}
-			return nil, err
-		}
-		if entry != nil {
-			f.entryToStandard(entry)
-		}
-		return entry, nil
-	}
-
-	dir := path.Dir(remote)
-	base := path.Base(remote)

 	files, err := c.List(f.dirFromStandardPath(dir))
 	f.putFtpConnection(&c, err)
 	if err != nil {
@@ -709,7 +691,7 @@ func (f *Fs) findItem(ctx context.Context, remote string) (entry *ftp.Entry, err
 // it returns the error fs.ErrorObjectNotFound.
 func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
 	// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
-	entry, err := f.findItem(ctx, path.Join(f.root, remote))
+	entry, err := f.findItem(ctx, remote)
 	if err != nil {
 		return nil, err
 	}
@@ -731,7 +713,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err err

 // dirExists checks the directory pointed to by remote exists or not
 func (f *Fs) dirExists(ctx context.Context, remote string) (exists bool, err error) {
-	entry, err := f.findItem(ctx, path.Join(f.root, remote))
+	entry, err := f.findItem(ctx, remote)
 	if err != nil {
 		return false, fmt.Errorf("dirExists: %w", err)
 	}
@@ -875,18 +857,32 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 // getInfo reads the FileInfo for a path
 func (f *Fs) getInfo(ctx context.Context, remote string) (fi *FileInfo, err error) {
 	// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
-	file, err := f.findItem(ctx, remote)
+	dir := path.Dir(remote)
+	base := path.Base(remote)
+
+	c, err := f.getFtpConnection(ctx)
 	if err != nil {
-		return nil, err
-	} else if file != nil {
-		info := &FileInfo{
-			Name:    remote,
-			Size:    file.Size,
-			ModTime: file.Time,
-			precise: f.fLstTime,
-			IsDir:   file.Type == ftp.EntryTypeFolder,
+		return nil, fmt.Errorf("getInfo: %w", err)
+	}
+	files, err := c.List(f.dirFromStandardPath(dir))
+	f.putFtpConnection(&c, err)
+	if err != nil {
+		return nil, translateErrorFile(err)
+	}
+
+	for i := range files {
+		file := files[i]
+		f.entryToStandard(file)
+		if file.Name == base {
+			info := &FileInfo{
+				Name:    remote,
+				Size:    file.Size,
+				ModTime: file.Time,
+				precise: f.fLstTime,
+				IsDir:   file.Type == ftp.EntryTypeFolder,
+			}
+			return info, nil
 		}
-		return info, nil
 	}
 	return nil, fs.ErrorObjectNotFound
 }
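The removed fast path relies on MLST (RFC 3659): when the server advertises it, one GetEntry round trip replaces listing and scanning the whole parent directory. A hedged sketch of that pattern with the jlaffaye/ftp client, assuming its GetEntry and IsTimePreciseInList helpers as used above (the helper function itself is made up):

import (
	"fmt"
	"path"

	"github.com/jlaffaye/ftp"
)

// Sketch: prefer a single MLST lookup, fall back to LIST plus a scan.
func statPath(conn *ftp.ServerConn, fullPath string) (*ftp.Entry, error) {
	if conn.IsTimePreciseInList() { // true when the server supports MLST
		return conn.GetEntry(fullPath) // one round trip for one entry
	}
	entries, err := conn.List(path.Dir(fullPath)) // otherwise list the parent...
	if err != nil {
		return nil, err
	}
	base := path.Base(fullPath)
	for _, e := range entries {
		if e.Name == base { // ...and scan for the name
			return e, nil
		}
	}
	return nil, fmt.Errorf("%q not found", fullPath)
}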
@@ -12,6 +12,7 @@ import (
 	_ "github.com/rclone/rclone/backend/local"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/lib/random"
 	"github.com/stretchr/testify/assert"
@@ -55,7 +56,7 @@ func TestIntegration(t *testing.T) {
 		require.NoError(t, err)
 		in, err := srcObj.Open(ctx)
 		require.NoError(t, err)
-		dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote))
+		dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
 		require.NoError(t, err)
 		assert.Equal(t, remote, dstObj.Remote())
 		_ = in.Close()
@@ -220,7 +221,7 @@ func TestIntegration(t *testing.T) {
 		require.NoError(t, err)
 		in, err := srcObj.Open(ctx)
 		require.NoError(t, err)
-		dstObj, err := f.Put(ctx, in, fs.NewOverrideRemote(srcObj, remote))
+		dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
 		require.NoError(t, err)
 		assert.Equal(t, remote, dstObj.Remote())
 		_ = in.Close()
@@ -161,7 +161,7 @@ func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bo
 			if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
 				fs.Errorf(nil, "%s: failed to import: %v", remote, err)
 			}
-			accounting.Stats(ctx).NewCheckingTransfer(obj, "importing").Done(ctx, err)
+			accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
 			doneCount++
 		}
 	})
@@ -33,9 +33,8 @@ var (
 	lineEndSize = 1
 )

-// prepareServer prepares the test server and shuts it down automatically
-// when the test completes.
-func prepareServer(t *testing.T) configmap.Simple {
+// prepareServer the test server and return a function to tidy it up afterwards
+func prepareServer(t *testing.T) (configmap.Simple, func()) {
 	// file server for test/files
 	fileServer := http.FileServer(http.Dir(filesPath))

@@ -79,21 +78,20 @@ func prepareServer(t *testing.T) configmap.Simple {
 		"url":     ts.URL,
 		"headers": strings.Join(headers, ","),
 	}
-	t.Cleanup(ts.Close)

-	return m
+	// return a function to tidy up
+	return m, ts.Close
 }

-// prepare prepares the test server and shuts it down automatically
-// when the test completes.
-func prepare(t *testing.T) fs.Fs {
-	m := prepareServer(t)
+// prepare the test server and return a function to tidy it up afterwards
+func prepare(t *testing.T) (fs.Fs, func()) {
+	m, tidy := prepareServer(t)

 	// Instantiate it
 	f, err := NewFs(context.Background(), remoteName, "", m)
 	require.NoError(t, err)

-	return f
+	return f, tidy
 }

 func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
@@ -136,19 +134,22 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
 }

 func TestListRoot(t *testing.T) {
-	f := prepare(t)
+	f, tidy := prepare(t)
+	defer tidy()
 	testListRoot(t, f, false)
 }

 func TestListRootNoSlash(t *testing.T) {
-	f := prepare(t)
+	f, tidy := prepare(t)
 	f.(*Fs).opt.NoSlash = true
+	defer tidy()

 	testListRoot(t, f, true)
 }

 func TestListSubDir(t *testing.T) {
-	f := prepare(t)
+	f, tidy := prepare(t)
+	defer tidy()

 	entries, err := f.List(context.Background(), "three")
 	require.NoError(t, err)
@@ -165,7 +166,8 @@ func TestListSubDir(t *testing.T) {
 }

 func TestNewObject(t *testing.T) {
-	f := prepare(t)
+	f, tidy := prepare(t)
+	defer tidy()

 	o, err := f.NewObject(context.Background(), "four/under four.txt")
 	require.NoError(t, err)
@@ -192,7 +194,8 @@ func TestNewObject(t *testing.T) {
 }

 func TestOpen(t *testing.T) {
-	m := prepareServer(t)
+	m, tidy := prepareServer(t)
+	defer tidy()

 	for _, head := range []bool{false, true} {
 		if !head {
@@ -254,7 +257,8 @@ func TestOpen(t *testing.T) {
 }

 func TestMimeType(t *testing.T) {
-	f := prepare(t)
+	f, tidy := prepare(t)
+	defer tidy()

 	o, err := f.NewObject(context.Background(), "four/under four.txt")
 	require.NoError(t, err)
@@ -265,7 +269,8 @@ func TestMimeType(t *testing.T) {
 }

 func TestIsAFileRoot(t *testing.T) {
-	m := prepareServer(t)
+	m, tidy := prepareServer(t)
+	defer tidy()

 	f, err := NewFs(context.Background(), remoteName, "one%.txt", m)
 	assert.Equal(t, err, fs.ErrorIsFile)
@@ -274,7 +279,8 @@ func TestIsAFileRoot(t *testing.T) {
 }

 func TestIsAFileSubDir(t *testing.T) {
-	m := prepareServer(t)
+	m, tidy := prepareServer(t)
+	defer tidy()

 	f, err := NewFs(context.Background(), remoteName, "three/underthree.txt", m)
 	assert.Equal(t, err, fs.ErrorIsFile)
@@ -503,7 +503,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
continue
}
}
err = fmt.Errorf("failed to read directory %q: %w", namepath, fierr)
err = fmt.Errorf("failed to read directory %q: %w", namepath, err)
fs.Errorf(dir, "%v", fierr)
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
continue

@@ -33,6 +33,7 @@ func TestMain(m *testing.M) {
// Test copy with source file that's updating
func TestUpdatingCheck(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
filePath := "sub dir/local test"
r.WriteFile(filePath, "content", time.Now())

@@ -77,6 +78,7 @@ func TestUpdatingCheck(t *testing.T) {
func TestSymlink(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
f := r.Flocal.(*Fs)
dir := f.root

@@ -175,6 +177,7 @@ func TestSymlinkError(t *testing.T) {
func TestHashOnUpdate(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt"
when := time.Now()
r.WriteFile(filePath, "content", when)
@@ -205,6 +208,7 @@ func TestHashOnUpdate(t *testing.T) {
func TestHashOnDelete(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt"
when := time.Now()
r.WriteFile(filePath, "content", when)
@@ -233,6 +237,7 @@ func TestHashOnDelete(t *testing.T) {
func TestMetadata(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "metafile.txt"
when := time.Now()
const dayLength = len("2001-01-01")
@@ -367,6 +372,7 @@ func TestMetadata(t *testing.T) {
func TestFilter(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now()
r.WriteFile("included", "included file", when)
r.WriteFile("excluded", "excluded file", when)

@@ -83,17 +83,6 @@ than permanently deleting them. If you specify this then rclone will
permanently delete objects instead.`,
Default: false,
Advanced: true,
}, {
Name: "use_https",
Help: `Use HTTPS for transfers.

MEGA uses plain text HTTP connections by default.
Some ISPs throttle HTTP connections, which causes transfers to become very slow.
Enabling this will force MEGA to use HTTPS for all transfers.
HTTPS is normally not necessary since all data is already encrypted anyway.
Enabling it will increase CPU usage and add network overhead.`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -111,7 +100,6 @@ type Options struct {
Pass string `config:"pass"`
Debug bool `config:"debug"`
HardDelete bool `config:"hard_delete"`
UseHTTPS bool `config:"use_https"`
Enc encoder.MultiEncoder `config:"encoding"`
}

@@ -216,7 +204,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if srv == nil {
srv = mega.New().SetClient(fshttp.NewClient(ctx))
srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
srv.SetHTTPS(opt.UseHTTPS)
srv.SetLogger(func(format string, v ...interface{}) {
fs.Infof("*go-mega*", format, v...)
})

@@ -126,7 +126,6 @@ type HashesType struct {
Sha1Hash string `json:"sha1Hash"` // hex encoded SHA1 hash for the contents of the file (if available)
Crc32Hash string `json:"crc32Hash"` // hex encoded CRC32 value of the file (if available)
QuickXorHash string `json:"quickXorHash"` // base64 encoded QuickXorHash value of the file (if available)
Sha256Hash string `json:"sha256Hash"` // hex encoded SHA256 value of the file (if available)
}

// FileFacet groups file-related data on OneDrive into a single structure.

@@ -259,48 +259,6 @@ this flag there.
At the time of writing this only works with OneDrive personal paid accounts.
`,
Advanced: true,
}, {
Name: "hash_type",
Default: "auto",
Help: `Specify the hash in use for the backend.

This specifies the hash type in use. If set to "auto" it will use the
default hash which is QuickXorHash.

Before rclone 1.62 an SHA1 hash was used by default for OneDrive
Personal. For 1.62 and later the default is to use a QuickXorHash for
all onedrive types. If an SHA1 hash is desired then set this option
accordingly.

From July 2023 QuickXorHash will be the only available hash for
both OneDrive for Business and OneDrive Personal.

This can be set to "none" to not use any hashes.

If the hash requested does not exist on the object, it will be
returned as an empty string which is treated as a missing hash by
rclone.
`,
Examples: []fs.OptionExample{{
Value: "auto",
Help: "Rclone chooses the best hash",
}, {
Value: "quickxor",
Help: "QuickXor",
}, {
Value: "sha1",
Help: "SHA1",
}, {
Value: "sha256",
Help: "SHA256",
}, {
Value: "crc32",
Help: "CRC32",
}, {
Value: "none",
Help: "None - don't use any hashes",
}},
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -553,7 +511,7 @@ Example: "https://contoso.sharepoint.com/sites/mysite" or "mysite"
`)
case "url_end":
siteURL := config.Result
re := regexp.MustCompile(`https://.*\.sharepoint\.com/sites/(.*)`)
re := regexp.MustCompile(`https://.*\.sharepoint.com/sites/(.*)`)
match := re.FindStringSubmatch(siteURL)
if len(match) == 2 {
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
@@ -639,7 +597,6 @@ type Options struct {
LinkScope string `config:"link_scope"`
LinkType string `config:"link_type"`
LinkPassword string `config:"link_password"`
HashType string `config:"hash_type"`
Enc encoder.MultiEncoder `config:"encoding"`
}

@@ -656,7 +613,6 @@ type Fs struct {
tokenRenewer *oauthutil.Renew // renew the token on expiry
driveID string // ID to use for querying Microsoft Graph
driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
hashType hash.Type // type of the hash we are using
}

// Object describes a OneDrive object
@@ -670,7 +626,8 @@ type Object struct {
size int64 // size of the object
modTime time.Time // modification time of the object
id string // ID of the object
hash string // Hash of the content, usually QuickXorHash but set as hash_type
sha1 string // SHA-1 of the object content
quickxorhash string // QuickXorHash of the object content
mimeType string // Content-Type of object from server (may not be as uploaded)
}

@@ -925,7 +882,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
driveType: opt.DriveType,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
hashType: QuickXorHashType,
}
f.features = (&fs.Features{
CaseInsensitive: true,
@@ -935,15 +891,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler)

// Set the user defined hash
if opt.HashType == "auto" || opt.HashType == "" {
opt.HashType = QuickXorHashType.String()
}
err = f.hashType.Set(opt.HashType)
if err != nil {
return nil, err
}

// Disable change polling in China region
// See: https://github.com/rclone/rclone/issues/6444
if f.opt.Region == regionCN {
@@ -1609,7 +1556,10 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(f.hashType)
if f.driveType == driveTypePersonal {
return hash.Set(hash.SHA1)
}
return hash.Set(QuickXorHashType)
}

// PublicLink returns a link for downloading without account.
@@ -1818,8 +1768,14 @@ func (o *Object) rootPath() string {

// Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t == o.fs.hashType {
return o.hash, nil
if o.fs.driveType == driveTypePersonal {
if t == hash.SHA1 {
return o.sha1, nil
}
} else {
if t == QuickXorHashType {
return o.quickxorhash, nil
}
}
return "", hash.ErrUnsupported
}
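
With the revert, Hash dispatches on drive type instead of a configured hash_type: personal drives answer for SHA-1, everything else for QuickXorHash, and any other request gets hash.ErrUnsupported. A minimal sketch of a caller that copes with both drive types; bestHash is hypothetical, not rclone source, and it assumes the onedrive package's fs, hash and QuickXorHashType names seen in the diff above:

    package onedrive

    import (
        "context"

        "github.com/rclone/rclone/fs"
        "github.com/rclone/rclone/fs/hash"
    )

    // bestHash (hypothetical helper) returns whichever checksum this
    // object's drive type supports, trying SHA-1 (personal) first and
    // QuickXorHash (business/SharePoint) second.
    func bestHash(ctx context.Context, o fs.Object) (string, error) {
        if sum, err := o.Hash(ctx, hash.SHA1); err == nil && sum != "" {
            return sum, nil
        }
        return o.Hash(ctx, QuickXorHashType)
    }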
@@ -1850,23 +1806,16 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
file := info.GetFile()
if file != nil {
o.mimeType = file.MimeType
o.hash = ""
switch o.fs.hashType {
case QuickXorHashType:
if file.Hashes.QuickXorHash != "" {
h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
if err != nil {
fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
} else {
o.hash = hex.EncodeToString(h)
}
if file.Hashes.Sha1Hash != "" {
o.sha1 = strings.ToLower(file.Hashes.Sha1Hash)
}
if file.Hashes.QuickXorHash != "" {
h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
if err != nil {
fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
} else {
o.quickxorhash = hex.EncodeToString(h)
}
case hash.SHA1:
o.hash = strings.ToLower(file.Hashes.Sha1Hash)
case hash.SHA256:
o.hash = strings.ToLower(file.Hashes.Sha256Hash)
case hash.CRC32:
o.hash = strings.ToLower(file.Hashes.Crc32Hash)
}
}
fileSystemInfo := info.GetFileSystemInfo()
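
The Graph API reports QuickXorHash base64 encoded, while rclone stores and displays it as lowercase hex, which is what the decode/re-encode in setMetaData above is for. A standalone sketch of that conversion using only the standard library; the bytes are made up for illustration:

    package main

    import (
        "encoding/base64"
        "encoding/hex"
        "fmt"
    )

    func main() {
        // Stand-in for a decoded 20-byte QuickXorHash value (illustrative)
        raw := []byte{0xde, 0xad, 0xbe, 0xef, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
            0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f}
        apiForm := base64.StdEncoding.EncodeToString(raw) // what the API would report
        decoded, _ := base64.StdEncoding.DecodeString(apiForm)
        fmt.Println(hex.EncodeToString(decoded)) // what rclone stores and prints
    }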

@@ -7,40 +7,51 @@
// See: https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
package quickxorhash

// This code was ported from a fast C-implementation from
// https://github.com/namazso/QuickXorHash
// which is licensed under the BSD Zero Clause License
//
// BSD Zero Clause License
//
// Copyright (c) 2022 namazso <admin@namazso.eu>
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
// AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
// OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.
// This code was ported from the code snippet linked from
// https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
// Which has the copyright

import "hash"
// ------------------------------------------------------------------------------
// Copyright (c) 2016 Microsoft Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// ------------------------------------------------------------------------------

import (
"hash"
)

const (
// BlockSize is the preferred size for hashing
BlockSize = 64
// Size of the output checksum
Size = 20
shift = 11
widthInBits = 8 * Size
dataSize = shift * widthInBits
Size = 20
bitsInLastCell = 32
shift = 11
widthInBits = 8 * Size
dataSize = (widthInBits-1)/64 + 1
)

type quickXorHash struct {
data [dataSize]byte
size uint64
data [dataSize]uint64
lengthSoFar uint64
shiftSoFar int
}

// New returns a new hash.Hash computing the quickXorHash checksum.
@@ -59,37 +70,94 @@ func New() hash.Hash {
//
// Implementations must not retain p.
func (q *quickXorHash) Write(p []byte) (n int, err error) {
var i int
// fill last remain
lastRemain := int(q.size) % dataSize
if lastRemain != 0 {
i += xorBytes(q.data[lastRemain:], p)
currentshift := q.shiftSoFar

// The bitvector where we'll start xoring
vectorArrayIndex := currentshift / 64

// The position within the bit vector at which we begin xoring
vectorOffset := currentshift % 64
iterations := len(p)
if iterations > widthInBits {
iterations = widthInBits
}

if i != len(p) {
for len(p)-i >= dataSize {
i += xorBytes(q.data[:], p[i:])
for i := 0; i < iterations; i++ {
isLastCell := vectorArrayIndex == len(q.data)-1
var bitsInVectorCell int
if isLastCell {
bitsInVectorCell = bitsInLastCell
} else {
bitsInVectorCell = 64
}

// There's at least 2 bitvectors before we reach the end of the array
if vectorOffset <= bitsInVectorCell-8 {
for j := i; j < len(p); j += widthInBits {
q.data[vectorArrayIndex] ^= uint64(p[j]) << uint(vectorOffset)
}
} else {
index1 := vectorArrayIndex
var index2 int
if isLastCell {
index2 = 0
} else {
index2 = vectorArrayIndex + 1
}
low := byte(bitsInVectorCell - vectorOffset)

xoredByte := byte(0)
for j := i; j < len(p); j += widthInBits {
xoredByte ^= p[j]
}
q.data[index1] ^= uint64(xoredByte) << uint(vectorOffset)
q.data[index2] ^= uint64(xoredByte) >> low
}
vectorOffset += shift
for vectorOffset >= bitsInVectorCell {
if isLastCell {
vectorArrayIndex = 0
} else {
vectorArrayIndex = vectorArrayIndex + 1
}
vectorOffset -= bitsInVectorCell
}
xorBytes(q.data[:], p[i:])
}
q.size += uint64(len(p))

// Update the starting position in a circular shift pattern
q.shiftSoFar = (q.shiftSoFar + shift*(len(p)%widthInBits)) % widthInBits

q.lengthSoFar += uint64(len(p))

return len(p), nil
}

// Calculate the current checksum
func (q *quickXorHash) checkSum() (h [Size + 1]byte) {
for i := 0; i < dataSize; i++ {
shift := (i * 11) % 160
shiftBytes := shift / 8
shiftBits := shift % 8
shifted := int(q.data[i]) << shiftBits
h[shiftBytes] ^= byte(shifted)
h[shiftBytes+1] ^= byte(shifted >> 8)
func (q *quickXorHash) checkSum() (h [Size]byte) {
// Output the data as little endian bytes
ph := 0
for i := 0; i < len(q.data)-1; i++ {
d := q.data[i]
_ = h[ph+7] // bounds check
h[ph+0] = byte(d >> (8 * 0))
h[ph+1] = byte(d >> (8 * 1))
h[ph+2] = byte(d >> (8 * 2))
h[ph+3] = byte(d >> (8 * 3))
h[ph+4] = byte(d >> (8 * 4))
h[ph+5] = byte(d >> (8 * 5))
h[ph+6] = byte(d >> (8 * 6))
h[ph+7] = byte(d >> (8 * 7))
ph += 8
}
h[0] ^= h[20]
// remaining 32 bits
d := q.data[len(q.data)-1]
h[Size-4] = byte(d >> (8 * 0))
h[Size-3] = byte(d >> (8 * 1))
h[Size-2] = byte(d >> (8 * 2))
h[Size-1] = byte(d >> (8 * 3))

// XOR the file length with the least significant bits in little endian format
d := q.size
d = q.lengthSoFar
h[Size-8] ^= byte(d >> (8 * 0))
h[Size-7] ^= byte(d >> (8 * 1))
h[Size-6] ^= byte(d >> (8 * 2))
@@ -106,7 +174,7 @@ func (q *quickXorHash) checkSum() (h [Size + 1]byte) {
// It does not change the underlying hash state.
func (q *quickXorHash) Sum(b []byte) []byte {
hash := q.checkSum()
return append(b, hash[:Size]...)
return append(b, hash[:]...)
}

// Reset resets the Hash to its initial state.
@@ -128,10 +196,8 @@ func (q *quickXorHash) BlockSize() int {
}

// Sum returns the quickXorHash checksum of the data.
func Sum(data []byte) (h [Size]byte) {
func Sum(data []byte) [Size]byte {
var d quickXorHash
_, _ = d.Write(data)
s := d.checkSum()
copy(h[:], s[:])
return h
return d.checkSum()
}
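
Either shape of the package presents the same public surface (New, Sum, Size, BlockSize), so callers are unaffected by the revert. A minimal usage sketch, assuming the package's usual rclone import path; this is not taken from rclone's tests:

    package main

    import (
        "encoding/hex"
        "fmt"

        "github.com/rclone/rclone/backend/onedrive/quickxorhash"
    )

    func main() {
        // Streaming use via the hash.Hash interface
        h := quickxorhash.New()
        _, _ = h.Write([]byte("hello, "))
        _, _ = h.Write([]byte("world"))
        fmt.Println(hex.EncodeToString(h.Sum(nil)))

        // One-shot use; must agree with the streaming result above
        sum := quickxorhash.Sum([]byte("hello, world"))
        fmt.Println(hex.EncodeToString(sum[:]))
    }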

@@ -4,7 +4,6 @@ import (
"encoding/base64"
"fmt"
"hash"
"math/rand"
"testing"

"github.com/stretchr/testify/assert"
@@ -167,16 +166,3 @@ func TestReset(t *testing.T) {

// check interface
var _ hash.Hash = (*quickXorHash)(nil)

func BenchmarkQuickXorHash(b *testing.B) {
b.SetBytes(1 << 20)
buf := make([]byte, 1<<20)
rand.Read(buf)
h := New()
b.ResetTimer()
for i := 0; i < b.N; i++ {
h.Reset()
h.Write(buf)
h.Sum(nil)
}
}

@@ -1,20 +0,0 @@
//go:build !go1.20

package quickxorhash

func xorBytes(dst, src []byte) int {
n := len(dst)
if len(src) < n {
n = len(src)
}
if n == 0 {
return 0
}
dst = dst[:n]
//src = src[:n]
src = src[:len(dst)] // remove bounds check in loop
for i := range dst {
dst[i] ^= src[i]
}
return n
}
@@ -1,9 +0,0 @@
//go:build go1.20

package quickxorhash

import "crypto/subtle"

func xorBytes(dst, src []byte) int {
return subtle.XORBytes(dst, src, dst)
}
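
The two deleted files are build-tagged alternatives for the same helper: on Go 1.20+ the manual loop is replaced by crypto/subtle.XORBytes, which likewise XORs min(len(dst), len(src)) bytes and returns that count. A quick equivalence sketch, not rclone code:

    package main

    import (
        "bytes"
        "crypto/subtle"
        "fmt"
    )

    // manual XOR, mirroring the pre-1.20 implementation above
    func xorLoop(dst, src []byte) int {
        n := len(dst)
        if len(src) < n {
            n = len(src)
        }
        for i := 0; i < n; i++ {
            dst[i] ^= src[i]
        }
        return n
    }

    func main() {
        a1 := []byte{0x0f, 0xf0, 0xaa}
        a2 := []byte{0x0f, 0xf0, 0xaa}
        src := []byte{0xff, 0x0f}

        n1 := xorLoop(a1, src)
        n2 := subtle.XORBytes(a2, a2, src) // Go 1.20+: XORs min(len) bytes
        fmt.Println(n1 == n2, bytes.Equal(a1, a2)) // true true
    }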
@@ -1,145 +0,0 @@
//go:build !plan9 && !solaris && !js
// +build !plan9,!solaris,!js

package oracleobjectstorage

import (
"crypto/sha256"
"encoding/base64"
"errors"
"fmt"
"os"
"strings"

"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/oracle/oci-go-sdk/v65/objectstorage/transfer"
)

const (
sseDefaultAlgorithm = "AES256"
)

func getSha256(p []byte) []byte {
h := sha256.New()
h.Write(p)
return h.Sum(nil)
}

func validateSSECustomerKeyOptions(opt *Options) error {
if opt.SSEKMSKeyID != "" && (opt.SSECustomerKeyFile != "" || opt.SSECustomerKey != "") {
return errors.New("oos: can't use vault sse_kms_key_id and local sse_customer_key at the same time")
}
if opt.SSECustomerKey != "" && opt.SSECustomerKeyFile != "" {
return errors.New("oos: can't use sse_customer_key and sse_customer_key_file at the same time")
}
if opt.SSEKMSKeyID != "" {
return nil
}
err := populateSSECustomerKeys(opt)
if err != nil {
return err
}
return nil
}

func populateSSECustomerKeys(opt *Options) error {
if opt.SSECustomerKeyFile != "" {
// Reads the base64-encoded AES key data from the specified file and computes its SHA256 checksum
data, err := os.ReadFile(expandPath(opt.SSECustomerKeyFile))
if err != nil {
return fmt.Errorf("oos: error reading sse_customer_key_file: %v", err)
}
opt.SSECustomerKey = strings.TrimSpace(string(data))
}
if opt.SSECustomerKey != "" {
decoded, err := base64.StdEncoding.DecodeString(opt.SSECustomerKey)
if err != nil {
return fmt.Errorf("oos: Could not decode sse_customer_key_file: %w", err)
}
sha256Checksum := base64.StdEncoding.EncodeToString(getSha256(decoded))
if opt.SSECustomerKeySha256 == "" {
opt.SSECustomerKeySha256 = sha256Checksum
} else {
if opt.SSECustomerKeySha256 != sha256Checksum {
return fmt.Errorf("the computed SHA256 checksum "+
"(%v) of the key doesn't match the config entry sse_customer_key_sha256=(%v)",
sha256Checksum, opt.SSECustomerKeySha256)
}
}
if opt.SSECustomerAlgorithm == "" {
opt.SSECustomerAlgorithm = sseDefaultAlgorithm
}
}
return nil
}
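
populateSSECustomerKeys expects sse_customer_key to hold a base64-encoded 256-bit key and, when sse_customer_key_sha256 is set, the base64 of that key's SHA-256. A sketch of producing a consistent pair of values in those shapes; illustrative only, not a key-management recommendation:

    package main

    import (
        "crypto/rand"
        "crypto/sha256"
        "encoding/base64"
        "fmt"
    )

    func main() {
        // 32 random bytes = an AES-256 key (illustrative)
        key := make([]byte, 32)
        if _, err := rand.Read(key); err != nil {
            panic(err)
        }
        sum := sha256.Sum256(key)
        // Values in the shapes populateSSECustomerKeys validates against
        fmt.Println("sse_customer_key =", base64.StdEncoding.EncodeToString(key))
        fmt.Println("sse_customer_key_sha256 =", base64.StdEncoding.EncodeToString(sum[:]))
    }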

// https://docs.oracle.com/en-us/iaas/Content/Object/Tasks/usingyourencryptionkeys.htm
func useBYOKPutObject(fs *Fs, request *objectstorage.PutObjectRequest) {
if fs.opt.SSEKMSKeyID != "" {
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
}
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}

func useBYOKHeadObject(fs *Fs, request *objectstorage.HeadObjectRequest) {
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}

func useBYOKGetObject(fs *Fs, request *objectstorage.GetObjectRequest) {
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}

func useBYOKCopyObject(fs *Fs, request *objectstorage.CopyObjectRequest) {
if fs.opt.SSEKMSKeyID != "" {
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
}
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}

func useBYOKUpload(fs *Fs, request *transfer.UploadRequest) {
if fs.opt.SSEKMSKeyID != "" {
request.OpcSseKmsKeyId = common.String(fs.opt.SSEKMSKeyID)
}
if fs.opt.SSECustomerAlgorithm != "" {
request.OpcSseCustomerAlgorithm = common.String(fs.opt.SSECustomerAlgorithm)
}
if fs.opt.SSECustomerKey != "" {
request.OpcSseCustomerKey = common.String(fs.opt.SSECustomerKey)
}
if fs.opt.SSECustomerKeySha256 != "" {
request.OpcSseCustomerKeySha256 = common.String(fs.opt.SSECustomerKeySha256)
}
}
@@ -9,8 +9,6 @@ import (
"errors"
"net/http"
"os"
"path"
"strings"

"github.com/oracle/oci-go-sdk/v65/common"
"github.com/oracle/oci-go-sdk/v65/common/auth"
@@ -20,33 +18,15 @@ import (
"github.com/rclone/rclone/fs/fshttp"
)

func expandPath(filepath string) (expandedPath string) {
if filepath == "" {
return filepath
}
cleanedPath := path.Clean(filepath)
expandedPath = cleanedPath
if strings.HasPrefix(cleanedPath, "~") {
rest := cleanedPath[2:]
home, err := os.UserHomeDir()
if err != nil {
return expandedPath
}
expandedPath = path.Join(home, rest)
}
return
}

func getConfigurationProvider(opt *Options) (common.ConfigurationProvider, error) {
switch opt.Provider {
case instancePrincipal:
return auth.InstancePrincipalConfigurationProvider()
case userPrincipal:
expandConfigFilePath := expandPath(opt.ConfigFile)
if expandConfigFilePath != "" && !fileExists(expandConfigFilePath) {
fs.Errorf(userPrincipal, "oci config file doesn't exist at %v", expandConfigFilePath)
if opt.ConfigFile != "" && !fileExists(opt.ConfigFile) {
fs.Errorf(userPrincipal, "oci config file doesn't exist at %v", opt.ConfigFile)
}
return common.CustomProfileConfigProvider(expandConfigFilePath, opt.ConfigProfile), nil
return common.CustomProfileConfigProvider(opt.ConfigFile, opt.ConfigProfile), nil
case resourcePrincipal:
return auth.ResourcePrincipalConfigurationProvider()
case noAuth:

@@ -65,8 +65,8 @@ a bucket or with a bucket and path.
Long: `This command removes unfinished multipart uploads of age greater than
max-age which defaults to 24 hours.

Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.
Note that you can use -i/--dry-run with this command to see what it
would do.

rclone backend cleanup oos:bucket/path/to/object
rclone backend cleanup -o max-age=7w oos:bucket/path/to/object

@@ -74,7 +74,6 @@ func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object) (err erro
BucketName: common.String(srcBucket),
CopyObjectDetails: copyObjectDetails,
}
useBYOKCopyObject(f, &req)
var resp objectstorage.CopyObjectResponse
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.CopyObject(ctx, req)

@@ -87,7 +87,6 @@ func (o *Object) headObject(ctx context.Context) (info *objectstorage.HeadObject
BucketName: common.String(bucketName),
ObjectName: common.String(objectPath),
}
useBYOKHeadObject(o.fs, &req)
var response objectstorage.HeadObjectResponse
err = o.fs.pacer.Call(func() (bool, error) {
var err error
@@ -100,7 +99,6 @@ func (o *Object) headObject(ctx context.Context) (info *objectstorage.HeadObject
return nil, fs.ErrorObjectNotFound
}
}
fs.Errorf(o, "Failed to head object: %v", err)
return nil, err
}
o.fs.cache.MarkOK(bucketName)
@@ -333,7 +331,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadClo
ObjectName: common.String(bucketPath),
}
o.applyGetObjectOptions(&req, options...)
useBYOKGetObject(o.fs, &req)

var resp objectstorage.GetObjectResponse
err := o.fs.pacer.Call(func() (bool, error) {
var err error
@@ -435,7 +433,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
uploadRequest.StorageTier = storageTier
}
o.applyMultiPutOptions(&uploadRequest, options...)
useBYOKUpload(o.fs, &uploadRequest)
uploadStreamRequest := transfer.UploadStreamRequest{
UploadRequest: uploadRequest,
StreamReader: in,
@@ -509,10 +506,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
req.StorageTier = storageTier
}
o.applyPutOptions(&req, options...)
useBYOKPutObject(o.fs, &req)
var resp objectstorage.PutObjectResponse
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.PutObject(ctx, req)
resp, err := o.fs.srv.PutObject(ctx, req)
return shouldRetry(ctx, resp.HTTPResponse(), err)
})
if err != nil {

@@ -17,7 +17,9 @@ const (
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
defaultUploadConcurrency = 10
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
minSleep = 10 * time.Millisecond
minSleep = 100 * time.Millisecond
maxSleep = 5 * time.Minute
decayConstant = 1 // bigger for slower decay, exponential
defaultCopyTimeoutDuration = fs.Duration(time.Minute)
)

@@ -45,28 +47,23 @@ https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfromins

// Options defines the configuration for this backend
type Options struct {
Provider string `config:"provider"`
Compartment string `config:"compartment"`
Namespace string `config:"namespace"`
Region string `config:"region"`
Endpoint string `config:"endpoint"`
Enc encoder.MultiEncoder `config:"encoding"`
ConfigFile string `config:"config_file"`
ConfigProfile string `config:"config_profile"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
DisableChecksum bool `config:"disable_checksum"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
CopyTimeout fs.Duration `config:"copy_timeout"`
StorageTier string `config:"storage_tier"`
LeavePartsOnError bool `config:"leave_parts_on_error"`
NoCheckBucket bool `config:"no_check_bucket"`
SSEKMSKeyID string `config:"sse_kms_key_id"`
SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
SSECustomerKey string `config:"sse_customer_key"`
SSECustomerKeyFile string `config:"sse_customer_key_file"`
SSECustomerKeySha256 string `config:"sse_customer_key_sha256"`
Provider string `config:"provider"`
Compartment string `config:"compartment"`
Namespace string `config:"namespace"`
Region string `config:"region"`
Endpoint string `config:"endpoint"`
Enc encoder.MultiEncoder `config:"encoding"`
ConfigFile string `config:"config_file"`
ConfigProfile string `config:"config_profile"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
DisableChecksum bool `config:"disable_checksum"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
CopyTimeout fs.Duration `config:"copy_timeout"`
StorageTier string `config:"storage_tier"`
LeavePartsOnError bool `config:"leave_parts_on_error"`
NoCheckBucket bool `config:"no_check_bucket"`
}

func newOptions() []fs.Option {
@@ -126,22 +123,6 @@ func newOptions() []fs.Option {
Value: "Default",
Help: "Use the default profile",
}},
}, {
// Mapping from here: https://github.com/oracle/oci-go-sdk/blob/master/objectstorage/storage_tier.go
Name: "storage_tier",
Help: "The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm",
Default: "Standard",
Advanced: true,
Examples: []fs.OptionExample{{
Value: "Standard",
Help: "Standard storage tier, this is the default tier",
}, {
Value: "InfrequentAccess",
Help: "InfrequentAccess storage tier",
}, {
Value: "Archive",
Help: "Archive storage tier",
}},
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload.
@@ -257,59 +238,5 @@ creation permissions.
`,
Default: false,
Advanced: true,
}, {
Name: "sse_customer_key_file",
Help: `To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated
with the object. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
}, {
Name: "sse_customer_key",
Help: `To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to
encrypt or decrypt the data. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is
needed. For more information, see Using Your Own Keys for Server-Side Encryption
(https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm)`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
}, {
Name: "sse_customer_key_sha256",
Help: `If using SSE-C, the optional header that specifies the base64-encoded SHA256 hash of the encryption
key. This value is used to check the integrity of the encryption key. See Using Your Own Keys for
Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm).`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
}, {
Name: "sse_kms_key_id",
Help: `If using your own master key in vault, this header specifies the
OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call
the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key.
Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
}, {
Name: "sse_customer_algorithm",
Help: `If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm.
Object Storage supports "AES256" as the encryption algorithm. For more information, see
Using Your Own Keys for Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm).`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}, {
Value: sseDefaultAlgorithm,
Help: sseDefaultAlgorithm,
}},
}}
}

@@ -59,27 +59,19 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, err
}
err = validateSSECustomerKeyOptions(opt)
if err != nil {
return nil, err
}
ci := fs.GetConfig(ctx)
objectStorageClient, err := newObjectStorageClient(ctx, opt)
if err != nil {
return nil, err
}
pc := fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep)))
// Set pacer retries to 2 (1 try and 1 retry) because we are
// relying on SDK retry mechanism, but we allow 2 attempts to
// retry directory listings after XMLSyntaxError
pc.SetRetries(2)
p := pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))
f := &Fs{
name: name,
opt: *opt,
ci: ci,
srv: objectStorageClient,
cache: bucket.NewCache(),
pacer: pc,
pacer: fs.NewPacer(ctx, p),
}
f.setRoot(root)
f.features = (&fs.Features{

271
backend/s3/s3.go
@@ -66,7 +66,7 @@ import (
func init() {
fs.Register(&fs.RegInfo{
Name: "s3",
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi",
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS, Qiniu and Wasabi",
NewFs: NewFs,
CommandHelp: commandHelp,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
@@ -105,7 +105,7 @@ func init() {
Help: "Arvan Cloud Object Storage (AOS)",
}, {
Value: "DigitalOcean",
Help: "DigitalOcean Spaces",
Help: "Digital Ocean Spaces",
}, {
Value: "Dreamhost",
Help: "Dreamhost DreamObjects",
@@ -124,9 +124,6 @@ func init() {
}, {
Value: "LyveCloud",
Help: "Seagate Lyve Cloud",
}, {
Value: "Liara",
Help: "Liara Object Storage",
}, {
Value: "Minio",
Help: "Minio Object Storage",
@@ -440,7 +437,7 @@ func init() {
}, {
Name: "region",
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Liara,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive",
Provider: "!AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,Qiniu,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive",
Examples: []fs.OptionExample{{
Value: "",
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
@@ -765,15 +762,6 @@ func init() {
Value: "s3-eu-south-2.ionoscloud.com",
Help: "Logrono, Spain",
}},
}, {
// Liara endpoints: https://liara.ir/landing/object-storage
Name: "endpoint",
Help: "Endpoint for Liara Object Storage API.",
Provider: "Liara",
Examples: []fs.OptionExample{{
Value: "storage.iran.liara.space",
Help: "The default endpoint\nIran",
}},
}, {
// oss endpoints: https://help.aliyun.com/document_detail/31837.html
Name: "endpoint",
@@ -936,11 +924,17 @@ func init() {
}},
}, {
Name: "endpoint",
Help: "Endpoint for Storj Gateway.",
Help: "Endpoint of the Shared Gateway.",
Provider: "Storj",
Examples: []fs.OptionExample{{
Value: "gateway.storjshare.io",
Help: "Global Hosted Gateway",
Value: "gateway.eu1.storjshare.io",
Help: "EU1 Shared Gateway",
}, {
Value: "gateway.us1.storjshare.io",
Help: "US1 Shared Gateway",
}, {
Value: "gateway.ap1.storjshare.io",
Help: "Asia-Pacific Shared Gateway",
}},
}, {
// cos endpoints: https://intl.cloud.tencent.com/document/product/436/6224
@@ -1098,34 +1092,22 @@ func init() {
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
Provider: "!AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,Liara,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu",
Provider: "!AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,ArvanCloud,Scaleway,StackPath,Storj,RackCorp,Qiniu",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint",
Provider: "Dreamhost",
}, {
Value: "syd1.digitaloceanspaces.com",
Help: "DigitalOcean Spaces Sydney 1",
Provider: "DigitalOcean",
}, {
Value: "sfo3.digitaloceanspaces.com",
Help: "DigitalOcean Spaces San Francisco 3",
Provider: "DigitalOcean",
}, {
Value: "fra1.digitaloceanspaces.com",
Help: "DigitalOcean Spaces Frankfurt 1",
Provider: "DigitalOcean",
}, {
Value: "nyc3.digitaloceanspaces.com",
Help: "DigitalOcean Spaces New York 3",
Help: "Digital Ocean Spaces New York 3",
Provider: "DigitalOcean",
}, {
Value: "ams3.digitaloceanspaces.com",
Help: "DigitalOcean Spaces Amsterdam 3",
Help: "Digital Ocean Spaces Amsterdam 3",
Provider: "DigitalOcean",
}, {
Value: "sgp1.digitaloceanspaces.com",
Help: "DigitalOcean Spaces Singapore 1",
Help: "Digital Ocean Spaces Singapore 1",
Provider: "DigitalOcean",
}, {
Value: "localhost:8333",
@@ -1195,10 +1177,6 @@ func init() {
Value: "s3.ap-southeast-2.wasabisys.com",
Help: "Wasabi AP Southeast 2 (Sydney)",
Provider: "Wasabi",
}, {
Value: "storage.iran.liara.space",
Help: "Liara Iran endpoint",
Provider: "Liara",
}, {
Value: "s3.ir-thr-at1.arvanstorage.com",
Help: "ArvanCloud Tehran Iran (Asiatech) endpoint",
@@ -1582,7 +1560,7 @@ func init() {
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Liara,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
Provider: "!AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,ArvanCloud,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
}, {
Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -1815,15 +1793,6 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
Value: "STANDARD_IA",
Help: "Infrequent access storage mode",
}},
}, {
// Mapping from here: https://liara.ir/landing/object-storage
Name: "storage_class",
Help: "The storage class to use when storing new objects in Liara",
Provider: "Liara",
Examples: []fs.OptionExample{{
Value: "STANDARD",
Help: "Standard storage class",
}},
}, {
// Mapping from here: https://www.arvancloud.com/en/products/cloud-storage
Name: "storage_class",
@@ -2266,11 +2235,6 @@ rclone's choice here.
Help: `Suppress setting and reading of system metadata`,
Advanced: true,
Default: false,
}, {
Name: "sts_endpoint",
Help: "Endpoint for STS.\n\nLeave blank if using AWS to use the default endpoint for the region.",
Provider: "AWS",
Advanced: true,
},
}})
}
@@ -2357,7 +2321,6 @@ type Options struct {
SecretAccessKey string `config:"secret_access_key"`
Region string `config:"region"`
Endpoint string `config:"endpoint"`
STSEndpoint string `config:"sts_endpoint"`
LocationConstraint string `config:"location_constraint"`
ACL string `config:"acl"`
BucketACL string `config:"bucket_acl"`
@@ -2534,7 +2497,7 @@ func parsePath(path string) (root string) {
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}

@@ -2566,38 +2529,6 @@ func getClient(ctx context.Context, opt *Options) *http.Client {
}
}

// Default name resolver
var defaultResolver = endpoints.DefaultResolver()

// resolve (service, region) to endpoint
//
// Used to set endpoint for s3 services and not for other services
type resolver map[string]string

// Add a service to the resolver, ignoring empty urls
func (r resolver) addService(service, url string) {
if url == "" {
return
}
if !strings.HasPrefix(url, "http") {
url = "https://" + url
}
r[service] = url
}

// EndpointFor return the endpoint for s3 if set or the default if not
func (r resolver) EndpointFor(service, region string, opts ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
fs.Debugf(nil, "Resolving service %q region %q", service, region)
url, ok := r[service]
if ok {
return endpoints.ResolvedEndpoint{
URL: url,
SigningRegion: region,
}, nil
}
return defaultResolver.EndpointFor(service, region, opts...)
}

// s3Connection makes a connection to s3
func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S3, *session.Session, error) {
ci := fs.GetConfig(ctx)
@@ -2676,12 +2607,8 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
if opt.Region != "" {
awsConfig.WithRegion(opt.Region)
}
if opt.Endpoint != "" || opt.STSEndpoint != "" {
// If endpoints are set, override the relevant services only
r := make(resolver)
r.addService("s3", opt.Endpoint)
r.addService("sts", opt.STSEndpoint)
awsConfig.WithEndpointResolver(r)
if opt.Endpoint != "" {
awsConfig.WithEndpoint(opt.Endpoint)
}

// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
@@ -2837,10 +2764,6 @@ func setQuirks(opt *Options) {
// listObjectsV2 supported - https://api.ionos.com/docs/s3/#Basic-Operations-get-Bucket-list-type-2
virtualHostStyle = false
urlEncodeListings = false
case "Liara":
virtualHostStyle = false
urlEncodeListings = false
useMultipartEtag = false
case "LyveCloud":
useMultipartEtag = false // LyveCloud seems to calculate multipart Etags differently from AWS
case "Minio":
@@ -3031,15 +2954,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
GetTier: true,
SlowModTime: true,
}).Fill(ctx, f)
if opt.Provider == "Storj" {
f.features.SetTier = false
f.features.GetTier = false
}
if opt.Provider == "IDrive" {
f.features.SetTier = false
}
// f.listMultipartUploads()

if f.rootBucket != "" && f.rootDirectory != "" && !opt.NoHeadObject && !strings.HasSuffix(root, "/") {
// Check to see if the (bucket,directory) is actually an existing file
oldRoot := f.root
@@ -3054,6 +2968,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
if opt.Provider == "Storj" {
f.features.SetTier = false
f.features.GetTier = false
}
if opt.Provider == "IDrive" {
f.features.SetTier = false
}
// f.listMultipartUploads()
return f, nil
}

@@ -3105,17 +3027,6 @@ func (f *Fs) getMetaDataListing(ctx context.Context, wantRemote string) (info *s
return info, versionID, nil
}

// stringClonePointer clones the string pointed to by sp into new
// memory. This is useful to stop us keeping references to small
// strings carved out of large XML responses.
func stringClonePointer(sp *string) *string {
if sp == nil {
return nil
}
var s = *sp
return &s
}

// Return an Object from a path
//
// If it can't be found it returns the error ErrorObjectNotFound.
@@ -3141,8 +3052,8 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Obje
}
o.setMD5FromEtag(aws.StringValue(info.ETag))
o.bytes = aws.Int64Value(info.Size)
o.storageClass = stringClonePointer(info.StorageClass)
o.versionID = stringClonePointer(versionID)
o.storageClass = info.StorageClass
o.versionID = versionID
} else if !o.fs.opt.NoHeadObject {
err := o.readMetaData(ctx) // reads info and meta, returning an error
if err != nil {
@@ -3283,9 +3194,6 @@ func (ls *v2List) List(ctx context.Context) (resp *s3.ListObjectsV2Output, versi
if err != nil {
return nil, nil, err
}
if aws.BoolValue(resp.IsTruncated) && (resp.NextContinuationToken == nil || *resp.NextContinuationToken == "") {
return nil, nil, errors.New("s3 protocol error: received listing v2 with IsTruncated set and no NextContinuationToken. Should you be using `--s3-list-version 1`?")
}
ls.req.ContinuationToken = resp.NextContinuationToken
return resp, nil, nil
}
@@ -3468,16 +3376,15 @@ var errEndList = errors.New("end list")

// list options
type listOpt struct {
bucket string // bucket to list
directory string // directory with bucket
prefix string // prefix to remove from listing
addBucket bool // if set, the bucket is added to the start of the remote
recurse bool // if set, recurse to read sub directories
withVersions bool // if set, versions are produced
hidden bool // if set, return delete markers as objects with size == isDeleteMarker
findFile bool // if set, it will look for files called (bucket, directory)
versionAt fs.Time // if set only show versions <= this time
noSkipMarkers bool // if set return dir marker objects
bucket string // bucket to list
directory string // directory with bucket
prefix string // prefix to remove from listing
addBucket bool // if set, the bucket is added to the start of the remote
recurse bool // if set, recurse to read sub directories
withVersions bool // if set, versions are produced
hidden bool // if set, return delete markers as objects with size == isDeleteMarker
findFile bool // if set, it will look for files called (bucket, directory)
versionAt fs.Time // if set only show versions <= this time
}

// list lists the objects into the function supplied with the opt
@@ -3590,7 +3497,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
}
remote = remote[len(opt.prefix):]
if opt.addBucket {
remote = bucket.Join(opt.bucket, remote)
remote = path.Join(opt.bucket, remote)
}
remote = strings.TrimSuffix(remote, "/")
err = fn(remote, &s3.Object{Key: &remote}, nil, true)
@@ -3619,10 +3526,10 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
remote = remote[len(opt.prefix):]
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if opt.addBucket {
remote = bucket.Join(opt.bucket, remote)
remote = path.Join(opt.bucket, remote)
}
// is this a directory marker?
if isDirectory && object.Size != nil && *object.Size == 0 && !opt.noSkipMarkers {
if isDirectory && object.Size != nil && *object.Size == 0 {
continue // skip directory marker
}
if versionIDs != nil {
@@ -3912,7 +3819,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
req.Bucket = &dstBucket
req.ACL = stringPointerOrNil(f.opt.ACL)
req.Key = &dstPath
source := pathEscape(bucket.Join(srcBucket, srcPath))
source := pathEscape(path.Join(srcBucket, srcPath))
if src.versionID != nil {
source += fmt.Sprintf("?versionId=%s", *src.versionID)
}
@@ -4147,9 +4054,9 @@ Usage Examples:
rclone backend restore s3:bucket/path/to/directory [-o priority=PRIORITY] [-o lifetime=DAYS]
rclone backend restore s3:bucket [-o priority=PRIORITY] [-o lifetime=DAYS]

This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags
This flag also obeys the filters. Test first with -i/--interactive or --dry-run flags

rclone --interactive backend restore --include "*.txt" s3:bucket/path -o priority=Standard
rclone -i backend restore --include "*.txt" s3:bucket/path -o priority=Standard

All the objects shown will be marked for restore, then

@@ -4217,8 +4124,8 @@ a bucket or with a bucket and path.
Long: `This command removes unfinished multipart uploads of age greater than
max-age which defaults to 24 hours.

Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.
Note that you can use -i/--dry-run with this command to see what it
would do.

rclone backend cleanup s3:bucket/path/to/object
rclone backend cleanup -o max-age=7w s3:bucket/path/to/object
@@ -4234,8 +4141,8 @@ Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
Long: `This command removes any old hidden versions of files
on a versions enabled bucket.

Note that you can use --interactive/-i or --dry-run with this command to see what
it would do.
Note that you can use -i/--dry-run with this command to see what it
would do.

rclone backend cleanup-hidden s3:bucket/path/to/dir
`,
@@ -4569,14 +4476,13 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
delErr <- operations.DeleteFiles(ctx, delChan)
}()
checkErr(f.list(ctx, listOpt{
bucket: bucket,
directory: directory,
prefix: f.rootDirectory,
addBucket: f.rootBucket == "",
recurse: true,
withVersions: versioned,
hidden: true,
noSkipMarkers: true,
bucket: bucket,
directory: directory,
prefix: f.rootDirectory,
addBucket: f.rootBucket == "",
recurse: true,
withVersions: versioned,
hidden: true,
}, func(remote string, object *s3.Object, versionID *string, isDirectory bool) error {
if isDirectory {
return nil
@@ -4586,7 +4492,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
fs.Errorf(object, "Can't create object %+v", err)
return nil
}
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
// Work out whether the file is the current version or not
isCurrentVersion := !versioned || !version.Match(remote)
fs.Debugf(nil, "%q version %v", remote, version.Match(remote))
@@ -5030,7 +4936,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read

var warnStreamUpload sync.Once

func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (wantETag, gotETag string, versionID *string, err error) {
func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (etag string, versionID *string, err error) {
f := o.fs

// make concurrency machinery
@@ -5074,7 +4980,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
return f.shouldRetry(ctx, err)
})
if err != nil {
return wantETag, gotETag, nil, fmt.Errorf("multipart upload failed to initialise: %w", err)
return etag, nil, fmt.Errorf("multipart upload failed to initialise: %w", err)
}
uid := cout.UploadId

@@ -5147,7 +5053,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
finished = true
} else if err != nil {
free()
return wantETag, gotETag, nil, fmt.Errorf("multipart upload failed to read source: %w", err)
return etag, nil, fmt.Errorf("multipart upload failed to read source: %w", err)
}
buf = buf[:n]

@@ -5202,7 +5108,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
}
err = g.Wait()
if err != nil {
return wantETag, gotETag, nil, err
return etag, nil, err
}

// sort the completed parts by part number
@@ -5224,17 +5130,14 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
return f.shouldRetry(ctx, err)
})
if err != nil {
return wantETag, gotETag, nil, fmt.Errorf("multipart upload failed to finalise: %w", err)
return etag, nil, fmt.Errorf("multipart upload failed to finalise: %w", err)
}
hashOfHashes := md5.Sum(md5s)
wantETag = fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(parts))
etag = fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(parts))
if resp != nil {
if resp.ETag != nil {
gotETag = *resp.ETag
}
versionID = resp.VersionId
}
return wantETag, gotETag, versionID, nil
return etag, versionID, nil
|
||||
}
|
||||
|
||||
// unWrapAwsError unwraps AWS errors, looking for a non AWS error
|
||||
@@ -5534,58 +5437,52 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
}
|
||||
|
||||
var wantETag string // Multipart upload Etag to check
|
||||
var gotETag string // Etag we got from the upload
|
||||
var gotEtag string // Etag we got from the upload
|
||||
var lastModified time.Time // Time we got from the upload
|
||||
var versionID *string // versionID we got from the upload
|
||||
if multipart {
|
||||
wantETag, gotETag, versionID, err = o.uploadMultipart(ctx, &req, size, in)
|
||||
wantETag, versionID, err = o.uploadMultipart(ctx, &req, size, in)
|
||||
} else {
|
||||
if o.fs.opt.UsePresignedRequest {
|
||||
gotETag, lastModified, versionID, err = o.uploadSinglepartPresignedRequest(ctx, &req, size, in)
|
||||
gotEtag, lastModified, versionID, err = o.uploadSinglepartPresignedRequest(ctx, &req, size, in)
|
||||
} else {
|
||||
gotETag, lastModified, versionID, err = o.uploadSinglepartPutObject(ctx, &req, size, in)
|
||||
gotEtag, lastModified, versionID, err = o.uploadSinglepartPutObject(ctx, &req, size, in)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Only record versionID if we are using --s3-versions or --s3-version-at
|
||||
if o.fs.opt.Versions || o.fs.opt.VersionAt.IsSet() {
|
||||
o.versionID = versionID
|
||||
} else {
|
||||
o.versionID = nil
|
||||
}
|
||||
o.versionID = versionID
|
||||
|
||||
// User requested we don't HEAD the object after uploading it
|
||||
// so make up the object as best we can assuming it got
|
||||
// uploaded properly. If size < 0 then we need to do the HEAD.
|
||||
var head *s3.HeadObjectOutput
|
||||
if o.fs.opt.NoHead && size >= 0 {
|
||||
head = new(s3.HeadObjectOutput)
|
||||
//structs.SetFrom(head, &req)
|
||||
setFrom_s3HeadObjectOutput_s3PutObjectInput(head, &req)
|
||||
var head s3.HeadObjectOutput
|
||||
//structs.SetFrom(&head, &req)
|
||||
setFrom_s3HeadObjectOutput_s3PutObjectInput(&head, &req)
|
||||
head.ETag = &md5sumHex // doesn't matter quotes are missing
|
||||
head.ContentLength = &size
|
||||
// We get etag back from single and multipart upload so fill it in here
|
||||
if gotETag != "" {
|
||||
head.ETag = &gotETag
|
||||
// If we have done a single part PUT request then we can read these
|
||||
if gotEtag != "" {
|
||||
head.ETag = &gotEtag
|
||||
}
|
||||
if lastModified.IsZero() {
|
||||
lastModified = time.Now()
|
||||
}
|
||||
head.LastModified = &lastModified
|
||||
head.VersionId = versionID
|
||||
} else {
|
||||
// Read the metadata from the newly created object
|
||||
o.meta = nil // wipe old metadata
|
||||
head, err = o.headObject(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.setMetaData(&head)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read the metadata from the newly created object
|
||||
o.meta = nil // wipe old metadata
|
||||
head, err := o.headObject(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.setMetaData(head)
|
||||
|
||||
// Check multipart upload ETag if required
|
||||
if o.fs.opt.UseMultipartEtag.Value && !o.fs.etagIsNotMD5 && wantETag != "" && head.ETag != nil && *head.ETag != "" {
|
||||
gotETag := strings.Trim(strings.ToLower(*head.ETag), `"`)
|
||||
if wantETag != gotETag {
|
||||
|
||||
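The wantETag construction above follows how S3 derives multipart ETags: the MD5 of the concatenated per-part MD5 digests, suffixed with the part count. A minimal, self-contained sketch of that calculation (sample part payloads assumed; real parts come from the chunked reader in the diff):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

func main() {
	// Stand-in part payloads, one slice per uploaded part.
	parts := [][]byte{[]byte("part one"), []byte("part two")}
	var md5s []byte
	for _, p := range parts {
		sum := md5.Sum(p)
		md5s = append(md5s, sum[:]...)
	}
	// MD5 of the concatenated per-part MD5s, plus "-<part count>".
	hashOfHashes := md5.Sum(md5s)
	wantETag := fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(parts))
	// Comparable, after lowercasing and stripping quotes, to the ETag S3 returns.
	fmt.Println(wantETag)
}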
@@ -1,54 +0,0 @@
package seafile

import (
"sync"
"time"

"github.com/rclone/rclone/fs"
)

// Renew allows tokens to be renewed on expiry.
type Renew struct {
ts *time.Ticker // timer indicating when it's time to renew the token
run func() error // the callback to do the renewal
done chan interface{} // channel to end the go routine
shutdown *sync.Once
}

// NewRenew creates a new Renew struct and starts a background process
// which renews the token whenever it expires. It uses the run() call
// to do the renewal.
func NewRenew(every time.Duration, run func() error) *Renew {
r := &Renew{
ts: time.NewTicker(every),
run: run,
done: make(chan interface{}),
shutdown: &sync.Once{},
}
go r.renewOnExpiry()
return r
}

func (r *Renew) renewOnExpiry() {
for {
select {
case <-r.ts.C:
err := r.run()
if err != nil {
fs.Errorf(nil, "error while refreshing decryption token: %s", err)
}

case <-r.done:
return
}
}
}

// Shutdown stops the ticker and no more renewal will take place.
func (r *Renew) Shutdown() {
// closing a channel can only be done once
r.shutdown.Do(func() {
r.ts.Stop()
close(r.done)
})
}
@@ -1,34 +0,0 @@
package seafile

import (
"sync/atomic"
"testing"
"time"

"github.com/stretchr/testify/assert"
)

func TestShouldAllowShutdownTwice(t *testing.T) {
renew := NewRenew(time.Hour, func() error {
return nil
})
renew.Shutdown()
renew.Shutdown()
}

func TestRenewalInTimeLimit(t *testing.T) {
var count int64

renew := NewRenew(100*time.Millisecond, func() error {
atomic.AddInt64(&count, 1)
return nil
})
time.Sleep(time.Second)
renew.Shutdown()

// there's no guarantee the CI agent can handle a simple goroutine
renewCount := atomic.LoadInt64(&count)
t.Logf("renew count = %d", renewCount)
assert.Greater(t, renewCount, int64(0))
assert.Less(t, renewCount, int64(11))
}
@@ -143,7 +143,6 @@ type Fs struct {
createDirMutex sync.Mutex // Protect creation of directories
useOldDirectoryAPI bool // Use the old API v2 if seafile < 7
moveDirNotAvailable bool // Version < 7.0 don't have an API to move a directory
renew *Renew // Renew an encrypted library token
}

// ------------------------------------------------------------
@@ -269,11 +268,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
// And remove the public link feature
f.features.PublicLink = nil

// renew the library password every 45 minutes
f.renew = NewRenew(45*time.Minute, func() error {
return f.authorizeLibrary(context.Background(), libraryID)
})
}
} else {
// Deactivate the cleaner feature since there's no library selected
@@ -389,15 +383,6 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
return nil, fmt.Errorf("unknown state %q", config.State)
}

// Shutdown the Fs
func (f *Fs) Shutdown(ctx context.Context) error {
if f.renew == nil {
return nil
}
f.renew.Shutdown()
return nil
}

// sets the AuthorizationToken up
func (f *Fs) setAuthorizationToken(token string) {
f.srv.SetHeader("Authorization", "Token "+token)
@@ -1346,7 +1331,6 @@ var (
_ fs.PutStreamer = &Fs{}
_ fs.PublicLinker = &Fs{}
_ fs.UserInfoer = &Fs{}
_ fs.Shutdowner = &Fs{}
_ fs.Object = &Object{}
_ fs.IDer = &Object{}
)

@@ -1775,14 +1775,11 @@ func (o *Object) setMetadata(info os.FileInfo) {

// statRemote stats the file or directory at the remote given
func (f *Fs) stat(ctx context.Context, remote string) (info os.FileInfo, err error) {
absPath := remote
if !strings.HasPrefix(remote, "/") {
absPath = path.Join(f.absRoot, remote)
}
c, err := f.getSftpConnection(ctx)
if err != nil {
return nil, fmt.Errorf("stat: %w", err)
}
absPath := path.Join(f.absRoot, remote)
info, err = c.sftpClient.Stat(absPath)
f.putSftpConnection(&c, err)
return info, err

@@ -81,7 +81,7 @@ func (c *conn) closed() bool {
// list the shares
_, nopErr = c.smbSession.ListSharenames()
}
return nopErr != nil
return nopErr == nil
}

// Show that we are using a SMB session
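The one-character change in the closed() hunk above flips its meaning: a pooled connection should count as closed when the no-op probe (listing share names) fails, so returning nopErr != nil is the correct form. A small, runnable illustration of the intended semantics, with the probe abstracted as a function:

package main

import (
	"errors"
	"fmt"
)

// isClosed reports whether a pooled connection is dead: an error from the
// liveness probe means the server no longer answers on this connection.
func isClosed(probe func() error) bool {
	return probe() != nil
}

func main() {
	alive := func() error { return nil }
	dead := func() error { return errors.New("broken pipe") }
	fmt.Println(isClosed(alive), isClosed(dead)) // false true
}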
@@ -103,10 +103,6 @@ func (f *Fs) getSessions() int32 {

// Open a new connection to the SMB server.
func (f *Fs) newConnection(ctx context.Context, share string) (c *conn, err error) {
// As we are pooling these connections we need to decouple
// them from the current context
ctx = context.Background()

c, err = f.dial(ctx, "tcp", f.opt.Host+":"+f.opt.Port)
if err != nil {
return nil, fmt.Errorf("couldn't connect SMB: %w", err)

@@ -478,15 +478,11 @@ func (f *Fs) makeEntryRelative(share, _path, relative string, stat os.FileInfo)
}

func (f *Fs) ensureDirectory(ctx context.Context, share, _path string) error {
dir := path.Dir(_path)
if dir == "." {
return nil
}
cn, err := f.getConnection(ctx, share)
if err != nil {
return err
}
err = cn.smbShare.MkdirAll(f.toSambaPath(dir), 0o755)
err = cn.smbShare.MkdirAll(f.toSambaPath(path.Dir(_path)), 0o755)
f.putConnection(&cn)
return err
}

@@ -23,7 +23,6 @@ import (
"golang.org/x/text/unicode/norm"

"storj.io/uplink"
"storj.io/uplink/edge"
)

const (
@@ -32,9 +31,9 @@ const (
)

var satMap = map[string]string{
"us1.storj.io": "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us1.storj.io:7777",
"eu1.storj.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@eu1.storj.io:7777",
"ap1.storj.io": "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@ap1.storj.io:7777",
"us-central-1.storj.io": "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
"europe-west-1.storj.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
"asia-east-1.storj.io": "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
}

// Register with Fs
@@ -106,16 +105,16 @@ func init() {
Name: "satellite_address",
Help: "Satellite address.\n\nCustom satellite address should match the format: `<nodeid>@<address>:<port>`.",
Provider: newProvider,
Default: "us1.storj.io",
Default: "us-central-1.storj.io",
Examples: []fs.OptionExample{{
Value: "us1.storj.io",
Help: "US1",
Value: "us-central-1.storj.io",
Help: "US Central 1",
}, {
Value: "eu1.storj.io",
Help: "EU1",
Value: "europe-west-1.storj.io",
Help: "Europe West 1",
}, {
Value: "ap1.storj.io",
Help: "AP1",
Value: "asia-east-1.storj.io",
Help: "Asia East 1",
},
},
},
@@ -157,13 +156,10 @@ type Fs struct {

// Check the interfaces are satisfied.
var (
_ fs.Fs = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Mover = &Fs{}
_ fs.Copier = &Fs{}
_ fs.Purger = &Fs{}
_ fs.PublicLinker = &Fs{}
_ fs.Fs = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Mover = &Fs{}
)

// NewFs creates a filesystem backed by Storj.
@@ -548,7 +544,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
defer func() {
if err != nil {
aerr := upload.Abort()
if aerr != nil && !errors.Is(aerr, uplink.ErrUploadDone) {
if aerr != nil {
fs.Errorf(f, "cp input ./%s %+v: %+v", src.Remote(), options, aerr)
}
}
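The hunk above narrows the abort logging so that uplink.ErrUploadDone, which Abort returns once Commit has already succeeded, is not reported as a failure. A self-contained sketch of the defer-abort idiom, with a toy upload type standing in for the uplink one:

package main

import (
	"errors"
	"fmt"
)

// errDone mimics uplink.ErrUploadDone: Abort after a successful Commit
// reports it, and it must not be treated as a real failure.
var errDone = errors.New("upload done")

type upload struct{ committed bool }

func (u *upload) Commit() error { u.committed = true; return nil }
func (u *upload) Abort() error {
	if u.committed {
		return errDone
	}
	return nil
}

// put shows the pattern: abort on any error path, but filter out the
// "already done" case rather than logging it.
func put(failAfterCommit bool) (err error) {
	u := &upload{}
	defer func() {
		if err != nil {
			if aerr := u.Abort(); aerr != nil && !errors.Is(aerr, errDone) {
				fmt.Println("abort failed:", aerr)
			}
		}
	}()
	if err = u.Commit(); err != nil {
		return err
	}
	if failAfterCommit {
		// e.g. a post-commit check failed; Abort now yields errDone,
		// which the filter above deliberately ignores.
		return errors.New("post-commit failure")
	}
	return nil
}

func main() {
	fmt.Println(put(false), put(true))
}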
@@ -563,16 +559,6 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .

_, err = io.Copy(upload, in)
if err != nil {
if errors.Is(err, uplink.ErrBucketNotFound) {
// Rclone assumes the backend will create the bucket if not existing yet.
// Here we create the bucket and return a retry error for rclone to retry the upload.
_, err = f.project.EnsureBucket(ctx, bucketName)
if err != nil {
return nil, err
}
return nil, fserrors.RetryError(errors.New("bucket was not available, now created, the upload must be retried"))
}

err = fserrors.RetryError(err)
fs.Errorf(f, "cp input ./%s %+v: %+v\n", src.Remote(), options, err)

@@ -734,143 +720,3 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
// Read the new object
return f.NewObject(ctx, remote)
}

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}

// Copy parameters
srcBucket, srcKey := bucket.Split(srcObj.absolute)
dstBucket, dstKey := f.absolute(remote)
options := uplink.CopyObjectOptions{}

// Do the copy
newObject, err := f.project.CopyObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
if err != nil {
// Make sure destination bucket exists
_, err := f.project.EnsureBucket(ctx, dstBucket)
if err != nil {
return nil, fmt.Errorf("copy object failed to create destination bucket: %w", err)
}
// And try again
newObject, err = f.project.CopyObject(ctx, srcBucket, srcKey, dstBucket, dstKey, &options)
if err != nil {
return nil, fmt.Errorf("copy object failed: %w", err)
}
}

// Return the new object
return newObjectFromUplink(f, remote, newObject), nil
}

// Purge all files in the directory specified
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
bucket, directory := f.absolute(dir)
if bucket == "" {
return errors.New("can't purge from root")
}

if directory == "" {
_, err := f.project.DeleteBucketWithObjects(ctx, bucket)
if errors.Is(err, uplink.ErrBucketNotFound) {
return fs.ErrorDirNotFound
}
return err
}

fs.Infof(directory, "Quick delete is available only for entire bucket. Falling back to list and delete.")
objects := f.project.ListObjects(ctx, bucket,
&uplink.ListObjectsOptions{
Prefix: directory + "/",
Recursive: true,
},
)
if err := objects.Err(); err != nil {
return err
}

empty := true
for objects.Next() {
empty = false
_, err := f.project.DeleteObject(ctx, bucket, objects.Item().Key)
if err != nil {
return err
}
fs.Infof(objects.Item().Key, "Deleted")
}

if empty {
return fs.ErrorDirNotFound
}

return nil
}

// PublicLink generates a public link to the remote path (usually readable by anyone)
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
bucket, key := f.absolute(remote)
if bucket == "" {
return "", errors.New("path must be specified")
}

// Rclone requires that a link is only generated if the remote path exists
if key == "" {
_, err := f.project.StatBucket(ctx, bucket)
if err != nil {
return "", err
}
} else {
_, err := f.project.StatObject(ctx, bucket, key)
if err != nil {
if !errors.Is(err, uplink.ErrObjectNotFound) {
return "", err
}
// No object found, check if there is such a prefix
iter := f.project.ListObjects(ctx, bucket, &uplink.ListObjectsOptions{Prefix: key + "/"})
if iter.Err() != nil {
return "", iter.Err()
}
if !iter.Next() {
return "", err
}
}
}

sharedPrefix := uplink.SharePrefix{Bucket: bucket, Prefix: key}

permission := uplink.ReadOnlyPermission()
if expire.IsSet() {
permission.NotAfter = time.Now().Add(time.Duration(expire))
}

sharedAccess, err := f.access.Share(permission, sharedPrefix)
if err != nil {
return "", fmt.Errorf("sharing access to object failed: %w", err)
}

creds, err := (&edge.Config{
AuthServiceAddress: "auth.storjshare.io:7777",
}).RegisterAccess(ctx, sharedAccess, &edge.RegisterAccessOptions{Public: true})
if err != nil {
return "", fmt.Errorf("creating public link failed: %w", err)
}

return edge.JoinShareURL("https://link.storjshare.io", creds.AccessKeyID, bucket, key, nil)
}

@@ -214,7 +214,7 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe

client := fshttp.NewClient(ctx)
f.srv = rest.NewClient(client).SetRoot(apiBaseURL)
f.IDRegexp = regexp.MustCompile(`https://uptobox\.com/([a-zA-Z0-9]+)`)
f.IDRegexp = regexp.MustCompile("https://uptobox.com/([a-zA-Z0-9]+)")

_, err = f.readMetaDataForPath(ctx, f.dirPath(""), &api.MetadataRequestOptions{Limit: 10})
if err != nil {
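The uptobox hunk above escapes the dot in the ID regexp; with a bare ".", the pattern would also match any character in that position. A quick check of the escaped pattern (sample URL made up for illustration):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	idRegexp := regexp.MustCompile(`https://uptobox\.com/([a-zA-Z0-9]+)`)
	if m := idRegexp.FindStringSubmatch("https://uptobox.com/abc123XYZ"); m != nil {
		fmt.Println("file id:", m[1]) // abc123XYZ
	}
}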
@@ -712,7 +712,6 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
continue
}
subPath := u.Path[len(baseURL.Path):]
subPath = strings.TrimPrefix(subPath, "/") // ignore leading / here for davrods
if f.opt.Enc != encoder.EncodeZero {
subPath = f.opt.Enc.ToStandardPath(subPath)
}
@@ -992,7 +991,6 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
}
return nil, fs.ErrorCantMove
}
srcFs := srcObj.fs
dstPath := f.filePath(remote)
err := f.mkParentDir(ctx, dstPath)
if err != nil {
@@ -1015,10 +1013,9 @@ func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, metho
if f.useOCMtime {
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
}
// Direct the MOVE/COPY to the source server
err = srcFs.pacer.Call(func() (bool, error) {
resp, err = srcFs.srv.Call(ctx, &opts)
return srcFs.shouldRetry(ctx, resp, err)
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("Copy call failed: %w", err)
@@ -1112,10 +1109,9 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
"Overwrite": "F",
},
}
// Direct the MOVE/COPY to the source server
err = srcFs.pacer.Call(func() (bool, error) {
resp, err = srcFs.srv.Call(ctx, &opts)
return srcFs.shouldRetry(ctx, resp, err)
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return fmt.Errorf("DirMove MOVE call failed: %w", err)
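The two hunks above route MOVE/COPY through the source Fs's pacer and client. In WebDAV the request URL addresses the source resource and the target only appears in the Destination header, so the call has to go to the server that holds the source. A runnable sketch of that request shape (URLs are placeholders, not real endpoints):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// The MOVE is addressed to the source URL; the destination is only a header.
	req, err := http.NewRequest("MOVE", "https://src.example.com/remote.php/webdav/old.txt", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Destination", "https://src.example.com/remote.php/webdav/new.txt")
	req.Header.Set("Overwrite", "F") // fail rather than overwrite, as in DirMove above
	fmt.Println(req.Method, req.URL, "->", req.Header.Get("Destination"))
}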
@@ -24,23 +24,15 @@ var (
// prepareServer prepares the test server and returns a function to tidy it up afterwards
// with each request the headers option tests are executed
func prepareServer(t *testing.T) (configmap.Simple, func()) {
// test the headers are there then send a dummy response to About
// file server
fileServer := http.FileServer(http.Dir(""))

// test the headers are there then pass on to fileServer
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
fmt.Fprintf(w, `<d:multistatus xmlns:d="DAV:" xmlns:s="http://sabredav.org/ns" xmlns:oc="http://owncloud.org/ns" xmlns:nc="http://nextcloud.org/ns">
<d:response>
<d:href>/remote.php/webdav/</d:href>
<d:propstat>
<d:prop>
<d:quota-available-bytes>-3</d:quota-available-bytes>
<d:quota-used-bytes>376461895</d:quota-used-bytes>
</d:prop>
<d:status>HTTP/1.1 200 OK</d:status>
</d:propstat>
</d:response>
</d:multistatus>`)
fileServer.ServeHTTP(w, r)
})

// Make the test server
@@ -76,7 +68,7 @@ func TestHeaders(t *testing.T) {
f, tidy := prepare(t)
defer tidy()

// send an About response since that is all the dummy server can return
// any request will do
_, err := f.Features().About(context.Background())
require.NoError(t, err)
}

@@ -1,17 +0,0 @@
#!/bin/bash
# This adds the version each backend was released to its docs page
set -e
for backend in $( find backend -maxdepth 1 -type d ); do
backend=$(basename $backend)
if [[ "$backend" == "backend" || "$backend" == "vfs" || "$backend" == "all" || "$backend" == "azurefile" ]]; then
continue
fi

commit=$(git log --oneline -- $backend | tail -1 | cut -d' ' -f1)
if [ "$commit" == "" ]; then
commit=$(git log --oneline -- backend/$backend | tail -1 | cut -d' ' -f1)
fi
version=$(git tag --contains $commit | grep ^v | sort -n | head -1)
echo $backend $version
sed -i~ "4i versionIntroduced: \"$version\"" docs/content/${backend}.md
done
@@ -57,7 +57,6 @@ var osarches = []string{
"linux/386",
"linux/amd64",
"linux/arm",
"linux/arm-v6",
"linux/arm-v7",
"linux/arm64",
"linux/mips",
@@ -65,12 +64,10 @@ var osarches = []string{
"freebsd/386",
"freebsd/amd64",
"freebsd/arm",
"freebsd/arm-v6",
"freebsd/arm-v7",
"netbsd/386",
"netbsd/amd64",
"netbsd/arm",
"netbsd/arm-v6",
"netbsd/arm-v7",
"openbsd/386",
"openbsd/amd64",
@@ -85,16 +82,13 @@ var archFlags = map[string][]string{
"386": {"GO386=softfloat"},
"mips": {"GOMIPS=softfloat"},
"mipsle": {"GOMIPS=softfloat"},
"arm": {"GOARM=5"},
"arm-v6": {"GOARM=6"},
"arm-v7": {"GOARM=7"},
}

// Map Go architectures to NFPM architectures
// Any missing are passed straight through
var goarchToNfpm = map[string]string{
"arm": "arm5",
"arm-v6": "arm6",
"arm": "arm6",
"arm-v7": "arm7",
}
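"Any missing are passed straight through" implies a lookup with fallback to the raw GOARCH value. A sketch of the presumed accessor (the helper name is assumed for illustration; the map is copied from the hunk above):

package main

import "fmt"

var goarchToNfpm = map[string]string{
	"arm":    "arm6",
	"arm-v7": "arm7",
}

// nfpmArch maps a Go architecture to its NFPM name, passing unknown
// architectures straight through unchanged.
func nfpmArch(goarch string) string {
	if arch, ok := goarchToNfpm[goarch]; ok {
		return arch
	}
	return goarch
}

func main() {
	fmt.Println(nfpmArch("arm"), nfpmArch("amd64")) // arm6 amd64
}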
@@ -93,9 +93,6 @@ provided by a backend. Where the value is unlimited it is omitted.
Some backends do not support the ` + "`rclone about`" + ` command at all,
see complete list in [documentation](https://rclone.org/overview/#optional-features).
`,
Annotations: map[string]string{
"versionIntroduced": "v1.41",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
f := cmd.NewFsSrc(args)

@@ -12,14 +12,12 @@ import (

var (
noAutoBrowser bool
template string
)

func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.BoolVarP(cmdFlags, &noAutoBrowser, "auth-no-open-browser", "", false, "Do not automatically open auth link in default browser")
flags.StringVarP(cmdFlags, &template, "template", "", "", "The path to a custom Go template for generating HTML responses")
}

var commandDefinition = &cobra.Command{
@@ -30,15 +28,10 @@ Remote authorization. Used to authorize a remote or headless
rclone from a machine with a browser - use as instructed by
rclone config.

Use --auth-no-open-browser to prevent rclone from opening the auth
link in the default browser automatically.

Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.`,
Annotations: map[string]string{
"versionIntroduced": "v1.27",
},
Use the --auth-no-open-browser flag to prevent rclone from opening the auth
link in the default browser automatically.`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 3, command, args)
return config.Authorize(context.Background(), args, noAutoBrowser, template)
return config.Authorize(context.Background(), args, noAutoBrowser)
},
}

@@ -58,9 +58,6 @@ Pass arguments to the backend by placing them on the end of the line
Note to run these commands on a running backend then see
[backend/command](/rc/#backend-command) in the rc docs.
`,
Annotations: map[string]string{
"versionIntroduced": "v1.52",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(2, 1e6, command, args)
name, remote := args[0], args[1]

@@ -115,9 +115,6 @@ var commandDefinition = &cobra.Command{
Use: "bisync remote1:path1 remote2:path2",
Short: shortHelp,
Long: longHelp,
Annotations: map[string]string{
"versionIntroduced": "v1.58",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(2, 2, command, args)
fs1, file1, fs2, file2 := cmd.NewFsSrcDstFiles(args)

@@ -25,10 +25,6 @@ var commandDefinition = &cobra.Command{
Print cache stats for a remote in JSON format
`,
Hidden: true,
Annotations: map[string]string{
"versionIntroduced": "v1.39",
"status": "Deprecated",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fs.Logf(nil, `"rclone cachestats" is deprecated, use "rclone backend stats %s" instead`, args[0])

@@ -57,9 +57,6 @@ the end and |--offset| and |--count| to print a section in the middle.
Note that if offset is negative it will count from the end, so
|--offset -1 --count 1| is equivalent to |--tail 1|.
`, "|", "`"),
Annotations: map[string]string{
"versionIntroduced": "v1.33",
},
Run: func(command *cobra.Command, args []string) {
usedOffset := offset != 0 || count >= 0
usedHead := head > 0

@@ -37,9 +37,6 @@ that don't support hashes or if you really want to check all the data.

Note that hash values in the SUM file are treated as case insensitive.
`, "|", "`") + check.FlagsHelp,
Annotations: map[string]string{
"versionIntroduced": "v1.56",
},
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(3, 3, command, args)
var hashType hash.Type

@@ -20,9 +20,6 @@ var commandDefinition = &cobra.Command{
Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes.
`,
Annotations: map[string]string{
"versionIntroduced": "v1.31",
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)

12
cmd/cmd.go
@@ -73,13 +73,11 @@ func ShowVersion() {

linking, tagString := buildinfo.GetLinkingAndTags()

arch := buildinfo.GetArch()

fmt.Printf("rclone %s\n", fs.Version)
fmt.Printf("- os/version: %s\n", osVersion)
fmt.Printf("- os/kernel: %s\n", osKernel)
fmt.Printf("- os/type: %s\n", runtime.GOOS)
fmt.Printf("- os/arch: %s\n", arch)
fmt.Printf("- os/arch: %s\n", runtime.GOARCH)
fmt.Printf("- go/version: %s\n", runtime.Version())
fmt.Printf("- go/linking: %s\n", linking)
fmt.Printf("- go/tags: %s\n", tagString)
@@ -401,15 +399,9 @@ func initConfig() {
// Start accounting
accounting.Start(ctx)

// Configure console
// Hide console window
if ci.NoConsole {
// Hide the console window
terminal.HideConsole()
} else {
// Enable color support on stdout if possible.
// This enables virtual terminal processing on Windows 10,
// adding native support for ANSI/VT100 escape sequences.
terminal.EnableColorsStdout()
}

// Load filters

@@ -8,7 +8,6 @@ import (
"io"
"os"
"path"
"strings"
"sync"
"sync/atomic"
"time"
@@ -568,21 +567,6 @@ func (fsys *FS) Listxattr(path string, fill func(name string) bool) (errc int) {
return -fuse.ENOSYS
}

// Getpath allows a case-insensitive file system to report the correct case of
// a file path.
func (fsys *FS) Getpath(path string, fh uint64) (errc int, normalisedPath string) {
defer log.Trace(path, "Getpath fh=%d", fh)("errc=%d, normalisedPath=%q", &errc, &normalisedPath)
node, _, errc := fsys.getNode(path, fh)
if errc != 0 {
return errc, ""
}
normalisedPath = node.Path()
if !strings.HasPrefix("/", normalisedPath) {
normalisedPath = "/" + normalisedPath
}
return 0, normalisedPath
}

// Translate errors from mountlib
func translateError(err error) (errc int) {
if err == nil {
@@ -647,7 +631,6 @@ func translateOpenFlags(inFlags int) (outFlags int) {
var (
_ fuse.FileSystemInterface = (*FS)(nil)
_ fuse.FileSystemOpenEx = (*FS)(nil)
_ fuse.FileSystemGetpath = (*FS)(nil)
//_ fuse.FileSystemChflags = (*FS)(nil)
//_ fuse.FileSystemSetcrtime = (*FS)(nil)
//_ fuse.FileSystemSetchgtime = (*FS)(nil)

@@ -84,6 +84,9 @@ func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.
if opt.DaemonTimeout != 0 {
options = append(options, "-o", fmt.Sprintf("daemon_timeout=%d", int(opt.DaemonTimeout.Seconds())))
}
if opt.AllowNonEmpty {
options = append(options, "-o", "nonempty")
}
if opt.AllowOther {
options = append(options, "-o", "allow_other")
}
@@ -149,14 +152,14 @@ func waitFor(fn func() bool) (ok bool) {
// report an error when fusermount is called.
func mount(VFS *vfs.VFS, mountPath string, opt *mountlib.Options) (<-chan error, func() error, error) {
// Get mountpoint using OS specific logic
f := VFS.Fs()
mountpoint, err := getMountpoint(f, mountPath, opt)
mountpoint, err := getMountpoint(mountPath, opt)
if err != nil {
return nil, nil, err
}
fs.Debugf(nil, "Mounting on %q (%q)", mountpoint, opt.VolumeName)

// Create underlying FS
f := VFS.Fs()
fsys := NewFS(VFS)
host := fuse.NewFileSystemHost(fsys)
host.SetCapReaddirPlus(true) // only works on Windows

@@ -9,10 +9,9 @@ import (
"os"

"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
)

func getMountpoint(f fs.Fs, mountPath string, opt *mountlib.Options) (string, error) {
func getMountpoint(mountPath string, opt *mountlib.Options) (string, error) {
fi, err := os.Stat(mountPath)
if err != nil {
return "", fmt.Errorf("failed to retrieve mount path information: %w", err)
@@ -20,11 +19,5 @@ func getMountpoint(f fs.Fs, mountPath string, opt *mountlib.Options) (string, er
if !fi.IsDir() {
return "", errors.New("mount path is not a directory")
}
if err = mountlib.CheckOverlap(f, mountPath); err != nil {
return "", err
}
if err = mountlib.CheckAllowNonEmpty(mountPath, opt); err != nil {
return "", err
}
return mountPath, nil
}

@@ -18,13 +18,13 @@ import (
var isDriveRegex = regexp.MustCompile(`^[a-zA-Z]\:$`)
var isDriveRootPathRegex = regexp.MustCompile(`^[a-zA-Z]\:\\$`)
var isDriveOrRootPathRegex = regexp.MustCompile(`^[a-zA-Z]\:\\?$`)
var isNetworkSharePathRegex = regexp.MustCompile(`^\\\\[^\\\?]+\\[^\\]`)
var isNetworkSharePathRegex = regexp.MustCompile(`^\\\\[^\\]+\\[^\\]`)

// isNetworkSharePath returns true if the given string is a valid network share path,
// in the basic UNC format "\\Server\Share\Path", where the first two path components
// are required ("\\Server\Share", which represents the volume).
// Extended-length UNC format "\\?\UNC\Server\Share\Path" is not considered, as it is
// not supported by cgofuse/winfsp, so returns false for any paths with prefix "\\?\".
// not supported by cgofuse/winfsp.
// Note: There is a UNCPath function in lib/file, but it refers to any extended-length
// paths using prefix "\\?\", and not necessarily network resource UNC paths.
func isNetworkSharePath(l string) bool {
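Adding \? to the negated character class is what makes the tightened pattern reject extended-length paths. A quick, runnable demonstration:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`^\\\\[^\\\?]+\\[^\\]`)
	fmt.Println(re.MatchString(`\\server\share\path`))  // true: basic UNC volume
	fmt.Println(re.MatchString(`\\?\UNC\server\share`)) // false: extended-length prefix
}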
@@ -94,7 +94,7 @@ func handleNetworkShareMountpath(mountpath string, opt *mountlib.Options) (strin
|
||||
}
|
||||
|
||||
// handleLocalMountpath handles the case where mount path is a local file system path.
|
||||
func handleLocalMountpath(f fs.Fs, mountpath string, opt *mountlib.Options) (string, error) {
|
||||
func handleLocalMountpath(mountpath string, opt *mountlib.Options) (string, error) {
|
||||
// Assuming path is drive letter or directory path, not network share (UNC) path.
|
||||
// If drive letter: Must be given as a single character followed by ":" and nothing else.
|
||||
// Else, assume directory path: Directory must not exist, but its parent must.
|
||||
@@ -125,9 +125,6 @@ func handleLocalMountpath(f fs.Fs, mountpath string, opt *mountlib.Options) (str
|
||||
}
|
||||
return "", fmt.Errorf("failed to retrieve mountpoint directory parent information: %w", err)
|
||||
}
|
||||
if err = mountlib.CheckOverlap(f, mountpath); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
return mountpath, nil
|
||||
}
|
||||
@@ -161,19 +158,9 @@ func handleVolumeName(opt *mountlib.Options, volumeName string) {
|
||||
|
||||
// getMountpoint handles mounting details on Windows,
|
||||
// where disk and network based file systems are treated different.
|
||||
func getMountpoint(f fs.Fs, mountpath string, opt *mountlib.Options) (mountpoint string, err error) {
|
||||
// Inform about some options not relevant in this mode
|
||||
if opt.AllowNonEmpty {
|
||||
fs.Logf(nil, "--allow-non-empty flag does nothing on Windows")
|
||||
}
|
||||
if opt.AllowRoot {
|
||||
fs.Logf(nil, "--allow-root flag does nothing on Windows")
|
||||
}
|
||||
if opt.AllowOther {
|
||||
fs.Logf(nil, "--allow-other flag does nothing on Windows")
|
||||
}
|
||||
func getMountpoint(mountpath string, opt *mountlib.Options) (mountpoint string, err error) {
|
||||
|
||||
// Handle mountpath
|
||||
// First handle mountpath
|
||||
var volumeName string
|
||||
if isDefaultPath(mountpath) {
|
||||
// Mount path indicates defaults, which will automatically pick an unused drive letter.
|
||||
@@ -185,10 +172,10 @@ func getMountpoint(f fs.Fs, mountpath string, opt *mountlib.Options) (mountpoint
|
||||
volumeName = mountpath[1:] // WinFsp requires volume prefix as UNC-like path but with only a single backslash
|
||||
} else {
|
||||
// Mount path is drive letter or directory path.
|
||||
mountpoint, err = handleLocalMountpath(f, mountpath, opt)
|
||||
mountpoint, err = handleLocalMountpath(mountpath, opt)
|
||||
}
|
||||
|
||||
// Handle volume name
|
||||
// Second handle volume name
|
||||
handleVolumeName(opt, volumeName)
|
||||
|
||||
// Done, return mountpoint to be used, together with updated mount options.
|
||||
|
||||
@@ -44,9 +44,6 @@ var configCommand = &cobra.Command{
|
||||
remotes and manage existing ones. You may also set or remove a
|
||||
password to protect your configuration.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
return config.EditConfig(context.Background())
|
||||
@@ -57,18 +54,12 @@ var configEditCommand = &cobra.Command{
|
||||
Use: "edit",
|
||||
Short: configCommand.Short,
|
||||
Long: configCommand.Long,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
},
|
||||
Run: configCommand.Run,
|
||||
Run: configCommand.Run,
|
||||
}
|
||||
|
||||
var configFileCommand = &cobra.Command{
|
||||
Use: "file",
|
||||
Short: `Show path of configuration file in use.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.38",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
config.ShowConfigLocation()
|
||||
@@ -78,9 +69,6 @@ var configFileCommand = &cobra.Command{
|
||||
var configTouchCommand = &cobra.Command{
|
||||
Use: "touch",
|
||||
Short: `Ensure configuration file exists.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.56",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
config.SaveConfig()
|
||||
@@ -90,9 +78,6 @@ var configTouchCommand = &cobra.Command{
|
||||
var configPathsCommand = &cobra.Command{
|
||||
Use: "paths",
|
||||
Short: `Show paths used for configuration, cache, temp etc.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.57",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
fmt.Printf("Config file: %s\n", config.GetConfigPath())
|
||||
@@ -104,9 +89,6 @@ var configPathsCommand = &cobra.Command{
|
||||
var configShowCommand = &cobra.Command{
|
||||
Use: "show [<remote>]",
|
||||
Short: `Print (decrypted) config file, or the config for a single remote.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.38",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(0, 1, command, args)
|
||||
if len(args) == 0 {
|
||||
@@ -121,9 +103,6 @@ var configShowCommand = &cobra.Command{
|
||||
var configDumpCommand = &cobra.Command{
|
||||
Use: "dump",
|
||||
Short: `Dump the config file as JSON.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
return config.Dump()
|
||||
@@ -133,9 +112,6 @@ var configDumpCommand = &cobra.Command{
|
||||
var configProvidersCommand = &cobra.Command{
|
||||
Use: "providers",
|
||||
Short: `List in JSON format all the providers and options.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
return config.JSONListProviders()
|
||||
@@ -176,7 +152,7 @@ This will look something like (some irrelevant detail removed):
|
||||
"State": "*oauth-islocal,teamdrive,,",
|
||||
"Option": {
|
||||
"Name": "config_is_local",
|
||||
"Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n",
|
||||
"Help": "Use auto config?\n * Say Y if not sure\n * Say N if you are working on a remote or headless machine\n",
|
||||
"Default": true,
|
||||
"Examples": [
|
||||
{
|
||||
@@ -250,9 +226,6 @@ using remote authorization you would do this:
|
||||
|
||||
rclone config create mydrive drive config_is_local=false
|
||||
`, "|", "`") + configPasswordHelp,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(2, 256, command, args)
|
||||
in, err := argsToMap(args[2:])
|
||||
@@ -316,9 +289,6 @@ require this add an extra parameter thus:
|
||||
|
||||
rclone config update myremote env_auth=true config_refresh_token=false
|
||||
`, "|", "`") + configPasswordHelp,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(1, 256, command, args)
|
||||
in, err := argsToMap(args[1:])
|
||||
@@ -334,9 +304,6 @@ require this add an extra parameter thus:
|
||||
var configDeleteCommand = &cobra.Command{
|
||||
Use: "delete name",
|
||||
Short: "Delete an existing remote.",
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
config.DeleteRemote(args[0])
|
||||
@@ -359,9 +326,6 @@ For example, to set password of a remote of name myremote you would do:
|
||||
This command is obsolete now that "config update" and "config create"
|
||||
both support obscuring passwords directly.
|
||||
`, "|", "`"),
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(1, 256, command, args)
|
||||
in, err := argsToMap(args[1:])
|
||||
|
||||
@@ -46,9 +46,6 @@ the destination.
|
||||
|
||||
**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.35",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(2, 2, command, args)
|
||||
fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args)
|
||||
|
||||
@@ -51,9 +51,6 @@ destination if there is one with the same name.
|
||||
Setting ` + "`--stdout`" + ` or making the output file name ` + "`-`" + `
|
||||
will cause the output to be written to standard output.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.43",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) (err error) {
|
||||
cmd.CheckArgs(1, 2, command, args)
|
||||
|
||||
|
||||
@@ -47,9 +47,6 @@ the files in remote:path.
|
||||
|
||||
After it has run it will log the status of the encryptedremote:.
|
||||
` + check.FlagsHelp,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.36",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(2, 2, command, args)
|
||||
fsrc, fdst := cmd.NewFsSrcDst(args)
|
||||
|
||||
@@ -41,9 +41,6 @@ use it like this
|
||||
Another way to accomplish this is by using the ` + "`rclone backend encode` (or `decode`)" + ` command.
|
||||
See the documentation on the [crypt](/crypt/) overlay for more info.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.38",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(2, 11, command, args)
|
||||
cmd.Run(false, false, command, func() error {
|
||||
|
||||
@@ -135,9 +135,6 @@ Or
|
||||
|
||||
rclone dedupe rename "drive:Google Photos"
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.27",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 2, command, args)
|
||||
if len(args) > 1 {
|
||||
|
||||
@@ -53,9 +53,6 @@ delete all files bigger than 100 MiB.
|
||||
**Important**: Since this can cause data loss, test first with the
|
||||
|--dry-run| or the |--interactive|/|-i| flag.
|
||||
`, "|", "`"),
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.27",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@@ -23,17 +22,14 @@ Remove a single file from remote. Unlike ` + "`" + `delete` + "`" + ` it cannot
|
||||
remove a directory and it doesn't obey include/exclude filters - if the specified file exists,
|
||||
it will always be removed.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.42",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
f, fileName := cmd.NewFsFile(args[0])
|
||||
fs, fileName := cmd.NewFsFile(args[0])
|
||||
cmd.Run(true, false, command, func() error {
|
||||
if fileName == "" {
|
||||
return fmt.Errorf("%s is a directory or doesn't exist: %w", args[0], fs.ErrorObjectNotFound)
|
||||
return fmt.Errorf("%s is a directory or doesn't exist", args[0])
|
||||
}
|
||||
fileObj, err := f.NewObject(context.Background(), fileName)
|
||||
fileObj, err := fs.NewObject(context.Background(), fileName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -17,7 +17,4 @@ var completionDefinition = &cobra.Command{
|
||||
Generates a shell completion script for rclone.
|
||||
Run with ` + "`--help`" + ` to list the supported shells.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.33",
|
||||
},
|
||||
}
|
||||
|
||||
@@ -31,7 +31,6 @@ type frontmatter struct {
|
||||
Slug string
|
||||
URL string
|
||||
Source string
|
||||
Annotations map[string]string
|
||||
}
|
||||
|
||||
var frontmatterTemplate = template.Must(template.New("frontmatter").Parse(`---
|
||||
@@ -39,9 +38,6 @@ title: "{{ .Title }}"
|
||||
description: "{{ .Description }}"
|
||||
slug: {{ .Slug }}
|
||||
url: {{ .URL }}
|
||||
{{- range $key, $value := .Annotations }}
|
||||
{{ $key }}: {{ $value }}
|
||||
{{- end }}
|
||||
# autogenerated - DO NOT EDIT, instead edit the source code in {{ .Source }} and as part of making a release run "make commanddocs"
|
||||
---
|
||||
`))
|
||||
@@ -53,9 +49,6 @@ var commandDefinition = &cobra.Command{
|
||||
This produces markdown docs for the rclone commands to the directory
|
||||
supplied. These are in a format suitable for hugo to render into the
|
||||
rclone.org website.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.33",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
now := time.Now().Format(time.RFC3339)
|
||||
@@ -82,24 +75,17 @@ rclone.org website.`,
|
||||
return err
|
||||
}
|
||||
|
||||
// Look up name => details for prepender
|
||||
type commandDetails struct {
|
||||
Short string
|
||||
Annotations map[string]string
|
||||
}
|
||||
var commands = map[string]commandDetails{}
|
||||
var addCommandDetails func(root *cobra.Command)
|
||||
addCommandDetails = func(root *cobra.Command) {
|
||||
// Look up name => description for prepender
|
||||
var description = map[string]string{}
|
||||
var addDescription func(root *cobra.Command)
|
||||
addDescription = func(root *cobra.Command) {
|
||||
name := strings.ReplaceAll(root.CommandPath(), " ", "_") + ".md"
|
||||
commands[name] = commandDetails{
|
||||
Short: root.Short,
|
||||
Annotations: root.Annotations,
|
||||
}
|
||||
description[name] = root.Short
|
||||
for _, c := range root.Commands() {
|
||||
addCommandDetails(c)
|
||||
addDescription(c)
|
||||
}
|
||||
}
|
||||
addCommandDetails(cmd.Root)
|
||||
addDescription(cmd.Root)
|
||||
|
||||
// markup for the docs files
|
||||
prepender := func(filename string) string {
|
||||
@@ -108,11 +94,10 @@ rclone.org website.`,
|
||||
data := frontmatter{
|
||||
Date: now,
|
||||
Title: strings.ReplaceAll(base, "_", " "),
|
||||
Description: commands[name].Short,
|
||||
Description: description[name],
|
||||
Slug: base,
|
||||
URL: "/commands/" + strings.ToLower(base) + "/",
|
||||
Source: strings.ReplaceAll(strings.ReplaceAll(base, "rclone", "cmd"), "_", "/") + "/",
|
||||
Annotations: commands[name].Annotations,
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
err := frontmatterTemplate.Execute(&buf, data)
|
||||
|
||||
@@ -112,9 +112,6 @@ Then
|
||||
|
||||
Note that hash names are case insensitive and values are output in lower case.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.41",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(0, 2, command, args)
|
||||
if len(args) == 0 {
|
||||
|
||||
@@ -49,9 +49,6 @@ link. Exact capabilities depend on the remote, but the link will
|
||||
always by default be created with the least constraints – e.g. no
|
||||
expiry, no password protection, accessible without account.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.41",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc, remote := cmd.NewFsFile(args[0])
|
||||
|
||||
@@ -30,9 +30,6 @@ rclone listremotes lists all the available remotes from the config file.
|
||||
|
||||
When used with the ` + "`--long`" + ` flag it lists the types too.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.34",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
remotes := config.FileSections()
|
||||
|
||||
@@ -142,9 +142,6 @@ those only (without traversing the whole directory structure):
|
||||
rclone copy --files-from-raw new_files /path/to/local remote:path
|
||||
|
||||
` + lshelp.Help,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.40",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
|
||||
@@ -112,9 +112,6 @@ will be shown ("2017-05-31T16:15:57+01:00").
|
||||
The whole output can be processed as a JSON blob, or alternatively it
|
||||
can be processed line by line as each item is written one to a line.
|
||||
` + lshelp.Help,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.37",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
var fsrc fs.Fs
|
||||
|
||||
@@ -31,9 +31,6 @@ Eg
|
||||
37600 2016-06-25 18:55:40.814629136 fubuwic
|
||||
|
||||
` + lshelp.Help,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.02",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(1, 1, command, args)
|
||||
fsrc := cmd.NewFsSrc(args)
|
||||
|
||||
@@ -38,9 +38,6 @@ by not passing a remote:path, or by passing a hyphen as remote:path
|
||||
when there is data to read (if not, the hyphen will be treated literally,
|
||||
as a relative path).
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.02",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
cmd.CheckArgs(0, 1, command, args)
|
||||
if found, err := hashsum.CreateFromStdinArg(hash.MD5, args, 0); found {
|
||||
|
||||
@@ -34,6 +34,9 @@ func mountOptions(VFS *vfs.VFS, device string, opt *mountlib.Options) (options [
	if opt.AsyncRead {
		options = append(options, fuse.AsyncRead())
	}
	if opt.AllowNonEmpty {
		options = append(options, fuse.AllowNonEmptyMount())
	}
	if opt.AllowOther {
		options = append(options, fuse.AllowOther())
	}
@@ -69,17 +72,9 @@ func mountOptions(VFS *vfs.VFS, device string, opt *mountlib.Options) (options [
// returns an error, and an error channel for the serve process to
// report an error when fusermount is called.
func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error, func() error, error) {
	f := VFS.Fs()
	if runtime.GOOS == "darwin" {
		fs.Logf(nil, "macOS users: please try \"rclone cmount\" as it will be the default in v1.54")
	}
	if err := mountlib.CheckOverlap(f, mountpoint); err != nil {
		return nil, nil, err
	}
	if err := mountlib.CheckAllowNonEmpty(mountpoint, opt); err != nil {
		return nil, nil, err
	}
	fs.Debugf(f, "Mounting on %q", mountpoint)

	if opt.DebugFUSE {
		fuse.Debug = func(msg interface{}) {
@@ -87,6 +82,8 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
		}
	}

	f := VFS.Fs()
	fs.Debugf(f, "Mounting on %q", mountpoint)
	c, err := fuse.Mount(mountpoint, mountOptions(VFS, opt.DeviceName, opt)...)
	if err != nil {
		return nil, nil, err

@@ -95,6 +95,9 @@ func mountOptions(fsys *FS, f fs.Fs, opt *mountlib.Options) (mountOpts *fuse.Mou
	}
	var opts []string
	// FIXME doesn't work opts = append(opts, fmt.Sprintf("max_readahead=%d", maxReadAhead))
	if fsys.opt.AllowNonEmpty {
		opts = append(opts, "nonempty")
	}
	if fsys.opt.AllowOther {
		opts = append(opts, "allow_other")
	}
@@ -145,16 +148,9 @@ func mountOptions(fsys *FS, f fs.Fs, opt *mountlib.Options) (mountOpts *fuse.Mou
// report an error when fusermount is called.
func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error, func() error, error) {
	f := VFS.Fs()
	if err := mountlib.CheckOverlap(f, mountpoint); err != nil {
		return nil, nil, err
	}
	if err := mountlib.CheckAllowNonEmpty(mountpoint, opt); err != nil {
		return nil, nil, err
	}
	fs.Debugf(f, "Mounting on %q", mountpoint)

	fsys := NewFS(VFS, opt)

	// nodeFsOpts := &fusefs.PathNodeFsOptions{
	// 	ClientInodes: false,
	// 	Debug:        mountlib.DebugFUSE,

@@ -33,20 +33,12 @@ func CheckMountEmpty(mountpoint string) error {
	if err != nil {
		return fmt.Errorf("cannot read %s: %w", mtabPath, err)
	}
	foundAutofs := false
	for _, entry := range entries {
		if entry.Dir == mountpointAbs {
			if entry.Type != "autofs" {
				return fmt.Errorf(msg, mountpointAbs)
			}
			foundAutofs = true
		if entry.Dir == mountpointAbs && entry.Type != "autofs" {
			return fmt.Errorf(msg, mountpointAbs)
		}
	}
	// It isn't safe to list an autofs in the middle of mounting
	if foundAutofs {
		return nil
	}
	return checkMountEmpty(mountpoint)
	return nil
}

// CheckMountReady checks whether mountpoint is mounted by rclone.
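
This hunk swaps between two versions of the Linux mountpoint check: one tolerates an autofs placeholder at the target (and, since listing an autofs mid-mount is unsafe, skips the emptiness check), while the other simply rejects any existing non-autofs mount. As a rough standalone illustration of the same idea — not rclone's actual implementation, which uses its own mount-table parser — one could scan /proc/self/mounts directly:

    package main

    import (
    	"bufio"
    	"fmt"
    	"os"
    	"strings"
    )

    // isBusyMountpoint reports whether dir is already a mountpoint of a
    // real (non-autofs) filesystem, by scanning /proc/self/mounts.
    // Simplified: ignores octal escapes (e.g. \040) in mount paths.
    func isBusyMountpoint(dir string) (bool, error) {
    	f, err := os.Open("/proc/self/mounts")
    	if err != nil {
    		return false, err
    	}
    	defer f.Close()
    	scanner := bufio.NewScanner(f)
    	for scanner.Scan() {
    		// format: device mountpoint fstype options dump pass
    		fields := strings.Fields(scanner.Text())
    		if len(fields) >= 3 && fields[1] == dir && fields[2] != "autofs" {
    			return true, nil
    		}
    	}
    	return false, scanner.Err()
    }

    func main() {
    	busy, err := isBusyMountpoint("/mnt/data") // example path
    	fmt.Println(busy, err)
    }
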
@@ -4,13 +4,33 @@
package mountlib

import (
	"fmt"
	"io"
	"os"
	"time"

	"github.com/rclone/rclone/fs"
)

// CheckMountEmpty checks if mountpoint folder is empty.
// On non-Linux unixes we list directory to ensure that.
func CheckMountEmpty(mountpoint string) error {
	return checkMountEmpty(mountpoint)
	fp, err := os.Open(mountpoint)
	if err != nil {
		return fmt.Errorf("cannot open: %s: %w", mountpoint, err)
	}
	defer fs.CheckClose(fp, &err)

	_, err = fp.Readdirnames(1)
	if err == io.EOF {
		return nil
	}

	const msg = "directory is not empty, use --allow-non-empty to mount anyway: %s"
	if err == nil {
		return fmt.Errorf(msg, mountpoint)
	}
	return fmt.Errorf(msg+": %w", mountpoint, err)
}

// CheckMountReady should check if mountpoint is mounted by rclone.
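
The emptiness test above hinges on Readdirnames(1): asking the directory for a single entry and getting io.EOF proves it is empty without reading the whole listing. A minimal self-contained sketch of that check (the path in main is just an example):

    package main

    import (
    	"fmt"
    	"io"
    	"os"
    )

    // isEmptyDir reports whether dir contains no entries, reading at
    // most one name rather than the whole listing.
    func isEmptyDir(dir string) (bool, error) {
    	f, err := os.Open(dir)
    	if err != nil {
    		return false, err
    	}
    	defer f.Close()
    	_, err = f.Readdirnames(1) // io.EOF means no entries at all
    	if err == io.EOF {
    		return true, nil
    	}
    	return false, err
    }

    func main() {
    	empty, err := isEmptyDir("/tmp")
    	fmt.Println(empty, err)
    }
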
@@ -159,59 +159,38 @@ group "Everyone" will be used to represent others. The user/group can be customi
with FUSE options "UserName" and "GroupName",
e.g. |-o UserName=user123 -o GroupName="Authenticated Users"|.
The permissions on each entry will be set according to [options](#options)
|--dir-perms| and |--file-perms|, which take a value in traditional Unix
|--dir-perms| and |--file-perms|, which take a value in traditional
[numeric notation](https://en.wikipedia.org/wiki/File-system_permissions#Numeric_notation).

The default permissions correspond to |--file-perms 0666 --dir-perms 0777|,
i.e. read and write permissions to everyone. This means you will not be able
to start any programs from the mount. To be able to do that you must add
execute permissions, e.g. |--file-perms 0777 --dir-perms 0777| to add it
to everyone. If the program needs to write files, chances are you will
have to enable [VFS File Caching](#vfs-file-caching) as well (see also
[limitations](#limitations)). Note that the default write permissions have
some restrictions for accounts other than the owner, specifically they lack
the "write extended attributes", as explained next.
to everyone. If the program needs to write files, chances are you will have
to enable [VFS File Caching](#vfs-file-caching) as well (see also [limitations](#limitations)).

The mapping of permissions is not always trivial, and the result you see in
Windows Explorer may not be exactly like you expected. For example, when setting
a value that includes write access for the group or others scope, this will be
mapped to individual permissions "write attributes", "write data" and
"append data", but not "write extended attributes". Windows will then show this
as basic permission "Special" instead of "Write", because "Write" also covers
the "write extended attributes" permission. When setting digit 0 for group or
others, to indicate no permissions, they will still get individual permissions
"read attributes", "read extended attributes" and "read permissions". This is
done for compatibility reasons, e.g. to allow users without additional
permissions to be able to read basic metadata about files like in Unix.
Note that the mapping of permissions is not always trivial, and the result
you see in Windows Explorer may not be exactly like you expected.
For example, when setting a value that includes write access, this will be
mapped to individual permissions "write attributes", "write data" and "append data",
but not "write extended attributes". Windows will then show this as basic
permission "Special" instead of "Write", because "Write" includes the
"write extended attributes" permission.

WinFsp 2021 (version 1.9) introduced a new FUSE option "FileSecurity",
If you set POSIX permissions for only allowing access to the owner, using
|--file-perms 0600 --dir-perms 0700|, the user group and the built-in "Everyone"
group will still be given some special permissions, such as "read attributes"
and "read permissions", in Windows. This is done for compatibility reasons,
e.g. to allow users without additional permissions to be able to read basic
metadata about files like in UNIX. One case that may arise is that other programs
(incorrectly) interpret this as the file being accessible by everyone. For example
an SSH client may warn about "unprotected private key file".

WinFsp 2021 (version 1.9) introduces a new FUSE option "FileSecurity",
that allows the complete specification of file security descriptors using
[SDDL](https://docs.microsoft.com/en-us/windows/win32/secauthz/security-descriptor-string-format).
With this you get detailed control of the resulting permissions, compared
to use of the POSIX permissions described above, and no additional permissions
will be added automatically for compatibility with Unix. Some example use
cases follow.

If you set POSIX permissions for only allowing access to the owner,
using |--file-perms 0600 --dir-perms 0700|, the user group and the built-in
"Everyone" group will still be given some special permissions, as described
above. Some programs may then (incorrectly) interpret this as the file being
accessible by everyone, for example an SSH client may warn about "unprotected
private key file". You can work around this by specifying
|-o FileSecurity="D:P(A;;FA;;;OW)"|, which sets file all access (FA) to the
owner (OW), and nothing else.

When setting write permissions then, except for the owner, this does not
include the "write extended attributes" permission, as mentioned above.
This may prevent applications from writing to files, giving a permission denied
error instead. To set working write permissions for the built-in "Everyone"
group, similar to what it gets by default but with the addition of the
"write extended attributes", you can specify
|-o FileSecurity="D:P(A;;FRFW;;;WD)"|, which sets file read (FR) and file
write (FW) to everyone (WD). If file execute (FX) is also needed, then change
to |-o FileSecurity="D:P(A;;FRFWFX;;;WD)"|, or set file all access (FA) to
get full access permissions, including delete, with
|-o FileSecurity="D:P(A;;FA;;;WD)"|.
With this you can work around issues such as the mentioned "unprotected private key file"
by specifying |-o FileSecurity="D:P(A;;FA;;;OW)"|, for file all access (FA) to the owner (OW).
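
As a concrete illustration of the doc text above, a hypothetical Windows invocation combining these options (the remote name and drive letter are placeholders):

    rclone mount remote:path X: -o UserName=user123 -o FileSecurity="D:P(A;;FRFW;;;WD)"

This would present the mount with the given owner name and grant file read and file write, including "write extended attributes", to everyone, per the SDDL string discussed above.
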
#### Windows caveats

@@ -240,16 +219,10 @@ processes as the SYSTEM account. Another alternative is to run the mount
command from a Windows Scheduled Task, or a Windows Service, configured
to run as the SYSTEM account. A third alternative is to use the
[WinFsp.Launcher infrastructure](https://github.com/winfsp/winfsp/wiki/WinFsp-Service-Architecture).
Read more in the [install documentation](https://rclone.org/install/).
Note that when running rclone as another user, it will not use
the configuration file from your profile unless you tell it to
with the [|--config|](https://rclone.org/docs/#config-config-file) option.
Note also that it is now the SYSTEM account that will have the owner
permissions, and other accounts will have permissions according to the
group or others scopes. As mentioned above, these will then not get the
"write extended attributes" permission, and this may prevent writing to
files. You can work around this with the FileSecurity option, see
example above.
Read more in the [install documentation](https://rclone.org/install/).

Note that mapping to a directory path, instead of a drive letter,
does not suffer from the same limitations.
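
To illustrate the --config point from the doc text above, a hypothetical invocation when running the mount as a different account (all paths are placeholders):

    rclone mount remote:path C:\mnt\remote --config C:\rclone\rclone.conf

This mounts to a directory path rather than a drive letter, and points rclone at an explicit configuration file instead of the per-profile default.
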
@@ -157,9 +157,6 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
	Hidden: hidden,
	Short:  `Mount the remote as file system on a mountpoint.`,
	Long:   strings.ReplaceAll(strings.ReplaceAll(mountHelp, "|", "`"), "@", commandName) + vfs.Help,
	Annotations: map[string]string{
		"versionIntroduced": "v1.33",
	},
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(2, 2, command, args)

@@ -237,8 +234,13 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm

// Mount the remote at mountpoint
func (m *MountPoint) Mount() (daemon *os.Process, err error) {
	if err = m.CheckOverlap(); err != nil {
		return nil, err
	}

	// Ensure sensible defaults
	if err = m.CheckAllowed(); err != nil {
		return nil, err
	}
	m.SetVolumeName(m.MountOpt.VolumeName)
	m.SetDeviceName(m.MountOpt.DeviceName)

@@ -2,8 +2,6 @@ package mountlib

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"runtime"
	"strings"
@@ -34,22 +32,22 @@ func ClipBlocks(b *uint64) {
	}
}

// CheckOverlap checks that root doesn't overlap with a mountpoint
func CheckOverlap(f fs.Fs, mountpoint string) error {
	name := f.Name()
// CheckOverlap checks that root doesn't overlap with mountpoint
func (m *MountPoint) CheckOverlap() error {
	name := m.Fs.Name()
	if name != "" && name != "local" {
		return nil
	}
	rootAbs := absPath(f.Root())
	mountpointAbs := absPath(mountpoint)
	rootAbs := absPath(m.Fs.Root())
	mountpointAbs := absPath(m.MountPoint)
	if strings.HasPrefix(rootAbs, mountpointAbs) || strings.HasPrefix(mountpointAbs, rootAbs) {
		const msg = "mount point %q (%q) and directory to be mounted %q (%q) mustn't overlap"
		return fmt.Errorf(msg, mountpoint, mountpointAbs, f.Root(), rootAbs)
		const msg = "mount point %q and directory to be mounted %q mustn't overlap"
		return fmt.Errorf(msg, m.MountPoint, m.Fs.Root())
	}
	return nil
}

// absPath is a helper function for CheckOverlap
// absPath is a helper function for MountPoint.CheckOverlap
func absPath(path string) string {
	if abs, err := filepath.EvalSymlinks(path); err == nil {
		path = abs
@@ -58,43 +56,32 @@ func absPath(path string) string {
		path = abs
	}
	path = filepath.ToSlash(path)
	if runtime.GOOS == "windows" {
		// Removes any UNC long path prefix to make sure a simple HasPrefix test
		// in CheckOverlap works when one is UNC (root) and one is not (mountpoint).
		path = strings.TrimPrefix(path, `//?/`)
	}
	if !strings.HasSuffix(path, "/") {
		path += "/"
	}
	return path
}

// CheckAllowNonEmpty checks --allow-non-empty flag, and if not used verifies that mountpoint is empty.
func CheckAllowNonEmpty(mountpoint string, opt *Options) error {
	if !opt.AllowNonEmpty {
		return CheckMountEmpty(mountpoint)
	}
	return nil
}

// checkMountEmpty checks if mountpoint folder is empty by listing it.
func checkMountEmpty(mountpoint string) error {
	fp, err := os.Open(mountpoint)
	if err != nil {
		return fmt.Errorf("cannot open: %s: %w", mountpoint, err)
	}
	defer fs.CheckClose(fp, &err)

	_, err = fp.Readdirnames(1)
	if err == io.EOF {
// CheckAllowed informs about ignored flags on Windows. If not on Windows,
// and the --allow-non-empty flag is not used, it verifies that mountpoint is empty.
func (m *MountPoint) CheckAllowed() error {
	opt := &m.MountOpt
	if runtime.GOOS == "windows" {
		if opt.AllowNonEmpty {
			fs.Logf(nil, "--allow-non-empty flag does nothing on Windows")
		}
		if opt.AllowRoot {
			fs.Logf(nil, "--allow-root flag does nothing on Windows")
		}
		if opt.AllowOther {
			fs.Logf(nil, "--allow-other flag does nothing on Windows")
		}
		return nil
	}

	const msg = "%q is not empty, use --allow-non-empty to mount anyway"
	if err == nil {
		return fmt.Errorf(msg, mountpoint)
	if !opt.AllowNonEmpty {
		return CheckMountEmpty(m.MountPoint)
	}
	return fmt.Errorf(msg+": %w", mountpoint, err)
	return nil
}

// SetVolumeName with sensible default
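
The overlap test in the hunk above reduces to a mutual prefix check on normalized paths; the trailing "/" appended by absPath is what stops "/mnt" from spuriously matching "/mnt-data". A condensed standalone sketch of the same idea (simplified, without the symlink and UNC handling shown above):

    package main

    import (
    	"fmt"
    	"path/filepath"
    	"strings"
    )

    // normalize mimics the spirit of absPath: slash-separated, cleaned,
    // with a trailing "/" so prefix tests match whole path components.
    func normalize(p string) string {
    	p = filepath.ToSlash(filepath.Clean(p))
    	if !strings.HasSuffix(p, "/") {
    		p += "/"
    	}
    	return p
    }

    // overlaps reports whether one directory contains the other.
    func overlaps(root, mountpoint string) bool {
    	r, m := normalize(root), normalize(mountpoint)
    	return strings.HasPrefix(r, m) || strings.HasPrefix(m, r)
    }

    func main() {
    	fmt.Println(overlaps("/data", "/data/mnt")) // true: mountpoint inside root
    	fmt.Println(overlaps("/mnt", "/mnt-data"))  // false: trailing "/" avoids a false match
    }
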
@@ -60,9 +60,6 @@ can speed transfers up greatly.

**Note**: Use the |-P|/|--progress| flag to view real-time transfer statistics.
`, "|", "`"),
	Annotations: map[string]string{
		"versionIntroduced": "v1.19",
	},
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(2, 2, command, args)
		fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)

@@ -49,9 +49,6 @@ successful transfer.

**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics.
`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.35",
	},
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(2, 2, command, args)
		fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args)

234 cmd/ncdu/ncdu.go

@@ -13,14 +13,13 @@ import (
	"strings"

	"github.com/atotto/clipboard"
	"github.com/gdamore/tcell/v2"
	"github.com/gdamore/tcell/v2/termbox"
	runewidth "github.com/mattn/go-runewidth"
	"github.com/rclone/rclone/cmd"
	"github.com/rclone/rclone/cmd/ncdu/scan"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rivo/uniseg"
	"github.com/spf13/cobra"
)

@@ -74,9 +73,6 @@ For a non-interactive listing of the remote, see the
[tree](/commands/rclone_tree/) command. To just get the total size of
the remote you can also use the [size](/commands/rclone_size/) command.
`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.37",
	},
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1, command, args)
		fsrc := cmd.NewFsSrc(args)
@@ -118,7 +114,6 @@ func helpText() (tr []string) {

// UI contains the state of the user interface
type UI struct {
	s      tcell.Screen
	f      fs.Fs     // fs being displayed
	fsName string    // human name of Fs
	root   *scan.Dir // root directory
@@ -155,91 +150,77 @@ type dirPos struct {
	offset int
}

// graphemeWidth returns the number of cells in rs.
//
// The original [runewidth.StringWidth] iterates through graphemes
// and uses this same logic. To avoid iterating through graphemes
// repeatedly, we separate that out into its own function.
func graphemeWidth(rs []rune) (wd int) {
	// copied/adapted from [runewidth.StringWidth]
	for _, r := range rs {
		wd = runewidth.RuneWidth(r)
		if wd > 0 {
			break
		}
	}
	return
}

// Print a string
func (u *UI) Print(x, y int, style tcell.Style, msg string) {
	g := uniseg.NewGraphemes(msg)
	for g.Next() {
		rs := g.Runes()
		u.s.SetContent(x, y, rs[0], rs[1:], style)
		x += graphemeWidth(rs)
func Print(x, y int, fg, bg termbox.Attribute, msg string) {
	for _, c := range msg {
		termbox.SetCell(x, y, c, fg, bg)
		x++
	}
}

// Printf a string
func (u *UI) Printf(x, y int, style tcell.Style, format string, args ...interface{}) {
func Printf(x, y int, fg, bg termbox.Attribute, format string, args ...interface{}) {
	s := fmt.Sprintf(format, args...)
	u.Print(x, y, style, s)
	Print(x, y, fg, bg, s)
}

// Line prints a string to given xmax, with given space
func (u *UI) Line(x, y, xmax int, style tcell.Style, spacer rune, msg string) {
	g := uniseg.NewGraphemes(msg)
	for g.Next() {
		rs := g.Runes()
		u.s.SetContent(x, y, rs[0], rs[1:], style)
		x += graphemeWidth(rs)
func Line(x, y, xmax int, fg, bg termbox.Attribute, spacer rune, msg string) {
	for _, c := range msg {
		termbox.SetCell(x, y, c, fg, bg)
		x += runewidth.RuneWidth(c)
		if x >= xmax {
			return
		}
	}
	for ; x < xmax; x++ {
		u.s.SetContent(x, y, spacer, nil, style)
		termbox.SetCell(x, y, spacer, fg, bg)
	}
}

// Linef a string
func (u *UI) Linef(x, y, xmax int, style tcell.Style, spacer rune, format string, args ...interface{}) {
func Linef(x, y, xmax int, fg, bg termbox.Attribute, spacer rune, format string, args ...interface{}) {
	s := fmt.Sprintf(format, args...)
	u.Line(x, y, xmax, style, spacer, s)
	Line(x, y, xmax, fg, bg, spacer, s)
}

// LineOptions Print line of selectable options
func (u *UI) LineOptions(x, y, xmax int, style tcell.Style, options []string, selected int) {
	for x := x; x < xmax; x++ {
		u.s.SetContent(x, y, ' ', nil, style) // fill
func LineOptions(x, y, xmax int, fg, bg termbox.Attribute, options []string, selected int) {
	defaultBg := bg
	defaultFg := fg

	// Print left+right whitespace to center the options
	xoffset := ((xmax - x) - lineOptionLength(options)) / 2
	for j := x; j < x+xoffset; j++ {
		termbox.SetCell(j, y, ' ', fg, bg)
	}
	x += ((xmax - x) - lineOptionLength(options)) / 2 // center
	for j := xmax - xoffset; j < xmax; j++ {
		termbox.SetCell(j, y, ' ', fg, bg)
	}
	x += xoffset

	for i, o := range options {
		u.s.SetContent(x, y, ' ', nil, style)
		x++
		termbox.SetCell(x, y, ' ', fg, bg)

		ostyle := style
		if i == selected {
			ostyle = tcell.StyleDefault
			bg = termbox.ColorBlack
			fg = termbox.ColorWhite
		}
		termbox.SetCell(x+1, y, '<', fg, bg)
		x += 2

		// print option text
		for _, c := range o {
			termbox.SetCell(x, y, c, fg, bg)
			x++
		}

		u.s.SetContent(x, y, '<', nil, ostyle)
		x++
		termbox.SetCell(x, y, '>', fg, bg)
		bg = defaultBg
		fg = defaultFg

		g := uniseg.NewGraphemes(o)
		for g.Next() {
			rs := g.Runes()
			u.s.SetContent(x, y, rs[0], rs[1:], ostyle)
			x += graphemeWidth(rs)
		}

		u.s.SetContent(x, y, '>', nil, ostyle)
		x++

		u.s.SetContent(x, y, ' ', nil, style)
		x++
		termbox.SetCell(x+1, y, ' ', fg, bg)
		x += 2
	}
}
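
The grapheme-based width logic above matters for multi-rune clusters (emoji with modifiers, combining accents), where naive per-rune counting would overshoot. A hedged sketch of how the uniseg and go-runewidth libraries combine to measure display width, in the same spirit as graphemeWidth:

    package main

    import (
    	"fmt"

    	runewidth "github.com/mattn/go-runewidth"
    	"github.com/rivo/uniseg"
    )

    // displayWidth counts terminal cells grapheme by grapheme: each
    // cluster is as wide as its first rune with nonzero width.
    func displayWidth(s string) (w int) {
    	g := uniseg.NewGraphemes(s)
    	for g.Next() {
    		for _, r := range g.Runes() {
    			if rw := runewidth.RuneWidth(r); rw > 0 {
    				w += rw
    				break
    			}
    		}
    	}
    	return w
    }

    func main() {
    	fmt.Println(displayWidth("abc"))   // 3
    	fmt.Println(displayWidth("héllo")) // 5, even if é is e + combining accent
    }
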
@@ -253,7 +234,7 @@ func lineOptionLength(o []string) int {

// Box the u.boxText onto the screen
func (u *UI) Box() {
	w, h := u.s.Size()
	w, h := termbox.Size()

	// Find dimensions of text
	boxWidth := 10
@@ -279,31 +260,31 @@ func (u *UI) Box() {
	ymax := y + len(u.boxText)

	// draw text
	style := tcell.StyleDefault.Background(tcell.ColorRed).Reverse(true)
	fg, bg := termbox.ColorRed, termbox.ColorWhite
	for i, s := range u.boxText {
		u.Line(x, y+i, xmax, style, ' ', s)
		style = tcell.StyleDefault.Reverse(true)
		Line(x, y+i, xmax, fg, bg, ' ', s)
		fg = termbox.ColorBlack
	}

	if len(u.boxMenu) != 0 {
		u.LineOptions(x, ymax, xmax, style, u.boxMenu, u.boxMenuButton)
		ymax++
		LineOptions(x, ymax-1, xmax, fg, bg, u.boxMenu, u.boxMenuButton)
	}

	// draw top border
	for i := y; i < ymax; i++ {
		u.s.SetContent(x-1, i, tcell.RuneVLine, nil, style)
		u.s.SetContent(xmax, i, tcell.RuneVLine, nil, style)
		termbox.SetCell(x-1, i, '│', fg, bg)
		termbox.SetCell(xmax, i, '│', fg, bg)
	}
	for j := x; j < xmax; j++ {
		u.s.SetContent(j, y-1, tcell.RuneHLine, nil, style)
		u.s.SetContent(j, ymax, tcell.RuneHLine, nil, style)
		termbox.SetCell(j, y-1, '─', fg, bg)
		termbox.SetCell(j, ymax, '─', fg, bg)
	}

	u.s.SetContent(x-1, y-1, tcell.RuneULCorner, nil, style)
	u.s.SetContent(xmax, y-1, tcell.RuneURCorner, nil, style)
	u.s.SetContent(x-1, ymax, tcell.RuneLLCorner, nil, style)
	u.s.SetContent(xmax, ymax, tcell.RuneLRCorner, nil, style)
	termbox.SetCell(x-1, y-1, '┌', fg, bg)
	termbox.SetCell(xmax, y-1, '┐', fg, bg)
	termbox.SetCell(x-1, ymax, '└', fg, bg)
	termbox.SetCell(xmax, ymax, '┘', fg, bg)
}

func (u *UI) moveBox(to int) {
@@ -355,17 +336,17 @@ func (u *UI) hasEmptyDir() bool {
// Draw the current screen
func (u *UI) Draw() error {
	ctx := context.Background()
	w, h := u.s.Size()
	w, h := termbox.Size()
	u.dirListHeight = h - 3

	// Plot
	u.s.Clear()
	termbox.Clear(termbox.ColorWhite, termbox.ColorBlack)

	// Header line
	u.Linef(0, 0, w, tcell.StyleDefault.Reverse(true), ' ', "rclone ncdu %s - use the arrow keys to navigate, press ? for help", fs.Version)
	Linef(0, 0, w, termbox.ColorBlack, termbox.ColorWhite, ' ', "rclone ncdu %s - use the arrow keys to navigate, press ? for help", fs.Version)

	// Directory line
	u.Linef(0, 1, w, tcell.StyleDefault, '-', "-- %s ", u.path)
	Linef(0, 1, w, termbox.ColorWhite, termbox.ColorBlack, '-', "-- %s ", u.path)

	// graphs
	const (
@@ -396,18 +377,20 @@ func (u *UI) Draw() error {
		attrs, err = u.d.AttrI(u.sortPerm[n])
	}
	_, isSelected := u.selectedEntries[entry.String()]
	style := tcell.StyleDefault
	fg := termbox.ColorWhite
	if attrs.EntriesHaveErrors {
		style = style.Foreground(tcell.ColorYellow)
		fg = termbox.ColorYellow
	}
	if err != nil {
		style = style.Foreground(tcell.ColorRed)
		fg = termbox.ColorRed
	}
	const colorLightYellow = termbox.ColorYellow + 8
	if isSelected {
		style = style.Foreground(tcell.ColorLightYellow)
		fg = colorLightYellow
	}
	bg := termbox.ColorBlack
	if n == dirPos.entry {
		style = style.Reverse(true)
		fg, bg = bg, fg
	}
	mark := ' '
	if attrs.IsDir {
@@ -466,30 +449,31 @@ func (u *UI) Draw() error {
			}
			extras += "[" + graph[graphBars-bars:2*graphBars-bars] + "] "
		}
		u.Linef(0, y, w, style, ' ', "%c %s %s%c%s%s",
			fileFlag, operations.SizeStringField(attrs.Size, u.humanReadable, 12), extras, mark, path.Base(entry.Remote()), message)
		Linef(0, y, w, fg, bg, ' ', "%c %s %s%c%s%s", fileFlag, operations.SizeStringField(attrs.Size, u.humanReadable, 12), extras, mark, path.Base(entry.Remote()), message)
		y++
	}
}

	// Footer
	if u.d == nil {
		u.Line(0, h-1, w, tcell.StyleDefault.Reverse(true), ' ', "Waiting for root directory...")
		Line(0, h-1, w, termbox.ColorBlack, termbox.ColorWhite, ' ', "Waiting for root directory...")
	} else {
		message := ""
		if u.listing {
			message = " [listing in progress]"
		}
		size, count := u.d.Attr()
		u.Linef(0, h-1, w, tcell.StyleDefault.Reverse(true), ' ', "Total usage: %s, Objects: %s%s",
			operations.SizeString(size, u.humanReadable), operations.CountString(count, u.humanReadable), message)
		Linef(0, h-1, w, termbox.ColorBlack, termbox.ColorWhite, ' ', "Total usage: %s, Objects: %s%s", operations.SizeString(size, u.humanReadable), operations.CountString(count, u.humanReadable), message)
	}

	// Show the box on top if required
	if u.showBox {
		u.Box()
	}
	u.s.Show()
	err := termbox.Flush()
	if err != nil {
		return fmt.Errorf("failed to flush screen: %w", err)
	}
	return nil
}

@@ -902,34 +886,37 @@ func NewUI(f fs.Fs) *UI {

// Show shows the user interface
func (u *UI) Show() error {
	var err error
	u.s, err = tcell.NewScreen()
	err := termbox.Init()
	if err != nil {
		return fmt.Errorf("screen new: %w", err)
		return fmt.Errorf("termbox init: %w", err)
	}
	err = u.s.Init()
	if err != nil {
		return fmt.Errorf("screen init: %w", err)
	}
	defer u.s.Fini()
	defer termbox.Close()

	// scan the disk in the background
	u.listing = true
	rootChan, errChan, updated := scan.Scan(context.Background(), u.f)

	// Poll the events into a channel
	events := make(chan tcell.Event)
	go u.s.ChannelEvents(events, nil)
	events := make(chan termbox.Event)
	doneWithEvent := make(chan bool)
	go func() {
		for {
			events <- termbox.PollEvent()
			<-doneWithEvent
		}
	}()

	// Main loop, waiting for events and channels
outer:
	for {
		//Reset()
		err := u.Draw()
		if err != nil {
			return fmt.Errorf("draw failed: %w", err)
		}
		var root *scan.Dir
		select {
		case root := <-rootChan:
		case root = <-rootChan:
			u.root = root
			u.setCurrentDir(root)
		case err := <-errChan:
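
Worth noting in the hunk above: tcell's ChannelEvents pumps UI events into a channel for use in a select, while the termbox variant hand-rolls the same pattern with a polling goroutine plus a handshake channel. A generic hedged sketch of that poll-into-channel idiom, detached from either library (the poll function here is a stand-in):

    package main

    import (
    	"fmt"
    	"time"
    )

    // pollEvents adapts a blocking poll function into a channel, so the
    // caller can select over events alongside other channels.
    func pollEvents(poll func() string) <-chan string {
    	events := make(chan string)
    	go func() {
    		for {
    			events <- poll() // blocks until the next event
    		}
    	}()
    	return events
    }

    func main() {
    	tick := 0
    	events := pollEvents(func() string {
    		time.Sleep(100 * time.Millisecond) // stand-in for a blocking UI poll
    		tick++
    		return fmt.Sprintf("event %d", tick)
    	})
    	timeout := time.After(350 * time.Millisecond)
    	for {
    		select {
    		case ev := <-events:
    			fmt.Println(ev)
    		case <-timeout:
    			return // sketch only: the poll goroutine exits with the program
    		}
    	}
    }
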
@@ -939,50 +926,39 @@ outer:
			u.listing = false
		case <-updated:
			// redraw
			// TODO: might want to limit updates per second
			// might want to limit updates per second
			u.sortCurrentDir()
		case ev := <-events:
			switch ev := ev.(type) {
			case *tcell.EventResize:
				if u.root != nil {
					u.sortCurrentDir() // redraw
				}
				u.s.Sync()
			case *tcell.EventKey:
				var c rune
				if k := ev.Key(); k == tcell.KeyRune {
					c = ev.Rune()
				} else {
					c = key(k)
				}
				switch c {
				case key(tcell.KeyEsc), key(tcell.KeyCtrlC), 'q':
			doneWithEvent <- true
			if ev.Type == termbox.EventKey {
				switch ev.Key + termbox.Key(ev.Ch) {
				case termbox.KeyEsc, termbox.KeyCtrlC, 'q':
					if u.showBox {
						u.showBox = false
					} else {
						break outer
					}
				case key(tcell.KeyDown), 'j':
				case termbox.KeyArrowDown, 'j':
					u.move(1)
				case key(tcell.KeyUp), 'k':
				case termbox.KeyArrowUp, 'k':
					u.move(-1)
				case key(tcell.KeyPgDn), '-', '_':
				case termbox.KeyPgdn, '-', '_':
					u.move(u.dirListHeight)
				case key(tcell.KeyPgUp), '=', '+':
				case termbox.KeyPgup, '=', '+':
					u.move(-u.dirListHeight)
				case key(tcell.KeyLeft), 'h':
				case termbox.KeyArrowLeft, 'h':
					if u.showBox {
						u.moveBox(-1)
						break
					}
					u.up()
				case key(tcell.KeyEnter):
				case termbox.KeyEnter:
					if len(u.boxMenu) > 0 {
						u.handleBoxOption()
						break
					}
					u.enter()
				case key(tcell.KeyRight), 'l':
				case termbox.KeyArrowRight, 'l':
					if u.showBox {
						u.moveBox(1)
						break
@@ -1025,8 +1001,11 @@ outer:

				// Refresh the screen. Not obvious what key to map
				// this onto, but ^L is a common choice.
				case key(tcell.KeyCtrlL):
					u.s.Sync()
				case termbox.KeyCtrlL:
					err := termbox.Sync()
					if err != nil {
						fs.Errorf(nil, "termbox sync returned error: %v", err)
					}
				}
			}
		}
	}
@@ -1034,8 +1013,3 @@ outer:
	}
	return nil
}

// key returns a rune representing the key k. It is a negative value, to not collide with Unicode code-points.
func key(k tcell.Key) rune {
	return rune(-k)
}
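
The key helper above folds tcell's special keys into the same rune space as printable characters so a single switch can handle both; negating the key code keeps it outside the valid Unicode range. A hedged toy demonstration of the trick, with hypothetical key codes standing in for a real terminal library's:

    package main

    import "fmt"

    // Key mimics a terminal library's key-code type (hypothetical values).
    type Key int

    const (
    	KeyEsc   Key = 27
    	KeyEnter Key = 13
    )

    // key maps a special key to a negative rune, so it can never collide
    // with a real character typed by the user.
    func key(k Key) rune {
    	return rune(-k)
    }

    func handle(c rune) string {
    	switch c {
    	case key(KeyEsc), 'q':
    		return "quit"
    	case key(KeyEnter):
    		return "enter"
    	default:
    		return fmt.Sprintf("char %q", c)
    	}
    }

    func main() {
    	fmt.Println(handle(key(KeyEsc))) // quit
    	fmt.Println(handle('q'))         // quit
    	fmt.Println(handle('j'))         // char 'j'
    }
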
@@ -42,9 +42,6 @@ obfuscating the hyphen itself.
If you want to encrypt the config file then please use config file
encryption - see [rclone config](/commands/rclone_config/) for more
info.`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.36",
	},
	RunE: func(command *cobra.Command, args []string) error {
		cmd.CheckArgs(1, 1, command, args)
		var password string

12 cmd/rc/rc.go

@@ -99,9 +99,6 @@ rclone rc server, e.g.:
    rclone rc --loopback operations/about fs=/

Use ` + "`rclone rc`" + ` to see a list of all possible commands.`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.40",
	},
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(0, 1e9, command, args)
		cmd.Run(false, false, command, func() error {
@@ -156,15 +153,6 @@ func ParseOptions(options []string) (opt map[string]string) {
func setAlternateFlag(flagName string, output *string) {
	if rcFlag := pflag.Lookup(flagName); rcFlag != nil && rcFlag.Changed {
		*output = rcFlag.Value.String()
		if sliceValue, ok := rcFlag.Value.(pflag.SliceValue); ok {
			stringSlice := sliceValue.GetSlice()
			for _, value := range stringSlice {
				if value != "" {
					*output = value
					break
				}
			}
		}
	}
}

@@ -56,9 +56,6 @@ Note that the upload can also not be retried because the data is
not kept around until the upload succeeds. If you need to transfer
a lot of data, you're better off caching locally and then
` + "`rclone move`" + ` it to the destination.`,
	Annotations: map[string]string{
		"versionIntroduced": "v1.38",
	},
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(1, 1, command, args)

@@ -11,7 +11,6 @@ import (
	"github.com/rclone/rclone/fs/rc/rcflags"
	"github.com/rclone/rclone/fs/rc/rcserver"
	"github.com/rclone/rclone/lib/atexit"
	libhttp "github.com/rclone/rclone/lib/http"
	"github.com/spf13/cobra"
)

@@ -32,10 +31,7 @@ for GET requests on the URL passed in. It will also open the URL in
the browser when rclone is run.

See the [rc documentation](/rc/) for more info on the rc flags.
` + libhttp.Help + libhttp.TemplateHelp + libhttp.AuthHelp,
	Annotations: map[string]string{
		"versionIntroduced": "v1.45",
	},
`,
	Run: func(command *cobra.Command, args []string) {
		cmd.CheckArgs(0, 1, command, args)
		if rcflags.Opt.Enabled {
Some files were not shown because too many files have changed in this diff.