Mirror of https://github.com/rclone/rclone.git, synced 2025-12-13 23:03:19 +00:00
Compare commits: jwt-v5-com ... fix-auth-p (39 commits)

69ae5f2aaf
c837664653
77429b154e
39b8f17ebb
81ecfb0f64
656e789c5b
fe19184084
b4990cd858
8e955c6b13
3a5ddfcd3c
ac3f7a87c3
4e9b63e141
7fd7fe3c82
9dff45563d
83cf8fb821
32e79a5c5c
fc44a8114e
657172ef77
71eb4199c3
ac3c21368d
db71b2bd5f
8cfe42d09f
e673a28a72
59889ce46b
62e8a01e7e
87eaf37629
7c7606a6cf
dbb21165d4
375953cba3
af5385b344
347be176af
bf5a4774c6
0275d3edf2
be53ae98f8
0d9fe51632
03bd795221
5a4026ccb4
b1d4de69c2
5316acd046
@@ -12,6 +12,8 @@ RUN ./rclone version
 
 # Begin final image
 FROM alpine:latest
 
+LABEL org.opencontainers.image.source="https://github.com/rclone/rclone"
+
 RUN apk --no-cache add ca-certificates fuse3 tzdata && \
     echo "user_allow_other" >> /etc/fuse.conf
 
(File diff suppressed because it is too large.)
@@ -3,16 +3,149 @@
 package azureblob
 
 import (
+	"context"
+	"encoding/base64"
+	"strings"
 	"testing"
 
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fstest"
+	"github.com/rclone/rclone/fstest/fstests"
+	"github.com/rclone/rclone/lib/random"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
-func (f *Fs) InternalTest(t *testing.T) {
-	// Check first feature flags are set on this
-	// remote
+func TestBlockIDCreator(t *testing.T) {
+	// Check creation and random number
+	bic, err := newBlockIDCreator()
+	require.NoError(t, err)
+	bic2, err := newBlockIDCreator()
+	require.NoError(t, err)
+	assert.NotEqual(t, bic.random, bic2.random)
+	assert.NotEqual(t, bic.random, [8]byte{})
+
+	// Set random to known value for tests
+	bic.random = [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
+	chunkNumber := uint64(0xFEDCBA9876543210)
+
+	// Check creation of ID
+	want := base64.StdEncoding.EncodeToString([]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, 1, 2, 3, 4, 5, 6, 7, 8})
+	assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", want)
+	got := bic.newBlockID(chunkNumber)
+	assert.Equal(t, want, got)
+	assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", got)
+
+	// Test checkID is working
+	assert.NoError(t, bic.checkID(chunkNumber, got))
+	assert.ErrorContains(t, bic.checkID(chunkNumber, "$"+got), "illegal base64")
+	assert.ErrorContains(t, bic.checkID(chunkNumber, "AAAA"+got), "bad block ID length")
+	assert.ErrorContains(t, bic.checkID(chunkNumber+1, got), "expecting decoded")
+	assert.ErrorContains(t, bic2.checkID(chunkNumber, got), "random bytes")
+}
+
+func (f *Fs) testFeatures(t *testing.T) {
+	// Check first feature flags are set on this remote
 	enabled := f.Features().SetTier
 	assert.True(t, enabled)
 	enabled = f.Features().GetTier
 	assert.True(t, enabled)
 }
+
+type ReadSeekCloser struct {
+	*strings.Reader
+}
+
+func (r *ReadSeekCloser) Close() error {
+	return nil
+}
+
+// Stage a block at remote but don't commit it
+func (f *Fs) stageBlockWithoutCommit(ctx context.Context, t *testing.T, remote string) {
+	var (
+		containerName, blobPath = f.split(remote)
+		containerClient         = f.cntSVC(containerName)
+		blobClient              = containerClient.NewBlockBlobClient(blobPath)
+		data                    = "uncommitted data"
+		blockID                 = "1"
+		blockIDBase64           = base64.StdEncoding.EncodeToString([]byte(blockID))
+	)
+	r := &ReadSeekCloser{strings.NewReader(data)}
+	_, err := blobClient.StageBlock(ctx, blockIDBase64, r, nil)
+	require.NoError(t, err)
+
+	// Verify the block is staged but not committed
+	blockList, err := blobClient.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
+	require.NoError(t, err)
+	found := false
+	for _, block := range blockList.UncommittedBlocks {
+		if *block.Name == blockIDBase64 {
+			found = true
+			break
+		}
+	}
+	require.True(t, found, "Block ID not found in uncommitted blocks")
+}
+
+// This tests uploading a blob where it has uncommitted blocks with a different ID size.
+//
+// https://gauravmantri.com/2013/05/18/windows-azure-blob-storage-dealing-with-the-specified-blob-or-block-content-is-invalid-error/
+//
+// TestIntegration/FsMkdir/FsPutFiles/Internal/WriteUncommittedBlocks
+func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
+	var (
+		ctx    = context.Background()
+		remote = "testBlob"
+	)
+
+	// Multipart copy the blob please
+	oldUseCopyBlob, oldCopyCutoff := f.opt.UseCopyBlob, f.opt.CopyCutoff
+	f.opt.UseCopyBlob = false
+	f.opt.CopyCutoff = f.opt.ChunkSize
+	defer func() {
+		f.opt.UseCopyBlob, f.opt.CopyCutoff = oldUseCopyBlob, oldCopyCutoff
+	}()
+
+	// Create a blob with uncommitted blocks
+	f.stageBlockWithoutCommit(ctx, t, remote)
+
+	// Now attempt to overwrite the block with a different sized block ID to provoke this error
+
+	// Check the object does not exist
+	_, err := f.NewObject(ctx, remote)
+	require.Equal(t, fs.ErrorObjectNotFound, err)
+
+	// Upload a multipart file over the block with uncommitted chunks of a different ID size
+	size := 4*int(f.opt.ChunkSize) - 1
+	contents := random.String(size)
+	item := fstest.NewItem(remote, contents, fstest.Time("2001-05-06T04:05:06.499Z"))
+	o := fstests.PutTestContents(ctx, t, f, &item, contents, true)
+
+	// Check size
+	assert.Equal(t, int64(size), o.Size())
+
+	// Create a new blob with uncommitted blocks
+	newRemote := "testBlob2"
+	f.stageBlockWithoutCommit(ctx, t, newRemote)
+
+	// Copy over that block
+	dst, err := f.Copy(ctx, o, newRemote)
+	require.NoError(t, err)
+
+	// Check basics
+	assert.Equal(t, int64(size), dst.Size())
+	assert.Equal(t, newRemote, dst.Remote())
+
+	// Check contents
+	gotContents := fstests.ReadObject(ctx, t, dst, -1)
+	assert.Equal(t, contents, gotContents)
+
+	// Remove the object
+	require.NoError(t, dst.Remove(ctx))
+}
+
+func (f *Fs) InternalTest(t *testing.T) {
+	t.Run("Features", f.testFeatures)
+	t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
+}
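
The expected block ID in TestBlockIDCreator decodes to the 8-byte big-endian chunk number followed by the creator's 8 random bytes. A minimal standalone sketch of that layout, inferred from the test's constants rather than taken from the backend source:

package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// newBlockID is a hypothetical reimplementation for illustration: big-endian
// chunk number first, then the per-creator random suffix, base64 encoded.
func newBlockID(chunkNumber uint64, random [8]byte) string {
	var id [16]byte
	binary.BigEndian.PutUint64(id[:8], chunkNumber)
	copy(id[8:], random[:])
	return base64.StdEncoding.EncodeToString(id[:])
}

func main() {
	random := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
	fmt.Println(newBlockID(0xFEDCBA9876543210, random))
	// Prints /ty6mHZUMhABAgMEBQYHCA==, the value the test asserts.
}
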
@@ -15,13 +15,17 @@ import (
 
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
+	name := "TestAzureBlob"
 	fstests.Run(t, &fstests.Opt{
-		RemoteName:  "TestAzureBlob:",
+		RemoteName:  name + ":",
 		NilObject:   (*Object)(nil),
 		TiersToTest: []string{"Hot", "Cool", "Cold"},
 		ChunkedUpload: fstests.ChunkedUploadConfig{
 			MinChunkSize: defaultChunkSize,
 		},
+		ExtraConfig: []fstests.ExtraConfigItem{
+			{Name: name, Key: "use_copy_blob", Value: "false"},
+		},
 	})
 }
 
@@ -40,6 +44,7 @@ func TestIntegration2(t *testing.T) {
 		},
 		ExtraConfig: []fstests.ExtraConfigItem{
 			{Name: name, Key: "directory_markers", Value: "true"},
+			{Name: name, Key: "use_copy_blob", Value: "false"},
 		},
 	})
 }
@@ -48,8 +53,13 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
 	return f.setUploadChunkSize(cs)
 }
 
+func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
+	return f.setCopyCutoff(cs)
+}
+
 var (
 	_ fstests.SetUploadChunkSizer = (*Fs)(nil)
+	_ fstests.SetCopyCutoffer    = (*Fs)(nil)
 )
 
 func TestValidateAccessTier(t *testing.T) {
@@ -237,6 +237,30 @@ msi_client_id, or msi_mi_res_id parameters.`,
 			Help:      "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
 			Advanced:  true,
 			Sensitive: true,
+		}, {
+			Name: "disable_instance_discovery",
+			Help: `Skip requesting Microsoft Entra instance metadata
+This should be set true only by applications authenticating in
+disconnected clouds, or private clouds such as Azure Stack.
+It determines whether rclone requests Microsoft Entra instance
+metadata from ` + "`https://login.microsoft.com/`" + ` before
+authenticating.
+Setting this to true will skip this request, making you responsible
+for ensuring the configured authority is valid and trustworthy.
+`,
+			Default:  false,
+			Advanced: true,
+		}, {
+			Name: "use_az",
+			Help: `Use Azure CLI tool az for authentication
+Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/)
+as the sole means of authentication.
+Setting this can be useful if you wish to use the az CLI on a host with
+a System Managed Identity that you do not want to use.
+Don't set env_auth at the same time.
+`,
+			Default:  false,
+			Advanced: true,
 		}, {
 			Name: "endpoint",
 			Help: "Endpoint for the service.\n\nLeave blank normally.",
@@ -319,10 +343,12 @@ type Options struct {
 	Username                 string        `config:"username"`
 	Password                 string        `config:"password"`
 	ServicePrincipalFile     string        `config:"service_principal_file"`
+	DisableInstanceDiscovery bool          `config:"disable_instance_discovery"`
 	UseMSI                   bool          `config:"use_msi"`
 	MSIObjectID              string        `config:"msi_object_id"`
 	MSIClientID              string        `config:"msi_client_id"`
 	MSIResourceID            string        `config:"msi_mi_res_id"`
+	UseAZ                    bool          `config:"use_az"`
 	Endpoint                 string        `config:"endpoint"`
 	ChunkSize                fs.SizeSuffix `config:"chunk_size"`
 	MaxStreamSize            fs.SizeSuffix `config:"max_stream_size"`
@@ -415,6 +441,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
 		// Read credentials from the environment
 		options := azidentity.DefaultAzureCredentialOptions{
 			ClientOptions:            policyClientOptions,
+			DisableInstanceDiscovery: opt.DisableInstanceDiscovery,
 		}
 		cred, err = azidentity.NewDefaultAzureCredential(&options)
 		if err != nil {
@@ -425,6 +452,13 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
 		if err != nil {
 			return nil, fmt.Errorf("create new shared key credential failed: %w", err)
 		}
+	case opt.UseAZ:
+		var options = azidentity.AzureCLICredentialOptions{}
+		cred, err = azidentity.NewAzureCLICredential(&options)
+		fmt.Println(cred)
+		if err != nil {
+			return nil, fmt.Errorf("failed to create Azure CLI credentials: %w", err)
+		}
 	case opt.SASURL != "":
 		client, err = service.NewClientWithNoCredential(opt.SASURL, &clientOpt)
 		if err != nil {
@@ -899,7 +933,7 @@ func (o *Object) getMetadata(ctx context.Context) error {
 
 // Hash returns the MD5 of an object returning a lowercase hex string
 //
-// May make a network request becaue the [fs.List] method does not
+// May make a network request because the [fs.List] method does not
 // return MD5 hashes for DirEntry
 func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
 	if ty != hash.MD5 {
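
As a standalone illustration of the new use_az code path, this sketch builds the same credential the diff constructs in newFsFromOptions; the wrapper program is hypothetical, but azidentity.NewAzureCLICredential is the call the diff itself uses:

package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// AzureCLICredential defers to the logged-in `az` CLI session, which is
	// why the option helps on hosts whose managed identity should be ignored.
	cred, err := azidentity.NewAzureCLICredential(&azidentity.AzureCLICredentialOptions{})
	if err != nil {
		fmt.Println("failed to create Azure CLI credentials:", err)
		return
	}
	fmt.Printf("credential ready: %T\n", cred)
}
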
@@ -30,6 +30,7 @@ import (
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/operations"
 	"github.com/rclone/rclone/fs/walk"
 	"github.com/rclone/rclone/lib/bucket"
 	"github.com/rclone/rclone/lib/encoder"
@@ -1318,17 +1319,23 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
 				// Check current version of the file
 				if deleteHidden && object.Action == "hide" {
 					fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
+					if !operations.SkipDestructive(ctx, object.Name, "remove hide marker") {
 						toBeDeleted <- object
+					}
 				} else if deleteUnfinished && object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
 					fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
+					if !operations.SkipDestructive(ctx, object.Name, "remove pending upload") {
 						toBeDeleted <- object
+					}
 				} else {
 					fs.Debugf(remote, "Not deleting current version (id %q) %q dated %v (%v ago)", object.ID, object.Action, time.Time(object.UploadTimestamp).Local(), time.Since(time.Time(object.UploadTimestamp)))
 				}
 			} else {
 				fs.Debugf(remote, "Deleting (id %q)", object.ID)
+				if !operations.SkipDestructive(ctx, object.Name, "delete") {
 					toBeDeleted <- object
+				}
 			}
 			last = remote
 			tr.Done(ctx, nil)
 		}
@@ -2293,8 +2300,10 @@ func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, op
 	}
 
+	skip := operations.SkipDestructive(ctx, name, "update lifecycle rules")
+
 	var bucket *api.Bucket
-	if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil || newRule.DaysFromStartingToCancelingUnfinishedLargeFiles != nil {
+	if !skip && (newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil || newRule.DaysFromStartingToCancelingUnfinishedLargeFiles != nil) {
 		bucketID, err := f.getBucketID(ctx, bucketName)
 		if err != nil {
 			return nil, err
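
The pattern added throughout is rclone's standard destructive-action gate: operations.SkipDestructive returns true (after logging) when --dry-run or --interactive says the action should not run. A minimal sketch of that behaviour outside the backend; the object path is made up:

package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
)

func main() {
	// Same idiom the new tests use to simulate --dry-run.
	ctx, ci := fs.AddConfig(context.Background())
	ci.DryRun = true
	if operations.SkipDestructive(ctx, "bucket/file.txt", "delete") {
		fmt.Println("delete skipped under --dry-run")
	}
}
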
@@ -5,6 +5,7 @@ import (
 	"crypto/sha1"
 	"fmt"
 	"path"
+	"sort"
 	"strings"
 	"testing"
 	"time"
@@ -13,6 +14,7 @@ import (
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/cache"
 	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/object"
 	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/fstest/fstests"
 	"github.com/rclone/rclone/lib/bucket"
@@ -463,24 +465,161 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
 	})
 
 	t.Run("Cleanup", func(t *testing.T) {
-		require.NoError(t, f.cleanUp(ctx, true, false, 0))
-		items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
-		fstest.CheckListing(t, f, items)
-		// Set --b2-versions for this test
+		t.Run("DryRun", func(t *testing.T) {
 			f.opt.Versions = true
 			defer func() {
 				f.opt.Versions = false
 			}()
+			// Listing should be unchanged after dry run
+			before := listAllFiles(ctx, t, f, dirName)
+			ctx, ci := fs.AddConfig(ctx)
+			ci.DryRun = true
+			require.NoError(t, f.cleanUp(ctx, true, false, 0))
+			after := listAllFiles(ctx, t, f, dirName)
+			assert.Equal(t, before, after)
+		})
+
+		t.Run("RealThing", func(t *testing.T) {
+			f.opt.Versions = true
+			defer func() {
+				f.opt.Versions = false
+			}()
+			// Listing should reflect current state after cleanup
+			require.NoError(t, f.cleanUp(ctx, true, false, 0))
+			items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
+			fstest.CheckListing(t, f, items)
+		})
 	})
 
 	// Purge gets tested later
 }
+
+func (f *Fs) InternalTestCleanupUnfinished(t *testing.T) {
+	ctx := context.Background()
+
+	// B2CleanupHidden tests cleaning up hidden files
+	t.Run("CleanupUnfinished", func(t *testing.T) {
+		dirName := "unfinished"
+		fileCount := 5
+		expectedFiles := []string{}
+		for i := 1; i < fileCount; i++ {
+			fileName := fmt.Sprintf("%s/unfinished-%d", dirName, i)
+			expectedFiles = append(expectedFiles, fileName)
+			obj := &Object{
+				fs:     f,
+				remote: fileName,
+			}
+			objInfo := object.NewStaticObjectInfo(fileName, fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
+			_, err := f.newLargeUpload(ctx, obj, nil, objInfo, f.opt.ChunkSize, false, nil)
+			require.NoError(t, err)
+		}
+		checkListing(ctx, t, f, dirName, expectedFiles)
+
+		t.Run("DryRun", func(t *testing.T) {
+			// Listing should not change after dry run
+			ctx, ci := fs.AddConfig(ctx)
+			ci.DryRun = true
+			require.NoError(t, f.cleanUp(ctx, false, true, 0))
+			checkListing(ctx, t, f, dirName, expectedFiles)
+		})
+
+		t.Run("RealThing", func(t *testing.T) {
+			// Listing should be empty after real cleanup
+			require.NoError(t, f.cleanUp(ctx, false, true, 0))
+			checkListing(ctx, t, f, dirName, []string{})
+		})
+	})
+}
+
+func listAllFiles(ctx context.Context, t *testing.T, f *Fs, dirName string) []string {
+	bucket, directory := f.split(dirName)
+	foundFiles := []string{}
+	require.NoError(t, f.list(ctx, bucket, directory, "", false, true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
+		if !isDirectory {
+			foundFiles = append(foundFiles, object.Name)
+		}
+		return nil
+	}))
+	sort.Strings(foundFiles)
+	return foundFiles
+}
+
+func checkListing(ctx context.Context, t *testing.T, f *Fs, dirName string, expectedFiles []string) {
+	foundFiles := listAllFiles(ctx, t, f, dirName)
+	sort.Strings(expectedFiles)
+	assert.Equal(t, expectedFiles, foundFiles)
+}
+
+func (f *Fs) InternalTestLifecycleRules(t *testing.T) {
+	ctx := context.Background()
+	opt := map[string]string{}
+
+	t.Run("InitState", func(t *testing.T) {
+		// There should be no lifecycle rules at the outset
+		lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
+		lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
+		require.NoError(t, err)
+		assert.Equal(t, 0, len(lifecycleRules))
+	})
+
+	t.Run("DryRun", func(t *testing.T) {
+		// There should still be no lifecycle rules after each dry run operation
+		ctx, ci := fs.AddConfig(ctx)
+		ci.DryRun = true
+
+		opt["daysFromHidingToDeleting"] = "30"
+		lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
+		lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
+		require.NoError(t, err)
+		assert.Equal(t, 0, len(lifecycleRules))
+
+		delete(opt, "daysFromHidingToDeleting")
+		opt["daysFromUploadingToHiding"] = "40"
+		lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
+		lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
+		require.NoError(t, err)
+		assert.Equal(t, 0, len(lifecycleRules))
+
+		opt["daysFromHidingToDeleting"] = "30"
+		lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
+		lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
+		require.NoError(t, err)
+		assert.Equal(t, 0, len(lifecycleRules))
+	})
+
+	t.Run("RealThing", func(t *testing.T) {
+		opt["daysFromHidingToDeleting"] = "30"
+		lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
+		lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
+		require.NoError(t, err)
+		assert.Equal(t, 1, len(lifecycleRules))
+		assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
+
+		delete(opt, "daysFromHidingToDeleting")
+		opt["daysFromUploadingToHiding"] = "40"
+		lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
+		lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
+		require.NoError(t, err)
+		assert.Equal(t, 1, len(lifecycleRules))
+		assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
+
+		opt["daysFromHidingToDeleting"] = "30"
+		lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
+		lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
+		require.NoError(t, err)
+		assert.Equal(t, 1, len(lifecycleRules))
+		assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
+		assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
+	})
+}
 
 // -run TestIntegration/FsMkdir/FsPutFiles/Internal
 func (f *Fs) InternalTest(t *testing.T) {
 	t.Run("Metadata", f.InternalTestMetadata)
 	t.Run("Versions", f.InternalTestVersions)
+	t.Run("CleanupUnfinished", f.InternalTestCleanupUnfinished)
+	t.Run("LifecycleRules", f.InternalTestLifecycleRules)
 }
 
 var _ fstests.InternalTester = (*Fs)(nil)
@@ -27,6 +27,7 @@ import (
 	"sync/atomic"
 	"time"
 
+	"github.com/golang-jwt/jwt/v4"
 	"github.com/rclone/rclone/backend/box/api"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config"
@@ -74,7 +75,7 @@ var (
 )
 
 type boxCustomClaims struct {
-	jwtutil.LegacyStandardClaims
+	jwt.StandardClaims
 	BoxSubType string `json:"box_sub_type,omitempty"`
 }
 
@@ -222,8 +223,10 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomC
 	}
 
 	claims = &boxCustomClaims{
-		LegacyStandardClaims: jwtutil.LegacyStandardClaims{
-			ID:      val,
+		//lint:ignore SA1019 since we need to use jwt.StandardClaims even if deprecated in jwt-go v4 until a more permanent solution is ready in time before jwt-go v5 where it is removed entirely
+		//nolint:staticcheck // Don't include staticcheck when running golangci-lint to avoid SA1019
+		StandardClaims: jwt.StandardClaims{
+			Id:       val,
 			Issuer:   boxConfig.BoxAppSettings.ClientID,
 			Subject:  boxConfig.EnterpriseID,
 			Audience: tokenURL,
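
For context, claims like these end up signed into a JWT assertion with golang-jwt v4. A hedged sketch follows; the RS256 method, the throwaway key, and every claim value are assumptions for illustration, not Box's actual flow:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	//nolint:staticcheck // StandardClaims is deprecated in jwt-go v4, as the diff's comments note
	claims := jwt.StandardClaims{
		Id:       "some-unique-id",
		Issuer:   "client-id",
		Subject:  "enterprise-id",
		Audience: "https://api.box.com/oauth2/token",
	}
	token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims)
	signed, err := token.SignedString(key)
	if err != nil {
		panic(err)
	}
	fmt.Println("assertion length:", len(signed))
}
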
@@ -2,6 +2,7 @@ package googlephotos
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -35,7 +36,7 @@ func TestIntegration(t *testing.T) {
 		*fstest.RemoteName = "TestGooglePhotos:"
 	}
 	f, err := fs.NewFs(ctx, *fstest.RemoteName)
-	if err == fs.ErrorNotFoundInConfigFile {
+	if errors.Is(err, fs.ErrorNotFoundInConfigFile) {
 		t.Skipf("Couldn't create google photos backend - skipping tests: %v", err)
 	}
 	require.NoError(t, err)
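
The swap from == to errors.Is matters as soon as a sentinel error gets wrapped; a minimal illustration with a stand-in sentinel, not rclone's actual error value:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("didn't find section in config file")

func main() {
	wrapped := fmt.Errorf("creating backend: %w", errNotFound)
	fmt.Println(wrapped == errNotFound)          // false: direct comparison misses the wrapper
	fmt.Println(errors.Is(wrapped, errNotFound)) // true: errors.Is unwraps
}
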
@@ -445,7 +445,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}
 
 	// build request
-	// cant use normal rename as file needs to be "activated" first
+	// can't use normal rename as file needs to be "activated" first
 
 	r := api.NewUpdateFileInfo()
 	r.DocumentID = doc.DocumentID
@@ -75,7 +75,7 @@ type MoveFolderParam struct {
 	DestinationPath string `validate:"nonzero" json:"destinationPath"`
 }
 
-// JobIDResponse respresents response struct with JobID for folder operations
+// JobIDResponse represents response struct with JobID for folder operations
 type JobIDResponse struct {
 	JobID string `json:"jobId"`
 }
@@ -151,6 +151,19 @@ Owner is able to add custom keys. Metadata feature grabs all the keys including
 		Help:     "Host of InternetArchive Frontend.\n\nLeave blank for default value.",
 		Default:  "https://archive.org",
 		Advanced: true,
+	}, {
+		Name: "item_metadata",
+		Help: `Metadata to be set on the IA item, this is different from file-level metadata that can be set using --metadata-set.
+Format is key=value and the 'x-archive-meta-' prefix is automatically added.`,
+		Default:  []string{},
+		Hide:     fs.OptionHideConfigurator,
+		Advanced: true,
+	}, {
+		Name: "item_derive",
+		Help: `Whether to trigger derive on the IA item or not. If set to false, the item will not be derived by IA upon upload.
+The derive process produces a number of secondary files from an upload to make an upload more usable on the web.
+Setting this to false is useful for uploading files that are already in a format that IA can display or reduce burden on IA's infrastructure.`,
+		Default: true,
 	}, {
 		Name: "disable_checksum",
 		Help: `Don't ask the server to test against MD5 checksum calculated by rclone.
@@ -201,6 +214,8 @@ type Options struct {
 	Endpoint        string               `config:"endpoint"`
 	FrontEndpoint   string               `config:"front_endpoint"`
 	DisableChecksum bool                 `config:"disable_checksum"`
+	ItemMetadata    []string             `config:"item_metadata"`
+	ItemDerive      bool                 `config:"item_derive"`
 	WaitArchive     fs.Duration          `config:"wait_archive"`
 	Enc             encoder.MultiEncoder `config:"encoding"`
 }
@@ -793,14 +808,20 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		"x-amz-auto-make-bucket":     "1", // create an item if does not exist, do nothing if already
 		"x-archive-auto-make-bucket": "1", // same as above in IAS3 original way
 		"x-archive-keep-old-version": "0", // do not keep old versions (a.k.a. trashes in other clouds)
-		"x-archive-meta-mediatype":   "data", // mark media type of the uploading file as "data"
-		"x-archive-queue-derive":     "0", // skip derivation process (e.g. encoding to smaller files, OCR on PDFs)
 		"x-archive-cascade-delete":   "1", // enable "cascate delete" (delete all derived files in addition to the file itself)
 	}
+
 	if size >= 0 {
 		headers["Content-Length"] = fmt.Sprintf("%d", size)
 		headers["x-archive-size-hint"] = fmt.Sprintf("%d", size)
 	}
+
+	// This is IA's ITEM metadata, not file metadata
+	headers, err = o.appendItemMetadataHeaders(headers, o.fs.opt)
+	if err != nil {
+		return err
+	}
+
 	var mdata fs.Metadata
 	mdata, err = fs.GetMetadataOptions(ctx, o.fs, src, options)
 	if err == nil && mdata != nil {
@@ -863,6 +884,51 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	return err
 }
 
+func (o *Object) appendItemMetadataHeaders(headers map[string]string, options Options) (newHeaders map[string]string, err error) {
+	metadataCounter := make(map[string]int)
+	metadataValues := make(map[string][]string)
+
+	// First pass: count occurrences and collect values
+	for _, v := range options.ItemMetadata {
+		parts := strings.SplitN(v, "=", 2)
+		if len(parts) != 2 {
+			return newHeaders, errors.New("item metadata key=value should be in the form key=value")
+		}
+		key, value := parts[0], parts[1]
+		metadataCounter[key]++
+		metadataValues[key] = append(metadataValues[key], value)
+	}
+
+	// Second pass: add headers with appropriate prefixes
+	for key, count := range metadataCounter {
+		if count == 1 {
+			// Only one occurrence, use x-archive-meta-
+			headers[fmt.Sprintf("x-archive-meta-%s", key)] = metadataValues[key][0]
+		} else {
+			// Multiple occurrences, use x-archive-meta01-, x-archive-meta02-, etc.
+			for i, value := range metadataValues[key] {
+				headers[fmt.Sprintf("x-archive-meta%02d-%s", i+1, key)] = value
+			}
+		}
+	}
+
+	if o.fs.opt.ItemDerive {
+		headers["x-archive-queue-derive"] = "1"
+	} else {
+		headers["x-archive-queue-derive"] = "0"
+	}
+
+	fs.Debugf(o, "Setting IA item derive: %t", o.fs.opt.ItemDerive)
+
+	for k, v := range headers {
+		if strings.HasPrefix(k, "x-archive-meta") {
+			fs.Debugf(o, "Setting IA item metadata: %s=%s", k, v)
+		}
+	}
+
+	return headers, nil
+}
+
 // Remove an object
 func (o *Object) Remove(ctx context.Context) (err error) {
 	bucket, bucketPath := o.split()
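
A worked sketch of the numbering rule in appendItemMetadataHeaders: a key supplied once gets the plain x-archive-meta- prefix, while a repeated key gets x-archive-meta01-, x-archive-meta02-, and so on. The metadata values here are hypothetical:

package main

import (
	"fmt"
	"strings"
)

func main() {
	itemMetadata := []string{"collection=opensource", "subject=go", "subject=backup"}

	// First pass: count occurrences and collect values, as the new helper does.
	counts := map[string]int{}
	values := map[string][]string{}
	for _, kv := range itemMetadata {
		parts := strings.SplitN(kv, "=", 2)
		counts[parts[0]]++
		values[parts[0]] = append(values[parts[0]], parts[1])
	}

	// Second pass: plain prefix for unique keys, numbered prefixes otherwise.
	headers := map[string]string{}
	for key, count := range counts {
		if count == 1 {
			headers["x-archive-meta-"+key] = values[key][0]
		} else {
			for i, v := range values[key] {
				headers[fmt.Sprintf("x-archive-meta%02d-%s", i+1, key)] = v
			}
		}
	}
	fmt.Println(headers)
	// map[x-archive-meta-collection:opensource x-archive-meta01-subject:go x-archive-meta02-subject:backup]
}
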
@@ -131,7 +131,7 @@ func init() {
 			Help:  "Microsoft Cloud for US Government",
 		}, {
 			Value: regionDE,
-			Help:  "Microsoft Cloud Germany",
+			Help:  "Microsoft Cloud Germany (deprecated - try " + regionGlobal + " region first).",
 		}, {
 			Value: regionCN,
 			Help:  "Azure and Office 365 operated by Vnet Group in China",
@@ -424,7 +424,7 @@ func (f *Fs) newSingleConnClient(ctx context.Context) (*rest.Client, error) {
 	})
 	// Set our own http client in the context
 	ctx = oauthutil.Context(ctx, baseClient)
-	// create a new oauth client, re-use the token source
+	// create a new oauth client, reuse the token source
 	oAuthClient := oauth2.NewClient(ctx, f.ts)
 	return rest.NewClient(oAuthClient).SetRoot("https://" + f.opt.Hostname), nil
 }
@@ -1343,7 +1343,7 @@ func init() {
 		}, {
 			Name:     "endpoint",
 			Help:     "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
-			Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,MagaluCloud,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
+			Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,Magalu,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
 			Examples: []fs.OptionExample{{
 				Value: "objects-us-east-1.dream.io",
 				Help:  "Dream Objects endpoint",
@@ -1476,14 +1476,6 @@ func init() {
 				Value:    "s3.ir-tbz-sh1.arvanstorage.ir",
 				Help:     "ArvanCloud Tabriz Iran (Shahriar) endpoint",
 				Provider: "ArvanCloud",
-			}, {
-				Value:    "br-se1.magaluobjects.com",
-				Help:     "Magalu BR Southeast 1 endpoint",
-				Provider: "Magalu",
-			}, {
-				Value:    "br-ne1.magaluobjects.com",
-				Help:     "Magalu BR Northeast 1 endpoint",
-				Provider: "Magalu",
 			}},
 		}, {
 			Name: "location_constraint",
@@ -2122,13 +2114,16 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
 				Help:  "Standard storage class",
 			}},
 		}, {
-			// Mapping from here: #todo
+			// Mapping from here: https://docs.magalu.cloud/docs/storage/object-storage/Classes-de-Armazenamento/standard
 			Name:     "storage_class",
 			Help:     "The storage class to use when storing new objects in Magalu.",
 			Provider: "Magalu",
 			Examples: []fs.OptionExample{{
 				Value: "STANDARD",
 				Help:  "Standard storage class",
+			}, {
+				Value: "GLACIER_IR",
+				Help:  "Glacier Instant Retrieval storage class",
 			}},
 		}, {
 			// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
@@ -3344,7 +3339,7 @@ func setQuirks(opt *Options) {
 		listObjectsV2         = true // Always use ListObjectsV2 instead of ListObjects
 		virtualHostStyle      = true // Use bucket.provider.com instead of putting the bucket in the URL
 		urlEncodeListings     = true // URL encode the listings to help with control characters
-		useMultipartEtag      = true // Set if Etags for multpart uploads are compatible with AWS
+		useMultipartEtag      = true // Set if Etags for multipart uploads are compatible with AWS
 		useAcceptEncodingGzip = true // Set Accept-Encoding: gzip
 		mightGzip             = true // assume all providers might use content encoding gzip until proven otherwise
 		useAlreadyExists      = true // Set if provider returns AlreadyOwnedByYou or no error if you try to remake your own bucket
@@ -3682,6 +3677,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if opt.Provider == "IDrive" {
 		f.features.SetTier = false
 	}
+	if opt.Provider == "AWS" {
+		f.features.DoubleSlash = true
+	}
 	if opt.DirectoryMarkers {
 		f.features.CanHaveEmptyDirectories = true
 	}
@@ -4153,7 +4151,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
 		opt.prefix += "/"
 	}
 	if !opt.findFile {
-		if opt.directory != "" {
+		if opt.directory != "" && (opt.prefix == "" && !bucket.IsAllSlashes(opt.directory) || opt.prefix != "" && !strings.HasSuffix(opt.directory, "/")) {
 			opt.directory += "/"
 		}
 	}
@@ -4250,14 +4248,18 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
 			}
 			remote = f.opt.Enc.ToStandardPath(remote)
 			if !strings.HasPrefix(remote, opt.prefix) {
-				fs.Logf(f, "Odd name received %q", remote)
+				fs.Logf(f, "Odd directory name received %q", remote)
 				continue
 			}
 			remote = remote[len(opt.prefix):]
+			// Trim one slash off the remote name
+			remote, _ = strings.CutSuffix(remote, "/")
+			if remote == "" || bucket.IsAllSlashes(remote) {
+				remote += "/"
+			}
 			if opt.addBucket {
 				remote = bucket.Join(opt.bucket, remote)
 			}
-			remote = strings.TrimSuffix(remote, "/")
 			err = fn(remote, &types.Object{Key: &remote}, nil, true)
 			if err != nil {
 				if err == errEndList {
@@ -6057,7 +6059,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
 		if mOut == nil {
 			err = fserrors.RetryErrorf("internal error: no info from multipart upload")
 		} else if mOut.UploadId == nil {
-			err = fserrors.RetryErrorf("internal error: no UploadId in multpart upload: %#v", *mOut)
+			err = fserrors.RetryErrorf("internal error: no UploadId in multipart upload: %#v", *mOut)
 		}
 	}
 	return f.shouldRetry(ctx, err)
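
A worked sketch of the new trailing-slash normalization in the listing callback, with a stand-in for bucket.IsAllSlashes (assumed to report strings made up entirely of '/'); it shows why names such as "//" survive where the old unconditional TrimSuffix would have collapsed them:

package main

import (
	"fmt"
	"strings"
)

// isAllSlashes is a stand-in for rclone's bucket.IsAllSlashes.
func isAllSlashes(s string) bool {
	return s != "" && strings.Trim(s, "/") == ""
}

func normalize(remote string) string {
	// Trim one slash off the remote name, as the diff does...
	remote, _ = strings.CutSuffix(remote, "/")
	// ...but keep a trailing slash for names that are nothing but slashes.
	if remote == "" || isAllSlashes(remote) {
		remote += "/"
	}
	return remote
}

func main() {
	for _, in := range []string{"dir/", "dir", "/", "//"} {
		fmt.Printf("%q -> %q\n", in, normalize(in))
	}
	// "dir/" -> "dir", "dir" -> "dir", "/" -> "/", "//" -> "//"
}
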
@@ -29,7 +29,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
 	cmd := exec.Command("git", "log", "--oneline", from+".."+to)
 	out, err := cmd.Output()
 	if err != nil {
-		log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
+		log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
 	}
 	logMap = map[string]string{}
 	logs = []string{}
@@ -39,7 +39,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
 	}
 	match := logRe.FindSubmatch(line)
 	if match == nil {
-		log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
+		log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
 	}
 	var hash, logMessage = string(match[1]), string(match[2])
 	logMap[logMessage] = hash
@@ -52,12 +52,12 @@ func main() {
 	flag.Parse()
 	args := flag.Args()
 	if len(args) != 0 {
-		log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
+		log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
 	}
 	// v1.54.0
 	versionBytes, err := os.ReadFile("VERSION")
 	if err != nil {
-		log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
+		log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
 	}
 	if versionBytes[0] == 'v' {
 		versionBytes = versionBytes[1:]
@@ -65,7 +65,7 @@ func main() {
 	versionBytes = bytes.TrimSpace(versionBytes)
 	semver := semver.New(string(versionBytes))
 	stable := fmt.Sprintf("v%d.%d", semver.Major, semver.Minor-1)
-	log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
+	log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
 	masterMap, masterLogs := readCommits(stable+".0", "master")
 	stableMap, _ := readCommits(stable+".0", stable+"-stable")
 	for _, logMessage := range masterLogs {
@@ -218,7 +218,7 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
 	if b.opt.CompareFlag == "" {
 		return nil
 	}
-	var CompareFlag CompareOpt // for exlcusions
+	var CompareFlag CompareOpt // for exclusions
 	opts := strings.Split(b.opt.CompareFlag, ",")
 	for _, opt := range opts {
 		switch strings.ToLower(strings.TrimSpace(opt)) {
@@ -394,7 +394,7 @@ func parseHash(str string) (string, string, error) {
 	return "", "", fmt.Errorf("invalid hash %q", str)
 }
 
-// checkListing verifies that listing is not empty (unless resynching)
+// checkListing verifies that listing is not empty (unless resyncing)
 func (b *bisyncRun) checkListing(ls *fileList, listing, msg string) error {
 	if b.opt.Resync || !ls.empty() {
 		return nil
@@ -23,7 +23,7 @@ INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
 INFO : Bisync successful
 (05) : move-listings empty-path1
 
-(06) : test 2. resync with empty path2, resulting in synching all content to path2.
+(06) : test 2. resync with empty path2, resulting in syncing all content to path2.
 (07) : purge-children {path2/}
 (08) : bisync resync
 INFO : Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.
cmd/bisync/testdata/test_resync/scenario.txt (vendored, 4 lines changed)
@@ -1,6 +1,6 @@
 test resync
 # 1. Resync with empty Path1, resulting in copying all content FROM Path2
-# 2. Resync with empty Path2, resulting in synching all content TO Path2
+# 2. Resync with empty Path2, resulting in syncing all content TO Path2
 # 3. Exercise all of the various file difference scenarios during a resync:
 # File         Path1    Path2    Expected action    Who wins
 # - file1.txt  Exists   Missing  Sync Path1 >Path2  Path1
@@ -17,7 +17,7 @@ purge-children {path1/}
 bisync resync
 move-listings empty-path1
 
-test 2. resync with empty path2, resulting in synching all content to path2.
+test 2. resync with empty path2, resulting in syncing all content to path2.
 purge-children {path2/}
 bisync resync
 move-listings empty-path2
@@ -429,11 +429,12 @@ func initConfig() {
 		fs.Fatalf(nil, "Failed to start remote control: %v", err)
 	}
 
-	// Start the metrics server if configured
+	// Start the metrics server if configured and not running the "rc" command
+	if os.Args[1] != "rc" {
 		_, err = rcserver.MetricsStart(ctx, &rc.Opt)
 		if err != nil {
 			fs.Fatalf(nil, "Failed to start metrics server: %v", err)
+		}
 	}
 
 	// Setup CPU profiling if desired
@@ -549,12 +549,12 @@ password to re-encrypt the config.
 
 When |--password-command| is called to change the password then the
 environment variable |RCLONE_PASSWORD_CHANGE=1| will be set. So if
-changing passwords programatically you can use the environment
+changing passwords programmatically you can use the environment
 variable to distinguish which password you must supply.
 
 Alternatively you can remove the password first (with |rclone config
 encryption remove|), then set it again with this command which may be
-easier if you don't mind the unecrypted config file being on the disk
+easier if you don't mind the unencrypted config file being on the disk
 briefly.
 `, "|", "`"),
 	RunE: func(command *cobra.Command, args []string) error {
@@ -54,7 +54,7 @@ destination if there is one with the same name.
 Setting |--stdout| or making the output file name |-|
 will cause the output to be written to standard output.
 
-### Troublshooting
+### Troubleshooting
 
 If you can't get |rclone copyurl| to work then here are some things you can try:
 
@@ -194,7 +194,7 @@ func (f *FS) Chown(name string, uid, gid int) (err error) {
 	return file.Chown(uid, gid)
 }
 
-// Chtimes changes the acces time and modified time
+// Chtimes changes the access time and modified time
 func (f *FS) Chtimes(name string, atime time.Time, mtime time.Time) (err error) {
 	defer log.Trace(name, "atime=%v, mtime=%v", atime, mtime)("err=%v", &err)
 	return f.vfs.Chtimes(name, atime, mtime)
@@ -145,7 +145,7 @@ that it uses an on disk cache, but the cache entries are held as
|
|||||||
symlinks. Rclone will use the handle of the underlying file as the NFS
|
symlinks. Rclone will use the handle of the underlying file as the NFS
|
||||||
handle which improves performance. This sort of cache can't be backed
|
handle which improves performance. This sort of cache can't be backed
|
||||||
up and restored as the underlying handles will change. This is Linux
|
up and restored as the underlying handles will change. This is Linux
|
||||||
only. It requres running rclone as root or with |CAP_DAC_READ_SEARCH|.
|
only. It requires running rclone as root or with |CAP_DAC_READ_SEARCH|.
|
||||||
You can run rclone with this extra permission by doing this to the
|
You can run rclone with this extra permission by doing this to the
|
||||||
rclone binary |sudo setcap cap_dac_read_search+ep /path/to/rclone|.
|
rclone binary |sudo setcap cap_dac_read_search+ep /path/to/rclone|.
|
||||||
|
|
||||||
|
@@ -4,8 +4,10 @@ package proxy
 import (
 	"bytes"
 	"context"
+	"crypto/md5"
 	"crypto/sha256"
 	"crypto/subtle"
+	"encoding/base64"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -217,8 +219,13 @@ func (p *Proxy) call(user, auth string, isPublicKey bool) (value interface{}, er
 		return nil, fmt.Errorf("proxy: couldn't find backend for %q: %w", fsName, err)
 	}

+	// Add a config hash to ensure configs with different values have different names.
+	// 5 characters length is 5*6 = 30 bits of base64
+	md5sumBinary := md5.Sum([]byte(config.String()))
+	configHash := base64.RawURLEncoding.EncodeToString(md5sumBinary[:])[:5]
+
 	// base name of config on user name. This may appear in logs
-	name := "proxy-" + user
+	name := "proxy-" + user + "-" + configHash
 	fsString := name + ":" + root

 	// Look for fs in the VFS cache
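To make the new naming scheme concrete, here is a minimal standalone sketch of the hash derivation the hunk above introduces; the user and config strings are invented:

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

// configHash reproduces the scheme above: MD5 the serialized config,
// base64-encode it URL-safely without padding, and keep the first
// 5 characters (5 * 6 = 30 bits).
func configHash(config string) string {
	sum := md5.Sum([]byte(config))
	return base64.RawURLEncoding.EncodeToString(sum[:])[:5]
}

func main() {
	// The same user with different config values now gets different remote names.
	fmt.Println("proxy-alice-" + configHash("host=server1"))
	fmt.Println("proxy-alice-" + configHash("host=server2"))
}
```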
@@ -90,7 +90,8 @@ func TestRun(t *testing.T) {
 	require.NotNil(t, entry.vfs)
 	f := entry.vfs.Fs()
 	require.NotNil(t, f)
-	assert.Equal(t, "proxy-"+testUser, f.Name())
+	assert.True(t, strings.HasPrefix(f.Name(), "proxy-"+testUser+"-"))
+	assert.Equal(t, len("proxy-"+testUser+"-")+5, len(f.Name()))
 	assert.True(t, strings.HasPrefix(f.String(), "Local file system"))

 	// check it is in the cache
@@ -108,7 +109,7 @@ func TestRun(t *testing.T) {
 	vfs, vfsKey, err := p.Call(testUser, testPass, false)
 	require.NoError(t, err)
 	require.NotNil(t, vfs)
-	assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
+	assert.True(t, strings.HasPrefix(vfs.Fs().Name(), "proxy-"+testUser+"-"))
 	assert.Equal(t, testUser, vfsKey)

 	// check it is in the cache
@@ -129,7 +130,7 @@ func TestRun(t *testing.T) {
 	vfs, vfsKey, err = p.Call(testUser, testPass, false)
 	require.NoError(t, err)
 	require.NotNil(t, vfs)
-	assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
+	assert.True(t, strings.HasPrefix(vfs.Fs().Name(), "proxy-"+testUser+"-"))
 	assert.Equal(t, testUser, vfsKey)

 	// check cache is at the same level
@@ -173,7 +174,7 @@ func TestRun(t *testing.T) {
 	require.NotNil(t, entry.vfs)
 	f := entry.vfs.Fs()
 	require.NotNil(t, f)
-	assert.Equal(t, "proxy-"+testUser, f.Name())
+	assert.True(t, strings.HasPrefix(f.Name(), "proxy-"+testUser+"-"))
 	assert.True(t, strings.HasPrefix(f.String(), "Local file system"))

 	// check it is in the cache
@@ -195,7 +196,7 @@ func TestRun(t *testing.T) {
 	)
 	require.NoError(t, err)
 	require.NotNil(t, vfs)
-	assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
+	assert.True(t, strings.HasPrefix(vfs.Fs().Name(), "proxy-"+testUser+"-"))
 	assert.Equal(t, testUser, vfsKey)

 	// check it is in the cache
@@ -216,7 +217,7 @@ func TestRun(t *testing.T) {
 	vfs, vfsKey, err = p.Call(testUser, publicKeyString, true)
 	require.NoError(t, err)
 	require.NotNil(t, vfs)
-	assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
+	assert.True(t, strings.HasPrefix(vfs.Fs().Name(), "proxy-"+testUser+"-"))
 	assert.Equal(t, testUser, vfsKey)

 	// check cache is at the same level
@@ -158,7 +158,7 @@ func (b *s3Backend) HeadObject(ctx context.Context, bucketName, objectName strin
 	}, nil
 }

-// GetObject fetchs the object from the filesystem.
+// GetObject fetches the object from the filesystem.
 func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string, rangeRequest *gofakes3.ObjectRangeRequest) (obj *gofakes3.Object, err error) {
 	_vfs, err := b.s.getVFS(ctx)
 	if err != nil {
@@ -400,7 +400,7 @@ func (b *s3Backend) deleteObject(ctx context.Context, bucketName, objectName str
 	}

 	fp := path.Join(bucketName, objectName)
-	// S3 does not report an error when attemping to delete a key that does not exist, so
+	// S3 does not report an error when attempting to delete a key that does not exist, so
 	// we need to skip IsNotExist errors.
 	if err := _vfs.Remove(fp); err != nil && !os.IsNotExist(err) {
 		return err
@@ -19,7 +19,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
 	for _, entry := range dirEntries {
 		object := entry.Name()

-		// workround for control-chars detect
+		// workaround for control-chars detect
 		objectPath := path.Join(fdPath, object)

 		if !strings.HasPrefix(object, name) {
@@ -928,3 +928,13 @@ put them back in again.` >}}
 * Matt Ickstadt <mattico8@gmail.com> <matt@beckenterprises.com>
 * Spencer McCullough <mccullough.spencer@gmail.com>
 * Jonathan Giannuzzi <jonathan@giannuzzi.me>
+* Christoph Berger <github@christophberger.com>
+* Tim White <tim.white@su.org.au>
+* Robin Schneider <robin.schneider@stackit.cloud>
+* izouxv <izouxv@users.noreply.github.com>
+* Moises Lima <mozlima@users.noreply.github.com>
+* Bruno Fernandes <bruno.fernandes1996@hotmail.com>
+* Corentin Barreau <corentin@archive.org>
+* hiddenmarten <hiddenmarten@gmail.com>
+* Trevor Starick <trevor.starick@gmail.com>
+* b-wimmer <132347192+b-wimmer@users.noreply.github.com>
@@ -938,8 +938,9 @@ You can set custom upload headers with the `--header-upload` flag.
 - Content-Encoding
 - Content-Language
 - Content-Type
+- X-MS-Tags

-Eg `--header-upload "Content-Type: text/potato"`
+Eg `--header-upload "Content-Type: text/potato"` or `--header-upload "X-MS-Tags: foo=bar"`

 ## Limitations

@@ -206,6 +206,13 @@ If the resource has multiple user-assigned identities you will need to
 unset `env_auth` and set `use_msi` instead. See the [`use_msi`
 section](#use_msi).

+If you are operating in disconnected clouds, or private clouds such as
+Azure Stack you may want to set `disable_instance_discovery = true`.
+This determines whether rclone requests Microsoft Entra instance
+metadata from `https://login.microsoft.com/` before authenticating.
+Setting this to `true` will skip this request, making you responsible
+for ensuring the configured authority is valid and trustworthy.
+
 ##### Env Auth: 3. Azure CLI credentials (as used by the az tool)

 Credentials created with the `az` tool can be picked up using `env_auth`.
@@ -289,6 +296,13 @@ be explicitly specified using exactly one of the `msi_object_id`,
 If none of `msi_object_id`, `msi_client_id`, or `msi_mi_res_id` is
 set, this is is equivalent to using `env_auth`.

+#### Azure CLI tool `az` {#use_az}
+
+Set to use the [Azure CLI tool `az`](https://learn.microsoft.com/en-us/cli/azure/)
+as the sole means of authentication.
+Setting this can be useful if you wish to use the `az` CLI on a host with
+a System Managed Identity that you do not want to use.
+Don't set `env_auth` at the same time.
+
 {{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/azurefiles/azurefiles.go then run make backenddocs" >}}
 ### Standard options

@@ -487,7 +487,7 @@ See the [bisync filters](#filtering) section and generic
 [--filter-from](/filtering/#filter-from-read-filtering-patterns-from-a-file)
 documentation.
 An [example filters file](#example-filters-file) contains filters for
-non-allowed files for synching with Dropbox.
+non-allowed files for syncing with Dropbox.

 If you make changes to your filters file then bisync requires a run
 with `--resync`. This is a safety feature, which prevents existing files
@@ -664,7 +664,7 @@ Using `--check-sync=false` will disable it and may significantly reduce the
 sync run times for very large numbers of files.

 The check may be run manually with `--check-sync=only`. It runs only the
-integrity check and terminates without actually synching.
+integrity check and terminates without actually syncing.

 Note that currently, `--check-sync` **only checks listing snapshots and NOT the
 actual files on the remotes.** Note also that the listing snapshots will not
@@ -1141,7 +1141,7 @@ The `--include*`, `--exclude*`, and `--filter` flags are also supported.

 ### How to filter directories

-Filtering portions of the directory tree is a critical feature for synching.
+Filtering portions of the directory tree is a critical feature for syncing.

 Examples of directory trees (always beneath the Path1/Path2 root level)
 you may want to exclude from your sync:
@@ -1250,7 +1250,7 @@ quashed by adding `--quiet` to the bisync command line.

 ## Example exclude-style filters files for use with Dropbox {#exclude-filters}

-- Dropbox disallows synching the listed temporary and configuration/data files.
+- Dropbox disallows syncing the listed temporary and configuration/data files.
   The `- <filename>` filters exclude these files where ever they may occur
   in the sync tree. Consider adding similar exclusions for file types
   you don't need to sync, such as core dump and software build files.
@@ -1584,7 +1584,7 @@ test command flags can be equally prefixed by a single `-` or double dash.

 - `go test . -case basic -remote local -remote2 local`
   runs the `test_basic` test case using only the local filesystem,
-  synching one local directory with another local directory.
+  syncing one local directory with another local directory.
   Test script output is to the console, while commands within scenario.txt
   have their output sent to the `.../workdir/test.log` file,
   which is finally compared to the golden copy.
@@ -741,7 +741,7 @@ strong random number generator. The nonce is incremented for each
 chunk read making sure each nonce is unique for each block written.
 The chance of a nonce being reused is minuscule. If you wrote an
 exabyte of data (10¹⁸ bytes) you would have a probability of
-approximately 2×10⁻³² of re-using a nonce.
+approximately 2×10⁻³² of reusing a nonce.

 #### Chunk

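For a rough check of that 2×10⁻³² figure: treat each 64 KiB chunk's nonce as an independent 192-bit value (a simplification; the real scheme picks a random 24-byte nonce per file and increments it per chunk). An exabyte is n ≈ 10¹⁸ / 2¹⁶ ≈ 1.5×10¹³ chunks, and the birthday bound gives P ≈ n² / (2 · 2¹⁹²) ≈ 2×10⁻³², which matches the quoted probability.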
@@ -619,6 +619,11 @@ it to `false`. It is also possible to specify `--boolean=false` or
 parsed as `--boolean` and the `false` is parsed as an extra command
 line argument for rclone.

+Options documented to take a `stringArray` parameter accept multiple
+values. To pass more than one value, repeat the option; for example:
+`--include value1 --include value2`.
+

 ### Time or duration options {#time-option}

 TIME or DURATION options can be specified as a duration string or a
@@ -2930,7 +2935,7 @@ so they take exactly the same form.
 The options set by environment variables can be seen with the `-vv` flag, e.g. `rclone version -vv`.

 Options that can appear multiple times (type `stringArray`) are
-treated slighly differently as environment variables can only be
+treated slightly differently as environment variables can only be
 defined once. In order to allow a simple mechanism for adding one or
 many items, the input is treated as a [CSV encoded](https://godoc.org/encoding/csv)
 string. For example
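The CSV treatment described above can be sanity-checked with a few lines of Go; the `RCLONE_EXCLUDE`-style value below is illustrative:

```go
package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

// One CSV-encoded environment variable value expands into several
// stringArray items, with quoting available for values that themselves
// contain commas.
func main() {
	value := `*.jpg,*.png,"has,comma.txt"` // e.g. the RCLONE_EXCLUDE value
	items, err := csv.NewReader(strings.NewReader(value)).Read()
	if err != nil {
		panic(err)
	}
	for _, item := range items {
		fmt.Printf("--exclude %s\n", item) // *.jpg, *.png, has,comma.txt
	}
}
```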
@@ -9,6 +9,8 @@ description: "Rclone Global Flags"
 This describes the global flags available to every rclone command
 split into groups.

+See the [Options section](/docs/#options) for syntax and usage advice.
+

 ## Copy

@@ -384,7 +384,7 @@ Use the gphotosdl proxy for downloading the full resolution images
 The Google API will deliver images and video which aren't full
 resolution, and/or have EXIF data missing.

-However if you ue the gphotosdl proxy tnen you can download original,
+However if you use the gphotosdl proxy then you can download original,
 unchanged images.

 This runs a headless browser in the background.
@@ -61,7 +61,7 @@ Enter a value.
 config_2fa> 2FACODE
 Remote config
 --------------------
-[koofr]
+[iclouddrive]
 - type: iclouddrive
 - apple_id: APPLEID
 - password: *** ENCRYPTED ***
@@ -78,6 +78,20 @@ y/e/d> y

 ADP is currently unsupported and need to be disabled

+On iPhone, Settings `>` Apple Account `>` iCloud `>` 'Access iCloud Data on the Web' must be ON, and 'Advanced Data Protection' OFF.
+
+## Troubleshooting
+
+### Missing PCS cookies from the request
+
+This means you have Advanced Data Protection (ADP) turned on. This is not supported at the moment. If you want to use rclone you will have to turn it off. See above for how to turn it off.
+
+You will need to clear the `cookies` and the `trust_token` fields in the config. Or you can delete the remote config and start again.
+
+You should then run `rclone reconnect remote:`.
+
+Note that changing the ADP setting may not take effect immediately - you may need to wait a few hours or a day before you can get rclone to work - keep clearing the config entry and running `rclone reconnect remote:` until rclone functions properly.
+
 {{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/iclouddrive/iclouddrive.go then run make backenddocs" >}}
 ### Standard options

@@ -936,6 +936,28 @@ See the [metadata](/docs/#metadata) docs for more info.

 {{< rem autogenerated options stop >}}

+### Impersonate other users as Admin
+
+Unlike Google Drive and impersonating any domain user via service accounts, OneDrive requires you to authenticate as an admin account, and manually setup a remote per user you wish to impersonate.
+
+1. In [Microsoft 365 Admin Center](https://admin.microsoft.com), open each user you need to "impersonate" and go to the OneDrive section. There is a heading called "Get access to files", you need to click to create the link, this creates the link of the format `https://{tenant}-my.sharepoint.com/personal/{user_name_domain_tld}/` but also changes the permissions so you your admin user has access.
+2. Then in powershell run the following commands:
+
+   ```console
+   Install-Module Microsoft.Graph -Scope CurrentUser -Repository PSGallery -Force
+   Import-Module Microsoft.Graph.Files
+   Connect-MgGraph -Scopes "Files.ReadWrite.All"
+   # Follow the steps to allow access to your admin user
+   # Then run this for each user you want to impersonate to get the Drive ID
+   Get-MgUserDefaultDrive -UserId '{emailaddress}'
+   # This will give you output of the format:
+   # Name     Id       DriveType CreatedDateTime
+   # ----     --       --------- ---------------
+   # OneDrive b!XYZ123 business  14/10/2023 1:00:58 pm
+   ```
+
+3. Then in rclone add a onedrive remote type, and use the `Type in driveID` with the DriveID you got in the previous step. One remote per user. It will then confirm the drive ID, and hopefully give you a message of `Found drive "root" of type "business"` and then include the URL of the format `https://{tenant}-my.sharepoint.com/personal/{user_name_domain_tld}/Documents`
+
 ## Limitations

 If you don't use rclone for 90 days the refresh token will
@@ -2068,7 +2068,7 @@ the `--vfs-cache-mode` is off, it will return an empty result.
     ],
 }

-The `expiry` time is the time until the file is elegible for being
+The `expiry` time is the time until the file is eligible for being
 uploaded in floating point seconds. This may go negative. As rclone
 only transfers `--transfers` files at once, only the lowest
 `--transfers` expiry times will have `uploading` as `true`. So there
@@ -750,7 +750,7 @@ Notes on above:
    that `USER_NAME` has been created.
 2. The Resource entry must include both resource ARNs, as one implies
    the bucket and the other implies the bucket's objects.
-3. When using [s3-no-check-bucket](#s3-no-check-bucket) and the bucket already exsits, the `"arn:aws:s3:::BUCKET_NAME"` doesn't have to be included.
+3. When using [s3-no-check-bucket](#s3-no-check-bucket) and the bucket already exists, the `"arn:aws:s3:::BUCKET_NAME"` doesn't have to be included.

 For reference, [here's an Ansible script](https://gist.github.com/ebridges/ebfc9042dd7c756cd101cfa807b7ae2b)
 that will generate one or more buckets that will work with `rclone sync`.
fs/cache/cache_test.go
@@ -131,7 +131,7 @@ func TestPutErr(t *testing.T) {
 	assert.Equal(t, 1, Entries())

 	fNew, err := GetFn(context.Background(), "mock:/", create)
-	require.Equal(t, fs.ErrorNotFoundInConfigFile, err)
+	require.True(t, errors.Is(err, fs.ErrorNotFoundInConfigFile))
 	require.Equal(t, f, fNew)

 	assert.Equal(t, 1, Entries())
@@ -141,7 +141,7 @@ func TestPutErr(t *testing.T) {
 	PutErr("mock:/file.txt", f, fs.ErrorNotFoundInConfigFile)

 	fNew, err = GetFn(context.Background(), "mock:/file.txt", create)
-	require.Equal(t, fs.ErrorNotFoundInConfigFile, err)
+	require.True(t, errors.Is(err, fs.ErrorNotFoundInConfigFile))
 	require.Equal(t, f, fNew)

 	assert.Equal(t, 1, Entries())
@@ -39,6 +39,7 @@ type Features struct {
 	NoMultiThreading bool // set if can't have multiplethreads on one download open
 	Overlay bool // this wraps one or more backends to add functionality
 	ChunkWriterDoesntSeek bool // set if the chunk writer doesn't need to read the data more than once
+	DoubleSlash bool // set if backend supports double slashes in paths

 	// Purge all files in the directory specified
 	//
@@ -383,6 +384,8 @@ func (ft *Features) Mask(ctx context.Context, f Fs) *Features {
 	ft.PartialUploads = ft.PartialUploads && mask.PartialUploads
 	ft.NoMultiThreading = ft.NoMultiThreading && mask.NoMultiThreading
 	// ft.Overlay = ft.Overlay && mask.Overlay don't propagate Overlay
+	ft.ChunkWriterDoesntSeek = ft.ChunkWriterDoesntSeek && mask.ChunkWriterDoesntSeek
+	ft.DoubleSlash = ft.DoubleSlash && mask.DoubleSlash

 	if mask.Purge == nil {
 		ft.Purge = nil
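A reduced sketch of the masking pattern those two added lines follow, with the `Features` struct trimmed to just the fields the hunk touches:

```go
package main

import "fmt"

// When one backend wraps another, boolean capabilities are ANDed so the
// wrapper only advertises a feature if the wrapped backend supports it too.
type features struct {
	ChunkWriterDoesntSeek bool
	DoubleSlash           bool
}

func (ft features) mask(m features) features {
	ft.ChunkWriterDoesntSeek = ft.ChunkWriterDoesntSeek && m.ChunkWriterDoesntSeek
	ft.DoubleSlash = ft.DoubleSlash && m.DoubleSlash
	return ft
}

func main() {
	wrapper := features{ChunkWriterDoesntSeek: true, DoubleSlash: true}
	wrapped := features{} // supports neither
	fmt.Printf("%+v\n", wrapper.mask(wrapped)) // both fields drop to false
}
```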
@@ -133,7 +133,7 @@ func TestCertificates(t *testing.T) {
 		assert.Fail(t, "Certificate expired", "Certificate expires at %s, current time is %s", cert[0].NotAfter.Sub(startTime), time.Since(startTime))
 	}

-	// Write some test data to fullfil the request
+	// Write some test data to fulfill the request
 	w.Header().Set("Content-Type", "text/plain")
 	_, _ = fmt.Fprintln(w, "test data")
 }))
@@ -9,6 +9,7 @@ import (

 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/filter"
+	"github.com/rclone/rclone/lib/bucket"
 )

 // DirSorted reads Object and *Dir into entries for the given Fs.
@@ -43,7 +44,10 @@ func filterAndSortDir(ctx context.Context, entries fs.DirEntries, includeAll boo
 	newEntries = entries[:0] // in place filter
 	prefix := ""
 	if dir != "" {
-		prefix = dir + "/"
+		prefix = dir
+		if !bucket.IsAllSlashes(dir) {
+			prefix += "/"
+		}
 	}
 	for _, entry := range entries {
 		ok := true
@@ -77,10 +81,10 @@ func filterAndSortDir(ctx context.Context, entries fs.DirEntries, includeAll boo
 		case !strings.HasPrefix(remote, prefix):
 			ok = false
 			fs.Errorf(entry, "Entry doesn't belong in directory %q (too short) - ignoring", dir)
-		case remote == prefix:
+		case remote == dir:
 			ok = false
 			fs.Errorf(entry, "Entry doesn't belong in directory %q (same as directory) - ignoring", dir)
-		case strings.ContainsRune(remote[len(prefix):], '/'):
+		case strings.ContainsRune(remote[len(prefix):], '/') && !bucket.IsAllSlashes(remote[len(prefix):]):
 			ok = false
 			fs.Errorf(entry, "Entry doesn't belong in directory %q (contains subdir) - ignoring", dir)
 		default:
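To see what the new prefix logic does, here is a small standalone sketch; `isAllSlashes` is a local copy of the `lib/bucket` helper used above:

```go
package main

import "fmt"

// isAllSlashes reports whether s is non-empty and consists only of '/'.
func isAllSlashes(s string) bool {
	if len(s) == 0 {
		return false
	}
	for _, c := range s {
		if c != '/' {
			return false
		}
	}
	return true
}

// prefixFor mirrors the prefix computation in filterAndSortDir: normal
// directories get a trailing "/", but all-slash directories such as "/"
// or "//" are used as-is, so entries like "//doubleslash" keep matching.
func prefixFor(dir string) string {
	prefix := ""
	if dir != "" {
		prefix = dir
		if !isAllSlashes(dir) {
			prefix += "/"
		}
	}
	return prefix
}

func main() {
	for _, dir := range []string{"", "dir", "/", "//"} {
		fmt.Printf("dir=%q prefix=%q\n", dir, prefixFor(dir))
	}
}
```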
@@ -49,7 +49,8 @@ func TestFilterAndSortIncludeAll(t *testing.T) {

 func TestFilterAndSortCheckDir(t *testing.T) {
 	// Check the different kinds of error when listing "dir"
-	da := mockdir.New("dir/")
+	da := mockdir.New("dir")
+	da2 := mockdir.New("dir/") // double slash dir - allowed for bucket based remotes
 	oA := mockobject.Object("diR/a")
 	db := mockdir.New("dir/b")
 	oB := mockobject.Object("dir/B/sub")
@@ -57,18 +58,19 @@ func TestFilterAndSortCheckDir(t *testing.T) {
 	oC := mockobject.Object("dir/C")
 	dd := mockdir.New("dir/d")
 	oD := mockobject.Object("dir/D")
-	entries := fs.DirEntries{da, oA, db, oB, dc, oC, dd, oD}
+	entries := fs.DirEntries{da, da2, oA, db, oB, dc, oC, dd, oD}
 	newEntries, err := filterAndSortDir(context.Background(), entries, true, "dir", nil, nil)
 	require.NoError(t, err)
 	assert.Equal(t,
-		fs.DirEntries{oC, oD, db, dc, dd},
+		fs.DirEntries{da2, oC, oD, db, dc, dd},
 		newEntries,
 	)
 }

 func TestFilterAndSortCheckDirRoot(t *testing.T) {
 	// Check the different kinds of error when listing the root ""
 	da := mockdir.New("")
+	da2 := mockdir.New("/") // doubleslash dir allowed on bucket based remotes
 	oA := mockobject.Object("A")
 	db := mockdir.New("b")
 	oB := mockobject.Object("B/sub")
@@ -76,12 +78,12 @@ func TestFilterAndSortCheckDirRoot(t *testing.T) {
 	oC := mockobject.Object("C")
 	dd := mockdir.New("d")
 	oD := mockobject.Object("D")
-	entries := fs.DirEntries{da, oA, db, oB, dc, oC, dd, oD}
+	entries := fs.DirEntries{da, da2, oA, db, oB, dc, oC, dd, oD}
 	newEntries, err := filterAndSortDir(context.Background(), entries, true, "", nil, nil)
 	require.NoError(t, err)
 	assert.Equal(t,
-		fs.DirEntries{oA, oC, oD, db, dc, dd},
+		fs.DirEntries{da2, oA, oC, oD, db, dc, dd},
 		newEntries,
 	)
 }

@@ -95,7 +95,7 @@ func LogValueHide(key string, value interface{}) LogValueItem {
 	return LogValueItem{key: key, value: value, render: false}
 }

-// String returns the representation of value. If render is fals this
+// String returns the representation of value. If render is false this
 // is an empty string so LogValueItem entries won't show in the
 // textual representation of logs.
 func (j LogValueItem) String() string {
@@ -6,6 +6,7 @@ import (
 	"context"
 	"crypto/md5"
 	"encoding/base64"
+	"fmt"
 	"os"
 	"path/filepath"
 	"strings"
@@ -104,7 +105,7 @@ func ParseRemote(path string) (fsInfo *RegInfo, configName, fsPath string, conne
 		m := ConfigMap("", nil, configName, parsed.Config)
 		fsName, ok = m.Get("type")
 		if !ok {
-			return nil, "", "", nil, ErrorNotFoundInConfigFile
+			return nil, "", "", nil, fmt.Errorf("%w (%q)", ErrorNotFoundInConfigFile, configName)
 		}
 	}
 } else {
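The switch from returning the bare sentinel to wrapping it with `%w` is why the test hunks elsewhere in this set move from `require.Equal` to `errors.Is`; a minimal sketch, with a stand-in sentinel whose text copies `fs.ErrorNotFoundInConfigFile` for illustration:

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("didn't find section in config file")

// getType wraps the sentinel with %w, adding the config name for context
// while keeping the error chain intact.
func getType(configName string) error {
	return fmt.Errorf("%w (%q)", errNotFound, configName)
}

func main() {
	err := getType("notfoundremote")
	fmt.Println(err)                         // didn't find section in config file ("notfoundremote")
	fmt.Println(err == errNotFound)          // false: direct comparison breaks
	fmt.Println(errors.Is(err, errNotFound)) // true: unwrapping comparison still works
}
```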
@@ -297,7 +297,7 @@ func (o *MemoryObject) Open(ctx context.Context, options ...fs.OpenOption) (io.R

 // Update in to the object with the modTime given of the given size
 //
-// This re-uses the internal buffer if at all possible.
+// This reuses the internal buffer if at all possible.
 func (o *MemoryObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
 	size := src.Size()
 	if size == 0 {
@@ -142,7 +142,7 @@ func TestMemoryObject(t *testing.T) {
 	assert.NoError(t, err)
 	checkContent(o, "Rutabaga")
 	assert.Equal(t, newNow, o.ModTime(context.Background()))
-	assert.Equal(t, "Rutaba", string(content)) // check we re-used the buffer
+	assert.Equal(t, "Rutaba", string(content)) // check we reused the buffer

 	// not within the buffer
 	newStr := "0123456789"
@@ -358,7 +358,7 @@ func TestRemoteServing(t *testing.T) {
 			URL: "[notfoundremote:]/",
 			Status: http.StatusInternalServerError,
 			Expected: `{
-	"error": "failed to make Fs: didn't find section in config file",
+	"error": "failed to make Fs: didn't find section in config file (\"notfoundremote\")",
 	"input": null,
 	"path": "/",
 	"status": 500
@@ -726,7 +726,7 @@ func (s *syncCopyMove) markParentNotEmpty(entry fs.DirEntry) {
 		parentDir = ""
 	}
 	delete(s.srcEmptyDirs, parentDir)
-	if parentDir == "" {
+	if parentDir == "" || parentDir == "/" {
 		break
 	}
 	parentDir = path.Dir(parentDir)
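The extra `parentDir == "/"` guard matters because `path.Dir` has a fixed point at `/`; walking up from a slash-rooted path would otherwise never reach `""` and the loop would never terminate. A quick demonstration:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	fmt.Println(path.Dir("a/b/c")) // "a/b"
	fmt.Println(path.Dir("/a"))    // "/"
	fmt.Println(path.Dir("/"))     // "/" - a fixed point, hence the guard
}
```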
@@ -459,7 +459,7 @@ func Run(t *testing.T, opt *Opt) {
 	subRemoteName, subRemoteLeaf, err = fstest.RandomRemoteName(remoteName)
 	require.NoError(t, err)
 	f, err = fs.NewFs(context.Background(), subRemoteName)
-	if err == fs.ErrorNotFoundInConfigFile {
+	if errors.Is(err, fs.ErrorNotFoundInConfigFile) {
 		t.Logf("Didn't find %q in config file - skipping tests", remoteName)
 		return
 	}
@@ -2121,6 +2121,144 @@ func Run(t *testing.T, opt *Opt) {
 		}
 	})

+	// Run tests for bucket based Fs
+	// TestIntegration/FsMkdir/FsPutFiles/Bucket
+	t.Run("Bucket", func(t *testing.T) {
+		// Test if this Fs is bucket based - this test won't work for wrapped bucket based backends.
+		if !f.Features().BucketBased {
+			t.Skip("Not a bucket based backend")
+		}
+		if f.Features().CanHaveEmptyDirectories {
+			t.Skip("Can have empty directories")
+		}
+		if !f.Features().DoubleSlash {
+			t.Skip("Can't have // in paths")
+		}
+		// Create some troublesome file names
+		fileNames := []string{
+			file1.Path,
+			file2.Path,
+			".leadingdot",
+			"/.leadingdot",
+			"///tripleslash",
+			"//doubleslash",
+			"dir/.leadingdot",
+			"dir///tripleslash",
+			"dir//doubleslash",
+		}
+		dirNames := []string{
+			"hello? sausage",
+			"hello? sausage/êé",
+			"hello? sausage/êé/Hello, 世界",
+			"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
+			"/",
+			"//",
+			"///",
+			"dir",
+			"dir/",
+			"dir//",
+		}
+		t1 := fstest.Time("2003-02-03T04:05:06.499999999Z")
+		var objs []fs.Object
+		for _, fileName := range fileNames[2:] {
+			contents := "bad file name: " + fileName
+			file := fstest.NewItem(fileName, contents, t1)
+			objs = append(objs, PutTestContents(ctx, t, f, &file, contents, true))
+		}
+
+		// Check they arrived
+		// This uses walk.Walk with a max size set to make sure we don't use ListR
+		check := func(f fs.Fs, dir string, wantFileNames, wantDirNames []string) {
+			t.Helper()
+			var gotFileNames, gotDirNames []string
+			require.NoError(t, walk.Walk(ctx, f, dir, true, 100, func(path string, entries fs.DirEntries, err error) error {
+				if err != nil {
+					return err
+				}
+				for _, entry := range entries {
+					if _, isObj := entry.(fs.Object); isObj {
+						gotFileNames = append(gotFileNames, entry.Remote())
+					} else {
+						gotDirNames = append(gotDirNames, entry.Remote())
+					}
+				}
+				return nil
+			}))
+			sort.Strings(wantDirNames)
+			sort.Strings(wantFileNames)
+			sort.Strings(gotDirNames)
+			sort.Strings(gotFileNames)
+			assert.Equal(t, wantFileNames, gotFileNames)
+			assert.Equal(t, wantDirNames, gotDirNames)
+		}
+		check(f, "", fileNames, dirNames)
+		check(f, "/", []string{
+			"/.leadingdot",
+			"///tripleslash",
+			"//doubleslash",
+		}, []string{
+			"//",
+			"///",
+		})
+		check(f, "//", []string{
+			"///tripleslash",
+			"//doubleslash",
+		}, []string{
+			"///",
+		})
+		check(f, "dir", []string{
+			"dir/.leadingdot",
+			"dir///tripleslash",
+			"dir//doubleslash",
+		}, []string{
+			"dir/",
+			"dir//",
+		})
+		check(f, "dir/", []string{
+			"dir///tripleslash",
+			"dir//doubleslash",
+		}, []string{
+			"dir//",
+		})
+		check(f, "dir//", []string{
+			"dir///tripleslash",
+		}, nil)
+
+		// Now create a backend not at the root of a bucket
+		f2, err := fs.NewFs(ctx, subRemoteName+"/dir")
+		require.NoError(t, err)
+		check(f2, "", []string{
+			".leadingdot",
+			"//tripleslash",
+			"/doubleslash",
+		}, []string{
+			"/",
+			"//",
+		})
+		check(f2, "/", []string{
+			"//tripleslash",
+			"/doubleslash",
+		}, []string{
+			"//",
+		})
+		check(f2, "//", []string{
+			"//tripleslash",
+		}, []string(nil))
+
+		// Remove the objects
+		for _, obj := range objs {
+			assert.NoError(t, obj.Remove(ctx))
+		}
+
+		// Check they are gone
+		fstest.CheckListingWithPrecision(t, f, []fstest.Item{file1, file2}, []string{
+			"hello? sausage",
+			"hello? sausage/êé",
+			"hello? sausage/êé/Hello, 世界",
+			"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
+		}, fs.GetModifyWindow(ctx, f))
+	})
+
 	// State of remote at the moment the internal tests are called
 	InternalTestFiles = []fstest.Item{file1, file2}

@@ -2391,7 +2529,7 @@ func Run(t *testing.T, opt *Opt) {
 	var itemCopy = item
 	itemCopy.Path += ".copy"

-	// Set copy cutoff to mininum value so we make chunks
+	// Set copy cutoff to minimum value so we make chunks
 	origCutoff, err := do.SetCopyCutoff(minChunkSize)
 	require.NoError(t, err)
 	defer func() {
go.mod
@@ -36,7 +36,6 @@ require (
 	github.com/go-chi/chi/v5 v5.2.0
 	github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348
 	github.com/go-git/go-billy/v5 v5.6.2
-	github.com/golang-jwt/jwt/v5 v5.2.1
 	github.com/google/uuid v1.6.0
 	github.com/hanwen/go-fuse/v2 v2.7.2
 	github.com/henrybear327/Proton-API-Bridge v1.0.0
@@ -151,6 +150,7 @@ require (
 	github.com/goccy/go-json v0.10.4 // indirect
 	github.com/gofrs/flock v0.8.1 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
 	github.com/google/s2a-go v0.1.8 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
 	github.com/googleapis/gax-go/v2 v2.14.1 // indirect
@@ -225,6 +225,7 @@ require (
 require (
 	github.com/Microsoft/go-winio v0.6.1 // indirect
 	github.com/ProtonMail/go-crypto v1.1.4
+	github.com/golang-jwt/jwt/v4 v4.5.1
 	github.com/pkg/xattr v0.4.10
 	golang.org/x/mobile v0.0.0-20250106192035-c31d5b91ecc3
 	golang.org/x/term v0.28.0
go.sum
@@ -287,6 +287,8 @@ github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
 github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo=
+github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
 github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
 github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -66,8 +66,9 @@ default based on the batch_mode in use.
 	Advanced: true,
 }, {
 	Name: "batch_commit_timeout",
-	Help: `Max time to wait for a batch to finish committing`,
+	Help: `Max time to wait for a batch to finish committing. (no longer used)`,
 	Default: fs.Duration(10 * time.Minute),
 	Advanced: true,
+	Hide: fs.OptionHideBoth,
 }}
 }
@@ -31,7 +31,9 @@ func Split(absPath string) (bucket, bucketPath string) {

 // Join path1 and path2
 //
-// Like path.Join but does not clean the path - useful to preserve trailing /
+// Like path.Join but does not clean the path - useful to preserve trailing /.
+//
+// It also does not clean multiple // in the path.
 func Join(path1, path2 string) string {
 	if path1 == "" {
 		return path2
@@ -39,7 +41,22 @@ func Join(path1, path2 string) string {
 	if path2 == "" {
 		return path1
 	}
-	return strings.TrimSuffix(path1, "/") + "/" + strings.TrimPrefix(path2, "/")
+	return path1 + "/" + path2
+}
+
+// IsAllSlashes returns true if s is all / characters.
+//
+// It returns false if s is "".
+func IsAllSlashes(s string) bool {
+	if len(s) == 0 {
+		return false
+	}
+	for _, c := range s {
+		if c != '/' {
+			return false
+		}
+	}
+	return true
 }

 // Cache stores whether buckets are available and their IDs
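The new `Join` is small enough to try standalone; this copy shows the behaviour change the updated tests below encode:

```go
package main

import "fmt"

// join copies the new lib/bucket.Join above: plain concatenation with a
// single "/" that no longer strips existing slashes, so double slashes
// in object names survive the join.
func join(path1, path2 string) string {
	if path1 == "" {
		return path2
	}
	if path2 == "" {
		return path1
	}
	return path1 + "/" + path2
}

func main() {
	fmt.Println(join("in1", "in2"))  // in1/in2 (unchanged behaviour)
	fmt.Println(join("in1/", "in2")) // in1//in2 (previously in1/in2)
	fmt.Println(join("in1", "/in2")) // in1//in2 (previously in1/in2)
}
```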
@@ -34,10 +34,10 @@ func TestJoin(t *testing.T) {
 		{in1: "in1", in2: "", want: "in1"},
 		{in1: "", in2: "in2", want: "in2"},
 		{in1: "in1", in2: "in2", want: "in1/in2"},
-		{in1: "in1/", in2: "in2", want: "in1/in2"},
+		{in1: "in1/", in2: "in2", want: "in1//in2"},
-		{in1: "in1", in2: "/in2", want: "in1/in2"},
+		{in1: "in1", in2: "/in2", want: "in1//in2"},
 		{in1: "in1", in2: "in2/", want: "in1/in2/"},
-		{in1: "/in1", in2: "/in2", want: "/in1/in2"},
+		{in1: "/in1", in2: "/in2", want: "/in1//in2"},
 		{in1: "/in1", in2: "../in2", want: "/in1/../in2"},
 	} {
 		got := Join(test.in1, test.in2)
@@ -45,6 +45,24 @@ func TestJoin(t *testing.T) {
 	}
 }

+func TestIsAllSlashes(t *testing.T) {
+	for _, test := range []struct {
+		in   string
+		want bool
+	}{
+		{in: "", want: false},
+		{in: "/", want: true},
+		{in: "x/", want: false},
+		{in: "/x", want: false},
+		{in: "//", want: true},
+		{in: "/x/", want: false},
+		{in: "///", want: true},
+	} {
+		got := IsAllSlashes(test.in)
+		assert.Equal(t, test.want, got, test.in)
+	}
+}
+
 func TestCache(t *testing.T) {
 	c := NewCache()
 	errBoom := errors.New("boom")
@@ -19,7 +19,11 @@ By default this will serve files without needing a login.
 You can either use an htpasswd file which can take lots of users, or
 set a single username and password with the ` + "`--{{ .Prefix }}user` and `--{{ .Prefix }}pass`" + ` flags.

-If no static users are configured by either of the above methods, and client
+Alternatively, you can have the reverse proxy manage authentication and use the
+username provided in the configured header with ` + "`--user-from-header`" + ` (e.g., ` + "`--{{ .Prefix }}--user-from-header=x-remote-user`" + `).
+Ensure the proxy is trusted and headers cannot be spoofed, as misconfiguration may lead to unauthorized access.
+
+If either of the above authentication methods is not configured and client
 certificates are required by the ` + "`--client-ca`" + ` flag passed to the server, the
 client certificate common name will be considered as the username.

@@ -85,6 +89,10 @@ var AuthConfigInfo = fs.Options{{
 	Name: "salt",
 	Default: "dlPL2MqE",
 	Help: "Password hashing salt",
+}, {
+	Name: "user_from_header",
+	Default: "",
+	Help: "User name from a defined HTTP header",
 }}

 // AuthConfig contains options for the http authentication
@@ -94,6 +102,7 @@ type AuthConfig struct {
 	BasicUser string `config:"user"` // single username for basic auth if not using Htpasswd
 	BasicPass string `config:"pass"` // password for BasicUser
 	Salt string `config:"salt"` // password hashing salt
+	UserFromHeader string `config:"user_from_header"` // retrieve user name from a defined HTTP header
 	CustomAuthFn CustomAuthFn `json:"-" config:"-"` // custom Auth (not set by command line flags)
 }

@@ -104,6 +113,7 @@ func (cfg *AuthConfig) AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string) {
 	flags.StringVarP(flagSet, &cfg.BasicUser, prefix+"user", "", cfg.BasicUser, "User name for authentication", prefix)
 	flags.StringVarP(flagSet, &cfg.BasicPass, prefix+"pass", "", cfg.BasicPass, "Password for authentication", prefix)
 	flags.StringVarP(flagSet, &cfg.Salt, prefix+"salt", "", cfg.Salt, "Password hashing salt", prefix)
+	flags.StringVarP(flagSet, &cfg.UserFromHeader, prefix+"user-from-header", "", cfg.UserFromHeader, "Retrieve the username from a specified HTTP header if no other authentication methods are configured (ideal for proxied setups)", prefix)
 }

 // AddAuthFlagsPrefix adds flags to the flag set for AuthConfig
@@ -5,6 +5,7 @@ import (
 	"encoding/base64"
 	"fmt"
 	"net/http"
+	"regexp"
 	"strings"
 	"sync"

@@ -153,6 +154,26 @@ func MiddlewareAuthCustom(fn CustomAuthFn, realm string, userFromContext bool) M
 	}
 }

+var validUsernameRegexp = regexp.MustCompile(`^[\p{L}\d@._-]+$`)
+
+// MiddlewareAuthGetUserFromHeader middleware that bypasses authentication and extracts the user via a specified HTTP header(ideal for proxied setups).
+func MiddlewareAuthGetUserFromHeader(header string) Middleware {
+	return func(next http.Handler) http.Handler {
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			username := strings.TrimSpace(r.Header.Get(header))
+			if username != "" && validUsernameRegexp.MatchString(username) {
+				r = r.WithContext(context.WithValue(r.Context(), ctxKeyUser, username))
+				next.ServeHTTP(w, r)
+				return
+			}
+
+			code := http.StatusUnauthorized
+			w.Header().Set("Content-Type", "text/plain")
+			http.Error(w, http.StatusText(code), code)
+		})
+	}
+}
+
 var onlyOnceWarningAllowOrigin sync.Once

 // MiddlewareCORS instantiates middleware that handles basic CORS protections for rcd
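The username check is the load-bearing part of this middleware; a standalone sketch of the same regexp shows what gets through (the sample header values are invented):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// The same pattern as validUsernameRegexp above: Unicode letters, digits
// and @._- only. Anything else in the trusted header, such as the
// "/test:" value the tests below send, is rejected with 401 instead of
// being accepted as a username.
var validUsername = regexp.MustCompile(`^[\p{L}\d@._-]+$`)

func main() {
	for _, header := range []string{"alice", "bob@example.com", "rémi", "/test:", "   "} {
		name := strings.TrimSpace(header)
		ok := name != "" && validUsername.MatchString(name)
		fmt.Printf("%-18q accepted=%v\n", header, ok)
	}
}
```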
@@ -15,6 +15,8 @@ import (
 func TestMiddlewareAuth(t *testing.T) {
 	servers := []struct {
 		name         string
+		expectedUser string
+		remoteUser   string
 		http         Config
 		auth         AuthConfig
 		user         string
@@ -85,9 +87,32 @@ func TestMiddlewareAuth(t *testing.T) {
 			},
 			user: "custom",
 			pass: "custom",
+		}, {
+			name:         "UserFromHeader",
+			remoteUser:   "remoteUser",
+			expectedUser: "remoteUser",
+			http: Config{
+				ListenAddr: []string{"127.0.0.1:0"},
+			},
+			auth: AuthConfig{
+				UserFromHeader: "X-Remote-User",
+			},
+		}, {
+			name:         "UserFromHeader/MixedWithHtPasswd",
+			remoteUser:   "remoteUser",
+			expectedUser: "md5",
+			http: Config{
+				ListenAddr: []string{"127.0.0.1:0"},
+			},
+			auth: AuthConfig{
+				UserFromHeader: "X-Remote-User",
+				Realm:          "test",
+				HtPasswd:       "./testdata/.htpasswd",
+			},
+			user: "md5",
+			pass: "md5",
 		},
 	}
 
 	for _, ss := range servers {
 		t.Run(ss.name, func(t *testing.T) {
 			s, err := NewServer(context.Background(), WithConfig(ss.http), WithAuth(ss.auth))
@@ -97,7 +122,12 @@ func TestMiddlewareAuth(t *testing.T) {
 			}()
 
 			expected := []byte("secret-page")
+			if ss.expectedUser != "" {
+				s.Router().Mount("/", testAuthUserHandler())
+			} else {
 				s.Router().Mount("/", testEchoHandler(expected))
+			}
+
 			s.Serve()
 
 			url := testGetServerURL(t, s)
@@ -114,18 +144,24 @@ func TestMiddlewareAuth(t *testing.T) {
 			}()
 
 			require.Equal(t, http.StatusUnauthorized, resp.StatusCode, "using no creds should return unauthorized")
+			if ss.auth.UserFromHeader == "" {
 				wwwAuthHeader := resp.Header.Get("WWW-Authenticate")
 				require.NotEmpty(t, wwwAuthHeader, "resp should contain WWW-Authtentication header")
 				require.Contains(t, wwwAuthHeader, fmt.Sprintf("realm=%q", ss.auth.Realm), "WWW-Authtentication header should contain relam")
+			}
 		})
 
 		t.Run("BadCreds", func(t *testing.T) {
 			client := &http.Client{}
 			req, err := http.NewRequest("GET", url, nil)
 			require.NoError(t, err)
+
+			if ss.user != "" {
 				req.SetBasicAuth(ss.user+"BAD", ss.pass+"BAD")
+			}
+
+			if ss.auth.UserFromHeader != "" {
+				req.Header.Set(ss.auth.UserFromHeader, "/test:")
+			}
+
 			resp, err := client.Do(req)
 			require.NoError(t, err)
@@ -134,10 +170,11 @@ func TestMiddlewareAuth(t *testing.T) {
 			}()
 
 			require.Equal(t, http.StatusUnauthorized, resp.StatusCode, "using bad creds should return unauthorized")
+			if ss.auth.UserFromHeader == "" {
 				wwwAuthHeader := resp.Header.Get("WWW-Authenticate")
 				require.NotEmpty(t, wwwAuthHeader, "resp should contain WWW-Authtentication header")
 				require.Contains(t, wwwAuthHeader, fmt.Sprintf("realm=%q", ss.auth.Realm), "WWW-Authtentication header should contain relam")
+			}
 		})
 
 		t.Run("GoodCreds", func(t *testing.T) {
@@ -145,7 +182,13 @@ func TestMiddlewareAuth(t *testing.T) {
 			req, err := http.NewRequest("GET", url, nil)
 			require.NoError(t, err)
+
+			if ss.user != "" {
 				req.SetBasicAuth(ss.user, ss.pass)
+			}
+
+			if ss.auth.UserFromHeader != "" {
+				req.Header.Set(ss.auth.UserFromHeader, ss.remoteUser)
+			}
+
 			resp, err := client.Do(req)
 			require.NoError(t, err)
@@ -155,7 +198,11 @@ func TestMiddlewareAuth(t *testing.T) {
 
 			require.Equal(t, http.StatusOK, resp.StatusCode, "using good creds should return ok")
 
+			if ss.expectedUser != "" {
+				testExpectRespBody(t, resp, []byte(ss.expectedUser))
+			} else {
 				testExpectRespBody(t, resp, expected)
+			}
 		})
 	})
 }
@@ -392,16 +392,23 @@ func NewServer(ctx context.Context, options ...Option) (*Server, error) {
 
 func (s *Server) initAuth() {
 	s.usingAuth = false
-	authCertificateUserEnabled := s.tlsConfig != nil && s.tlsConfig.ClientAuth != tls.NoClientCert && s.auth.HtPasswd == "" && s.auth.BasicUser == ""
-	if authCertificateUserEnabled {
+	altUsernameEnabled := s.auth.HtPasswd == "" && s.auth.BasicUser == ""
+	if altUsernameEnabled {
 		s.usingAuth = true
-		s.mux.Use(MiddlewareAuthCertificateUser())
+		if s.auth.UserFromHeader != "" {
+			s.mux.Use(MiddlewareAuthGetUserFromHeader(s.auth.UserFromHeader))
+		} else if s.tlsConfig != nil && s.tlsConfig.ClientAuth != tls.NoClientCert {
+			s.mux.Use(MiddlewareAuthCertificateUser())
+		} else {
+			s.usingAuth = false
+			altUsernameEnabled = false
+		}
 	}
 
 	if s.auth.CustomAuthFn != nil {
 		s.usingAuth = true
-		s.mux.Use(MiddlewareAuthCustom(s.auth.CustomAuthFn, s.auth.Realm, authCertificateUserEnabled))
+		s.mux.Use(MiddlewareAuthCustom(s.auth.CustomAuthFn, s.auth.Realm, altUsernameEnabled))
 		return
 	}
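Read as a decision table, the rewritten initAuth gives htpasswd/basic credentials priority; only when both are unset does an alternative username source kick in, preferring the configured header over TLS client certificates. A small illustrative sketch of that precedence (the function and its arguments are hypothetical, not rclone API):

    package main

    import "fmt"

    // pickAuth mirrors the precedence in initAuth above, assuming
    // htpasswd/basic settings disable the alternative username sources.
    func pickAuth(htPasswd, basicUser, userFromHeader string, tlsClientCert bool) string {
    	if htPasswd != "" || basicUser != "" {
    		return "htpasswd/basic"
    	}
    	if userFromHeader != "" {
    		return "user-from-header"
    	}
    	if tlsClientCert {
    		return "certificate-user"
    	}
    	return "none"
    }

    func main() {
    	fmt.Println(pickAuth("", "", "X-Remote-User", true)) // user-from-header wins
    }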
@@ -1,84 +0,0 @@
-package jwtutil
-
-import (
-	"time"
-
-	"github.com/golang-jwt/jwt/v5"
-)
-
-// The following is the declaration of the StandardClaims type from jwt-go v4
-// (https://github.com/golang-jwt/jwt/blob/v4/claims.go), where it was marked
-// as deprecated before later removed in v5. It was distributed under the terms
-// of the MIT License (https://github.com/golang-jwt/jwt/blob/v4/LICENSE), with
-// the copyright notice included below. We have renamed the type to
-// LegacyStandardClaims to avoid confusion, and made it compatible with
-// jwt-go v5 by implementing functions to satisfy the changed Claims interface.
-
-// Copyright (c) 2012 Dave Grijalva
-// Copyright (c) 2021 golang-jwt maintainers
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to permit
-// persons to whom the Software is furnished to do so, subject to the
-// following conditions:
-//
-// The above copyright notice and this permission notice shall be included
-// in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-// DEALINGS IN THE SOFTWARE.
-
-// LegacyStandardClaims are a structured version of the JWT Claims Set, as referenced at
-// https://datatracker.ietf.org/doc/html/rfc7519#section-4. They do not follow the
-// specification exactly, since they were based on an earlier draft of the
-// specification and not updated. The main difference is that they only
-// support integer-based date fields and singular audiences. This might lead to
-// incompatibilities with other JWT implementations. The use of this is discouraged, instead
-// the newer RegisteredClaims struct should be used.
-type LegacyStandardClaims struct {
-	Audience  string `json:"aud,omitempty"`
-	ExpiresAt int64  `json:"exp,omitempty"`
-	ID        string `json:"jti,omitempty"`
-	IssuedAt  int64  `json:"iat,omitempty"`
-	Issuer    string `json:"iss,omitempty"`
-	NotBefore int64  `json:"nbf,omitempty"`
-	Subject   string `json:"sub,omitempty"`
-}
-
-// GetExpirationTime implements the Claims interface.
-func (c LegacyStandardClaims) GetExpirationTime() (*jwt.NumericDate, error) {
-	return jwt.NewNumericDate(time.Unix(c.ExpiresAt, 0)), nil
-}
-
-// GetIssuedAt implements the Claims interface.
-func (c LegacyStandardClaims) GetIssuedAt() (*jwt.NumericDate, error) {
-	return jwt.NewNumericDate(time.Unix(c.IssuedAt, 0)), nil
-}
-
-// GetNotBefore implements the Claims interface.
-func (c LegacyStandardClaims) GetNotBefore() (*jwt.NumericDate, error) {
-	return jwt.NewNumericDate(time.Unix(c.NotBefore, 0)), nil
-}
-
-// GetIssuer implements the Claims interface.
-func (c LegacyStandardClaims) GetIssuer() (string, error) {
-	return c.Issuer, nil
-}
-
-// GetSubject implements the Claims interface.
-func (c LegacyStandardClaims) GetSubject() (string, error) {
-	return c.Subject, nil
-}
-
-// GetAudience implements the Claims interface.
-func (c LegacyStandardClaims) GetAudience() (jwt.ClaimStrings, error) {
-	return []string{c.Audience}, nil
-}
@@ -14,7 +14,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/golang-jwt/jwt/v5"
+	"github.com/golang-jwt/jwt/v4"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/lib/oauthutil"
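With the compatibility shim above deleted and the import reverted to jwt/v4, callers that want spec-compliant claims can use the library's own RegisteredClaims type instead of the removed integer-based struct. A minimal sketch, assuming the golang-jwt v4 API (issuer, subject, and key are placeholder values):

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/golang-jwt/jwt/v4"
    )

    func main() {
    	// RegisteredClaims uses *NumericDate for dates and a string slice
    	// for the audience, unlike the removed LegacyStandardClaims.
    	claims := jwt.RegisteredClaims{
    		Issuer:    "example-issuer",
    		Subject:   "example-subject",
    		Audience:  jwt.ClaimStrings{"example-audience"},
    		IssuedAt:  jwt.NewNumericDate(time.Now()),
    		ExpiresAt: jwt.NewNumericDate(time.Now().Add(time.Hour)),
    	}
    	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
    	signed, err := token.SignedString([]byte("demo-secret")) // demo key only
    	fmt.Println(signed, err)
    }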
@@ -464,7 +464,7 @@ the |--vfs-cache-mode| is off, it will return an empty result.
 		],
 	}
 
-The |expiry| time is the time until the file is elegible for being
+The |expiry| time is the time until the file is eligible for being
 uploaded in floating point seconds. This may go negative. As rclone
 only transfers |--transfers| files at once, only the lowest
 |--transfers| expiry times will have |uploading| as |true|. So there
@@ -216,7 +216,7 @@ func New(f fs.Fs, opt *vfscommon.Options) *VFS {
 	configName := fs.ConfigString(f)
 	for _, activeVFS := range active[configName] {
 		if vfs.Opt == activeVFS.Opt {
-			fs.Debugf(f, "Re-using VFS from active cache")
+			fs.Debugf(f, "Reusing VFS from active cache")
 			activeVFS.inUse.Add(1)
 			return activeVFS
 		}
@@ -365,6 +365,11 @@ func (vfs *VFS) Shutdown() {
 	activeMu.Unlock()
 
 	vfs.shutdownCache()
+
+	if vfs.pollChan != nil {
+		close(vfs.pollChan)
+		vfs.pollChan = nil
+	}
 }
 
 // CleanUp deletes the contents of the on disk cache
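The nil check matters: closing a nil channel panics, and so does closing the same channel twice. Nilling the field after the close makes a repeated Shutdown a no-op. A minimal standalone illustration of the same guard pattern (the notifier type here is invented for the example):

    package main

    import "fmt"

    type notifier struct {
    	pollChan chan struct{} // element type is illustrative
    }

    // stopPolling mirrors the guard above: close once, then nil the
    // field so a second call is a no-op instead of a double-close panic.
    func (n *notifier) stopPolling() {
    	if n.pollChan != nil {
    		close(n.pollChan)
    		n.pollChan = nil
    	}
    }

    func main() {
    	n := &notifier{pollChan: make(chan struct{})}
    	n.stopPolling()
    	n.stopPolling() // safe: the field was nilled after the first close
    	fmt.Println("shut down cleanly")
    }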
@@ -428,7 +428,7 @@ func TestItemReloadCacheStale(t *testing.T) {
 	assert.Equal(t, int64(110), fi.Size())
 
 	// Write to the file to make it dirty
-	// This checks we aren't re-using stale data
+	// This checks we aren't reusing stale data
 	n, err := item.WriteAt([]byte("HELLO"), 0)
 	require.NoError(t, err)
 	assert.Equal(t, 5, n)