Mirror of https://github.com/rclone/rclone.git, synced 2026-01-22 04:13:14 +00:00

Compare commits: fix-8569-s...fix-auth-p (55 commits)
| SHA1 |
|---|
| 69ae5f2aaf |
| c837664653 |
| 77429b154e |
| 39b8f17ebb |
| 81ecfb0f64 |
| 656e789c5b |
| fe19184084 |
| b4990cd858 |
| 8e955c6b13 |
| 3a5ddfcd3c |
| ac3f7a87c3 |
| 4e9b63e141 |
| 7fd7fe3c82 |
| 9dff45563d |
| 83cf8fb821 |
| 32e79a5c5c |
| fc44a8114e |
| 657172ef77 |
| 71eb4199c3 |
| ac3c21368d |
| db71b2bd5f |
| 8cfe42d09f |
| e673a28a72 |
| 59889ce46b |
| 62e8a01e7e |
| 87eaf37629 |
| 7c7606a6cf |
| dbb21165d4 |
| 375953cba3 |
| af5385b344 |
| 347be176af |
| bf5a4774c6 |
| 0275d3edf2 |
| be53ae98f8 |
| 0d9fe51632 |
| 03bd795221 |
| 5a4026ccb4 |
| b1d4de69c2 |
| 5316acd046 |
| 2c72842c10 |
| 4a81f12c26 |
| aabda1cda2 |
| 572fe20f8e |
| 2fd4c45b34 |
| ec5489e23f |
| 6898375a2d |
| d413443a6a |
| 5039747f26 |
| 11ba4ac539 |
| b4ed7fb7d7 |
| 719473565e |
| bd7278d7e9 |
| 45ba81c726 |
| 530658e0cc |
| b742705d0c |
28 .github/workflows/build.yml (vendored)
@@ -26,12 +26,12 @@ jobs:
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.21', 'go1.22']
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.22', 'go1.23']

include:
- job_name: linux
os: ubuntu-latest
go: '>=1.23.0-rc.1'
go: '>=1.24.0-rc.1'
gotags: cmount
build_flags: '-include "^linux/"'
check: true

@@ -42,14 +42,14 @@ jobs:

- job_name: linux_386
os: ubuntu-latest
go: '>=1.23.0-rc.1'
go: '>=1.24.0-rc.1'
goarch: 386
gotags: cmount
quicktest: true

- job_name: mac_amd64
os: macos-latest
go: '>=1.23.0-rc.1'
go: '>=1.24.0-rc.1'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true

@@ -58,14 +58,14 @@ jobs:

- job_name: mac_arm64
os: macos-latest
go: '>=1.23.0-rc.1'
go: '>=1.24.0-rc.1'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true

- job_name: windows
os: windows-latest
go: '>=1.23.0-rc.1'
go: '>=1.24.0-rc.1'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'

@@ -75,23 +75,23 @@ jobs:

- job_name: other_os
os: ubuntu-latest
go: '>=1.23.0-rc.1'
go: '>=1.24.0-rc.1'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true

- job_name: go1.21
os: ubuntu-latest
go: '1.21'
quicktest: true
racequicktest: true

- job_name: go1.22
os: ubuntu-latest
go: '1.22'
quicktest: true
racequicktest: true

- job_name: go1.23
os: ubuntu-latest
go: '1.23'
quicktest: true
racequicktest: true

name: ${{ matrix.job_name }}

runs-on: ${{ matrix.os }}

@@ -311,7 +311,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '>=1.23.0-rc.1'
go-version: '>=1.24.0-rc.1'

- name: Set global environment variables
shell: bash
@@ -12,6 +12,8 @@ RUN ./rclone version
# Begin final image
FROM alpine:latest

LABEL org.opencontainers.image.source="https://github.com/rclone/rclone"

RUN apk --no-cache add ca-certificates fuse3 tzdata && \
echo "user_allow_other" >> /etc/fuse.conf
11 RELEASE.md
@@ -47,13 +47,20 @@ Early in the next release cycle update the dependencies.
* `git commit -a -v -m "build: update all dependencies"`

If the `make updatedirect` upgrades the version of go in the `go.mod`
then go to manual mode. `go1.20` here is the lowest supported version

go 1.22.0

then go to manual mode. `go1.22` here is the lowest supported version
in the `go.mod`.

If `make updatedirect` added a `toolchain` directive then remove it.
We don't want to force a toolchain on our users. Linux packagers are
often using a version of Go that is a few versions out of date.

```
go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
go get -d $(cat /tmp/potential-upgrades)
go mod tidy -go=1.20 -compat=1.20
go mod tidy -go=1.22 -compat=1.22
```

If the `go mod tidy` fails use the output from it to remove the
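As an aside on the toolchain step above: a minimal sketch of how the directive can be removed without editing go.mod by hand (assuming a Go 1.21+ toolchain; not part of the documented release procedure):

```
go mod edit -toolchain=none
go mod tidy -go=1.22 -compat=1.22
```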
File diff suppressed because it is too large
@@ -3,16 +3,149 @@
package azureblob

import (
"context"
"encoding/base64"
"strings"
"testing"

"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func (f *Fs) InternalTest(t *testing.T) {
// Check first feature flags are set on this
// remote
func TestBlockIDCreator(t *testing.T) {
// Check creation and random number
bic, err := newBlockIDCreator()
require.NoError(t, err)
bic2, err := newBlockIDCreator()
require.NoError(t, err)
assert.NotEqual(t, bic.random, bic2.random)
assert.NotEqual(t, bic.random, [8]byte{})

// Set random to known value for tests
bic.random = [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
chunkNumber := uint64(0xFEDCBA9876543210)

// Check creation of ID
want := base64.StdEncoding.EncodeToString([]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, 1, 2, 3, 4, 5, 6, 7, 8})
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", want)
got := bic.newBlockID(chunkNumber)
assert.Equal(t, want, got)
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", got)

// Test checkID is working
assert.NoError(t, bic.checkID(chunkNumber, got))
assert.ErrorContains(t, bic.checkID(chunkNumber, "$"+got), "illegal base64")
assert.ErrorContains(t, bic.checkID(chunkNumber, "AAAA"+got), "bad block ID length")
assert.ErrorContains(t, bic.checkID(chunkNumber+1, got), "expecting decoded")
assert.ErrorContains(t, bic2.checkID(chunkNumber, got), "random bytes")
}

func (f *Fs) testFeatures(t *testing.T) {
// Check first feature flags are set on this remote
enabled := f.Features().SetTier
assert.True(t, enabled)
enabled = f.Features().GetTier
assert.True(t, enabled)
}

type ReadSeekCloser struct {
*strings.Reader
}

func (r *ReadSeekCloser) Close() error {
return nil
}

// Stage a block at remote but don't commit it
func (f *Fs) stageBlockWithoutCommit(ctx context.Context, t *testing.T, remote string) {
var (
containerName, blobPath = f.split(remote)
containerClient = f.cntSVC(containerName)
blobClient = containerClient.NewBlockBlobClient(blobPath)
data = "uncommitted data"
blockID = "1"
blockIDBase64 = base64.StdEncoding.EncodeToString([]byte(blockID))
)
r := &ReadSeekCloser{strings.NewReader(data)}
_, err := blobClient.StageBlock(ctx, blockIDBase64, r, nil)
require.NoError(t, err)

// Verify the block is staged but not committed
blockList, err := blobClient.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
require.NoError(t, err)
found := false
for _, block := range blockList.UncommittedBlocks {
if *block.Name == blockIDBase64 {
found = true
break
}
}
require.True(t, found, "Block ID not found in uncommitted blocks")
}

// This tests uploading a blob where it has uncommitted blocks with a different ID size.
//
// https://gauravmantri.com/2013/05/18/windows-azure-blob-storage-dealing-with-the-specified-blob-or-block-content-is-invalid-error/
//
// TestIntegration/FsMkdir/FsPutFiles/Internal/WriteUncommittedBlocks
func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
var (
ctx = context.Background()
remote = "testBlob"
)

// Multipart copy the blob please
oldUseCopyBlob, oldCopyCutoff := f.opt.UseCopyBlob, f.opt.CopyCutoff
f.opt.UseCopyBlob = false
f.opt.CopyCutoff = f.opt.ChunkSize
defer func() {
f.opt.UseCopyBlob, f.opt.CopyCutoff = oldUseCopyBlob, oldCopyCutoff
}()

// Create a blob with uncommitted blocks
f.stageBlockWithoutCommit(ctx, t, remote)

// Now attempt to overwrite the block with a different sized block ID to provoke this error

// Check the object does not exist
_, err := f.NewObject(ctx, remote)
require.Equal(t, fs.ErrorObjectNotFound, err)

// Upload a multipart file over the block with uncommitted chunks of a different ID size
size := 4*int(f.opt.ChunkSize) - 1
contents := random.String(size)
item := fstest.NewItem(remote, contents, fstest.Time("2001-05-06T04:05:06.499Z"))
o := fstests.PutTestContents(ctx, t, f, &item, contents, true)

// Check size
assert.Equal(t, int64(size), o.Size())

// Create a new blob with uncommitted blocks
newRemote := "testBlob2"
f.stageBlockWithoutCommit(ctx, t, newRemote)

// Copy over that block
dst, err := f.Copy(ctx, o, newRemote)
require.NoError(t, err)

// Check basics
assert.Equal(t, int64(size), dst.Size())
assert.Equal(t, newRemote, dst.Remote())

// Check contents
gotContents := fstests.ReadObject(ctx, t, dst, -1)
assert.Equal(t, contents, gotContents)

// Remove the object
require.NoError(t, dst.Remove(ctx))
}

func (f *Fs) InternalTest(t *testing.T) {
t.Run("Features", f.testFeatures)
t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
}
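For readers of the TestBlockIDCreator test above: a standalone sketch (not rclone code) of the block ID layout it pins down, an 8-byte big-endian chunk number followed by the creator's 8 random bytes, base64 encoded.

```go
package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

func main() {
	// Values taken from the test above: fixed "random" suffix and chunk number.
	random := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
	chunkNumber := uint64(0xFEDCBA9876543210)

	buf := make([]byte, 16)
	binary.BigEndian.PutUint64(buf[:8], chunkNumber) // chunk number first, big-endian
	copy(buf[8:], random[:])                         // then the random suffix

	// Prints /ty6mHZUMhABAgMEBQYHCA== - the value asserted in the test.
	fmt.Println(base64.StdEncoding.EncodeToString(buf))
}
```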
@@ -15,13 +15,17 @@ import (

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
name := "TestAzureBlob"
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureBlob:",
RemoteName: name + ":",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool", "Cold"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "use_copy_blob", Value: "false"},
},
})
}

@@ -40,6 +44,7 @@ func TestIntegration2(t *testing.T) {
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
{Name: name, Key: "use_copy_blob", Value: "false"},
},
})
}

@@ -48,8 +53,13 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}

func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setCopyCutoff(cs)
}

var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetCopyCutoffer = (*Fs)(nil)
)

func TestValidateAccessTier(t *testing.T) {
@@ -237,6 +237,30 @@ msi_client_id, or msi_mi_res_id parameters.`,
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
Advanced: true,
Sensitive: true,
}, {
Name: "disable_instance_discovery",
Help: `Skip requesting Microsoft Entra instance metadata
This should be set true only by applications authenticating in
disconnected clouds, or private clouds such as Azure Stack.
It determines whether rclone requests Microsoft Entra instance
metadata from ` + "`https://login.microsoft.com/`" + ` before
authenticating.
Setting this to true will skip this request, making you responsible
for ensuring the configured authority is valid and trustworthy.
`,
Default: false,
Advanced: true,
}, {
Name: "use_az",
Help: `Use Azure CLI tool az for authentication
Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/)
as the sole means of authentication.
Setting this can be useful if you wish to use the az CLI on a host with
a System Managed Identity that you do not want to use.
Don't set env_auth at the same time.
`,
Default: false,
Advanced: true,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",

@@ -319,10 +343,12 @@ type Options struct {
Username string `config:"username"`
Password string `config:"password"`
ServicePrincipalFile string `config:"service_principal_file"`
DisableInstanceDiscovery bool `config:"disable_instance_discovery"`
UseMSI bool `config:"use_msi"`
MSIObjectID string `config:"msi_object_id"`
MSIClientID string `config:"msi_client_id"`
MSIResourceID string `config:"msi_mi_res_id"`
UseAZ bool `config:"use_az"`
Endpoint string `config:"endpoint"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
MaxStreamSize fs.SizeSuffix `config:"max_stream_size"`

@@ -414,7 +440,8 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
}
// Read credentials from the environment
options := azidentity.DefaultAzureCredentialOptions{
ClientOptions: policyClientOptions,
ClientOptions: policyClientOptions,
DisableInstanceDiscovery: opt.DisableInstanceDiscovery,
}
cred, err = azidentity.NewDefaultAzureCredential(&options)
if err != nil {

@@ -425,6 +452,13 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
if err != nil {
return nil, fmt.Errorf("create new shared key credential failed: %w", err)
}
case opt.UseAZ:
var options = azidentity.AzureCLICredentialOptions{}
cred, err = azidentity.NewAzureCLICredential(&options)
fmt.Println(cred)
if err != nil {
return nil, fmt.Errorf("failed to create Azure CLI credentials: %w", err)
}
case opt.SASURL != "":
client, err = service.NewClientWithNoCredential(opt.SASURL, &clientOpt)
if err != nil {

@@ -899,7 +933,7 @@ func (o *Object) getMetadata(ctx context.Context) error {

// Hash returns the MD5 of an object returning a lowercase hex string
//
// May make a network request becaue the [fs.List] method does not
// May make a network request because the [fs.List] method does not
// return MD5 hashes for DirEntry
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
if ty != hash.MD5 {
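For reference, the two new azureblob options above surface as ordinary backend settings. A hypothetical remote using only the Azure CLI for credentials might look like this (remote and account names are placeholders, not from the change):

```
[azblob]
type = azureblob
account = myaccount
use_az = true
# only for disconnected / private clouds such as Azure Stack:
# disable_instance_discovery = true
```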
@@ -30,6 +30,7 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"

@@ -1318,16 +1319,22 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
// Check current version of the file
if deleteHidden && object.Action == "hide" {
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
toBeDeleted <- object
if !operations.SkipDestructive(ctx, object.Name, "remove hide marker") {
toBeDeleted <- object
}
} else if deleteUnfinished && object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
toBeDeleted <- object
if !operations.SkipDestructive(ctx, object.Name, "remove pending upload") {
toBeDeleted <- object
}
} else {
fs.Debugf(remote, "Not deleting current version (id %q) %q dated %v (%v ago)", object.ID, object.Action, time.Time(object.UploadTimestamp).Local(), time.Since(time.Time(object.UploadTimestamp)))
}
} else {
fs.Debugf(remote, "Deleting (id %q)", object.ID)
toBeDeleted <- object
if !operations.SkipDestructive(ctx, object.Name, "delete") {
toBeDeleted <- object
}
}
last = remote
tr.Done(ctx, nil)

@@ -2293,8 +2300,10 @@ func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, op

}

skip := operations.SkipDestructive(ctx, name, "update lifecycle rules")

var bucket *api.Bucket
if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil || newRule.DaysFromStartingToCancelingUnfinishedLargeFiles != nil {
if !skip && (newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil || newRule.DaysFromStartingToCancelingUnfinishedLargeFiles != nil) {
bucketID, err := f.getBucketID(ctx, bucketName)
if err != nil {
return nil, err
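The operations.SkipDestructive wrappers above hook these B2 deletions into rclone's standard --dry-run/--interactive handling, so (presumably) a cleanup can be previewed before it deletes anything, for example (remote and bucket names are placeholders):

```
rclone cleanup --dry-run TestB2:bucket
```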
@@ -5,6 +5,7 @@ import (
"crypto/sha1"
"fmt"
"path"
"sort"
"strings"
"testing"
"time"

@@ -13,6 +14,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/bucket"

@@ -463,24 +465,161 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
})

t.Run("Cleanup", func(t *testing.T) {
require.NoError(t, f.cleanUp(ctx, true, false, 0))
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
fstest.CheckListing(t, f, items)
// Set --b2-versions for this test
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
fstest.CheckListing(t, f, items)
t.Run("DryRun", func(t *testing.T) {
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Listing should be unchanged after dry run
before := listAllFiles(ctx, t, f, dirName)
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
require.NoError(t, f.cleanUp(ctx, true, false, 0))
after := listAllFiles(ctx, t, f, dirName)
assert.Equal(t, before, after)
})

t.Run("RealThing", func(t *testing.T) {
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Listing should reflect current state after cleanup
require.NoError(t, f.cleanUp(ctx, true, false, 0))
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
fstest.CheckListing(t, f, items)
})
})

// Purge gets tested later
}

func (f *Fs) InternalTestCleanupUnfinished(t *testing.T) {
ctx := context.Background()

// B2CleanupHidden tests cleaning up hidden files
t.Run("CleanupUnfinished", func(t *testing.T) {
dirName := "unfinished"
fileCount := 5
expectedFiles := []string{}
for i := 1; i < fileCount; i++ {
fileName := fmt.Sprintf("%s/unfinished-%d", dirName, i)
expectedFiles = append(expectedFiles, fileName)
obj := &Object{
fs: f,
remote: fileName,
}
objInfo := object.NewStaticObjectInfo(fileName, fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
_, err := f.newLargeUpload(ctx, obj, nil, objInfo, f.opt.ChunkSize, false, nil)
require.NoError(t, err)
}
checkListing(ctx, t, f, dirName, expectedFiles)

t.Run("DryRun", func(t *testing.T) {
// Listing should not change after dry run
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
require.NoError(t, f.cleanUp(ctx, false, true, 0))
checkListing(ctx, t, f, dirName, expectedFiles)
})

t.Run("RealThing", func(t *testing.T) {
// Listing should be empty after real cleanup
require.NoError(t, f.cleanUp(ctx, false, true, 0))
checkListing(ctx, t, f, dirName, []string{})
})
})
}

func listAllFiles(ctx context.Context, t *testing.T, f *Fs, dirName string) []string {
bucket, directory := f.split(dirName)
foundFiles := []string{}
require.NoError(t, f.list(ctx, bucket, directory, "", false, true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
if !isDirectory {
foundFiles = append(foundFiles, object.Name)
}
return nil
}))
sort.Strings(foundFiles)
return foundFiles
}

func checkListing(ctx context.Context, t *testing.T, f *Fs, dirName string, expectedFiles []string) {
foundFiles := listAllFiles(ctx, t, f, dirName)
sort.Strings(expectedFiles)
assert.Equal(t, expectedFiles, foundFiles)
}

func (f *Fs) InternalTestLifecycleRules(t *testing.T) {
ctx := context.Background()

opt := map[string]string{}

t.Run("InitState", func(t *testing.T) {
// There should be no lifecycle rules at the outset
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
})

t.Run("DryRun", func(t *testing.T) {
// There should still be no lifecycle rules after each dry run operation
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true

opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))

delete(opt, "daysFromHidingToDeleting")
opt["daysFromUploadingToHiding"] = "40"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))

opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
})

t.Run("RealThing", func(t *testing.T) {
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)

delete(opt, "daysFromHidingToDeleting")
opt["daysFromUploadingToHiding"] = "40"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)

opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
})
}

// -run TestIntegration/FsMkdir/FsPutFiles/Internal
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Metadata", f.InternalTestMetadata)
t.Run("Versions", f.InternalTestVersions)
t.Run("CleanupUnfinished", f.InternalTestCleanupUnfinished)
t.Run("LifecycleRules", f.InternalTestLifecycleRules)
}

var _ fstests.InternalTester = (*Fs)(nil)
@@ -3525,14 +3525,14 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
return f.unTrash(ctx, dir, directoryID, true)
}

// copy file with id to dest
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
// copy or move file with id to dest
func (f *Fs) copyOrMoveID(ctx context.Context, operation string, id, dest string) (err error) {
info, err := f.getFile(ctx, id, f.getFileFields(ctx))
if err != nil {
return fmt.Errorf("couldn't find id: %w", err)
}
if info.MimeType == driveFolderType {
return fmt.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
return fmt.Errorf("can't %s directory use: rclone %s --drive-root-folder-id %s %s %s", operation, operation, id, fs.ConfigString(f), dest)
}
info.Name = f.opt.Enc.ToStandardName(info.Name)
o, err := f.newObjectWithInfo(ctx, info.Name, info)

@@ -3553,9 +3553,15 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
if err != nil {
return err
}
_, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
if err != nil {
return fmt.Errorf("copy failed: %w", err)

var opErr error
if operation == "moveid" {
_, opErr = operations.Move(ctx, dstFs, nil, destLeaf, o)
} else {
_, opErr = operations.Copy(ctx, dstFs, nil, destLeaf, o)
}
if opErr != nil {
return fmt.Errorf("%s failed: %w", operation, opErr)
}
return nil
}

@@ -3792,6 +3798,28 @@ attempted if possible.

Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
`,
}, {
Name: "moveid",
Short: "Move files by ID",
Long: `This command moves files by ID

Usage:

rclone backend moveid drive: ID path
rclone backend moveid drive: ID1 path1 ID2 path2

It moves the drive file with ID given to the path (an rclone path which
will be passed internally to rclone moveto).

The path should end with a / to indicate move the file as named to
this directory. If it doesn't end with a / then the last path
component will be used as the file name.

If the destination is a drive backend then server-side moving will be
attempted if possible.

Use the --interactive/-i or --dry-run flag to see what would be moved beforehand.
`,
}, {
Name: "exportformats",
Short: "Dump the export formats for debug purposes",

@@ -3970,16 +3998,16 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
dir = arg[0]
}
return f.unTrashDir(ctx, dir, true)
case "copyid":
case "copyid", "moveid":
if len(arg)%2 != 0 {
return nil, errors.New("need an even number of arguments")
}
for len(arg) > 0 {
id, dest := arg[0], arg[1]
arg = arg[2:]
err = f.copyID(ctx, id, dest)
err = f.copyOrMoveID(ctx, name, id, dest)
if err != nil {
return nil, fmt.Errorf("failed copying %q to %q: %w", id, dest, err)
return nil, fmt.Errorf("failed %s %q to %q: %w", name, id, dest, err)
}
}
return nil, nil
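A concrete invocation of the new moveid backend command documented above, with a made-up file ID; the trailing slash keeps the original file name, as the help text explains:

```
rclone backend moveid drive: 1AbCdEfGhIjKlMnOpQrStUv backup/reports/
```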
@@ -479,8 +479,8 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
require.NoError(t, f.Purge(ctx, "trashDir"))
}

// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
func (f *Fs) InternalTestCopyID(t *testing.T) {
// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyOrMoveID
func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
ctx := context.Background()
obj, err := f.NewObject(ctx, existingFile)
require.NoError(t, err)

@@ -498,7 +498,7 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
}

t.Run("BadID", func(t *testing.T) {
err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
err = f.copyOrMoveID(ctx, "moveid", "ID-NOT-FOUND", dir+"/")
require.Error(t, err)
assert.Contains(t, err.Error(), "couldn't find id")
})

@@ -506,19 +506,31 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
t.Run("Directory", func(t *testing.T) {
rootID, err := f.dirCache.RootID(ctx, false)
require.NoError(t, err)
err = f.copyID(ctx, rootID, dir+"/")
err = f.copyOrMoveID(ctx, "moveid", rootID, dir+"/")
require.Error(t, err)
assert.Contains(t, err.Error(), "can't copy directory")
assert.Contains(t, err.Error(), "can't moveid directory")
})

t.Run("WithoutDestName", func(t *testing.T) {
err = f.copyID(ctx, o.id, dir+"/")
t.Run("MoveWithoutDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/")
require.NoError(t, err)
checkFile(path.Base(existingFile))
})

t.Run("WithDestName", func(t *testing.T) {
err = f.copyID(ctx, o.id, dir+"/potato.txt")
t.Run("CopyWithoutDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/")
require.NoError(t, err)
checkFile(path.Base(existingFile))
})

t.Run("MoveWithDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/potato.txt")
require.NoError(t, err)
checkFile("potato.txt")
})

t.Run("CopyWithDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/potato.txt")
require.NoError(t, err)
checkFile("potato.txt")
})

@@ -647,7 +659,7 @@ func (f *Fs) InternalTest(t *testing.T) {
})
t.Run("Shortcuts", f.InternalTestShortcuts)
t.Run("UnTrash", f.InternalTestUnTrash)
t.Run("CopyID", f.InternalTestCopyID)
t.Run("CopyOrMoveID", f.InternalTestCopyOrMoveID)
t.Run("Query", f.InternalTestQuery)
t.Run("AgeQuery", f.InternalTestAgeQuery)
t.Run("ShouldRetry", f.InternalTestShouldRetry)
@@ -2,6 +2,7 @@ package googlephotos

import (
"context"
"errors"
"fmt"
"io"
"net/http"

@@ -35,7 +36,7 @@ func TestIntegration(t *testing.T) {
*fstest.RemoteName = "TestGooglePhotos:"
}
f, err := fs.NewFs(ctx, *fstest.RemoteName)
if err == fs.ErrorNotFoundInConfigFile {
if errors.Is(err, fs.ErrorNotFoundInConfigFile) {
t.Skipf("Couldn't create google photos backend - skipping tests: %v", err)
}
require.NoError(t, err)
@@ -445,7 +445,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}

// build request
// cant use normal rename as file needs to be "activated" first
// can't use normal rename as file needs to be "activated" first

r := api.NewUpdateFileInfo()
r.DocumentID = doc.DocumentID
@@ -75,7 +75,7 @@ type MoveFolderParam struct {
DestinationPath string `validate:"nonzero" json:"destinationPath"`
}

// JobIDResponse respresents response struct with JobID for folder operations
// JobIDResponse represents response struct with JobID for folder operations
type JobIDResponse struct {
JobID string `json:"jobId"`
}
@@ -151,6 +151,19 @@ Owner is able to add custom keys. Metadata feature grabs all the keys including
Help: "Host of InternetArchive Frontend.\n\nLeave blank for default value.",
Default: "https://archive.org",
Advanced: true,
}, {
Name: "item_metadata",
Help: `Metadata to be set on the IA item, this is different from file-level metadata that can be set using --metadata-set.
Format is key=value and the 'x-archive-meta-' prefix is automatically added.`,
Default: []string{},
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "item_derive",
Help: `Whether to trigger derive on the IA item or not. If set to false, the item will not be derived by IA upon upload.
The derive process produces a number of secondary files from an upload to make an upload more usable on the web.
Setting this to false is useful for uploading files that are already in a format that IA can display or reduce burden on IA's infrastructure.`,
Default: true,
}, {
Name: "disable_checksum",
Help: `Don't ask the server to test against MD5 checksum calculated by rclone.

@@ -201,6 +214,8 @@ type Options struct {
Endpoint string `config:"endpoint"`
FrontEndpoint string `config:"front_endpoint"`
DisableChecksum bool `config:"disable_checksum"`
ItemMetadata []string `config:"item_metadata"`
ItemDerive bool `config:"item_derive"`
WaitArchive fs.Duration `config:"wait_archive"`
Enc encoder.MultiEncoder `config:"encoding"`
}

@@ -790,17 +805,23 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
"x-amz-filemeta-rclone-update-track": updateTracker,

// we add some more headers for intuitive actions
"x-amz-auto-make-bucket": "1", // create an item if does not exist, do nothing if already
"x-archive-auto-make-bucket": "1", // same as above in IAS3 original way
"x-archive-keep-old-version": "0", // do not keep old versions (a.k.a. trashes in other clouds)
"x-archive-meta-mediatype": "data", // mark media type of the uploading file as "data"
"x-archive-queue-derive": "0", // skip derivation process (e.g. encoding to smaller files, OCR on PDFs)
"x-archive-cascade-delete": "1", // enable "cascate delete" (delete all derived files in addition to the file itself)
"x-amz-auto-make-bucket": "1", // create an item if does not exist, do nothing if already
"x-archive-auto-make-bucket": "1", // same as above in IAS3 original way
"x-archive-keep-old-version": "0", // do not keep old versions (a.k.a. trashes in other clouds)
"x-archive-cascade-delete": "1", // enable "cascate delete" (delete all derived files in addition to the file itself)
}

if size >= 0 {
headers["Content-Length"] = fmt.Sprintf("%d", size)
headers["x-archive-size-hint"] = fmt.Sprintf("%d", size)
}

// This is IA's ITEM metadata, not file metadata
headers, err = o.appendItemMetadataHeaders(headers, o.fs.opt)
if err != nil {
return err
}

var mdata fs.Metadata
mdata, err = fs.GetMetadataOptions(ctx, o.fs, src, options)
if err == nil && mdata != nil {

@@ -863,6 +884,51 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}

func (o *Object) appendItemMetadataHeaders(headers map[string]string, options Options) (newHeaders map[string]string, err error) {
metadataCounter := make(map[string]int)
metadataValues := make(map[string][]string)

// First pass: count occurrences and collect values
for _, v := range options.ItemMetadata {
parts := strings.SplitN(v, "=", 2)
if len(parts) != 2 {
return newHeaders, errors.New("item metadata key=value should be in the form key=value")
}
key, value := parts[0], parts[1]
metadataCounter[key]++
metadataValues[key] = append(metadataValues[key], value)
}

// Second pass: add headers with appropriate prefixes
for key, count := range metadataCounter {
if count == 1 {
// Only one occurrence, use x-archive-meta-
headers[fmt.Sprintf("x-archive-meta-%s", key)] = metadataValues[key][0]
} else {
// Multiple occurrences, use x-archive-meta01-, x-archive-meta02-, etc.
for i, value := range metadataValues[key] {
headers[fmt.Sprintf("x-archive-meta%02d-%s", i+1, key)] = value
}
}
}

if o.fs.opt.ItemDerive {
headers["x-archive-queue-derive"] = "1"
} else {
headers["x-archive-queue-derive"] = "0"
}

fs.Debugf(o, "Setting IA item derive: %t", o.fs.opt.ItemDerive)

for k, v := range headers {
if strings.HasPrefix(k, "x-archive-meta") {
fs.Debugf(o, "Setting IA item metadata: %s=%s", k, v)
}
}

return headers, nil
}

// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split()
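To illustrate the two-pass logic in appendItemMetadataHeaders above: item_metadata entries of collection=opensource, subject=books, subject=scanning (values invented for illustration) would produce these request headers, since the repeated key switches to numbered prefixes:

```
x-archive-meta-collection: opensource
x-archive-meta01-subject: books
x-archive-meta02-subject: scanning
```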
@@ -131,7 +131,7 @@ func init() {
Help: "Microsoft Cloud for US Government",
}, {
Value: regionDE,
Help: "Microsoft Cloud Germany",
Help: "Microsoft Cloud Germany (deprecated - try " + regionGlobal + " region first).",
}, {
Value: regionCN,
Help: "Azure and Office 365 operated by Vnet Group in China",
@@ -424,7 +424,7 @@ func (f *Fs) newSingleConnClient(ctx context.Context) (*rest.Client, error) {
})
// Set our own http client in the context
ctx = oauthutil.Context(ctx, baseClient)
// create a new oauth client, re-use the token source
// create a new oauth client, reuse the token source
oAuthClient := oauth2.NewClient(ctx, f.ts)
return rest.NewClient(oAuthClient).SetRoot("https://" + f.opt.Hostname), nil
}
@@ -1343,7 +1343,7 @@ func init() {
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,MagaluCloud,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,Magalu,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint",

@@ -1476,14 +1476,6 @@ func init() {
Value: "s3.ir-tbz-sh1.arvanstorage.ir",
Help: "ArvanCloud Tabriz Iran (Shahriar) endpoint",
Provider: "ArvanCloud",
}, {
Value: "br-se1.magaluobjects.com",
Help: "Magalu BR Southeast 1 endpoint",
Provider: "Magalu",
}, {
Value: "br-ne1.magaluobjects.com",
Help: "Magalu BR Northeast 1 endpoint",
Provider: "Magalu",
}},
}, {
Name: "location_constraint",

@@ -2122,13 +2114,16 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
Help: "Standard storage class",
}},
}, {
// Mapping from here: #todo
// Mapping from here: https://docs.magalu.cloud/docs/storage/object-storage/Classes-de-Armazenamento/standard
Name: "storage_class",
Help: "The storage class to use when storing new objects in Magalu.",
Provider: "Magalu",
Examples: []fs.OptionExample{{
Value: "STANDARD",
Help: "Standard storage class",
}, {
Value: "GLACIER_IR",
Help: "Glacier Instant Retrieval storage class",
}},
}, {
// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925

@@ -3344,7 +3339,7 @@ func setQuirks(opt *Options) {
listObjectsV2 = true // Always use ListObjectsV2 instead of ListObjects
virtualHostStyle = true // Use bucket.provider.com instead of putting the bucket in the URL
urlEncodeListings = true // URL encode the listings to help with control characters
useMultipartEtag = true // Set if Etags for multpart uploads are compatible with AWS
useMultipartEtag = true // Set if Etags for multipart uploads are compatible with AWS
useAcceptEncodingGzip = true // Set Accept-Encoding: gzip
mightGzip = true // assume all providers might use content encoding gzip until proven otherwise
useAlreadyExists = true // Set if provider returns AlreadyOwnedByYou or no error if you try to remake your own bucket

@@ -3682,6 +3677,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.Provider == "IDrive" {
f.features.SetTier = false
}
if opt.Provider == "AWS" {
f.features.DoubleSlash = true
}
if opt.DirectoryMarkers {
f.features.CanHaveEmptyDirectories = true
}

@@ -4153,7 +4151,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
opt.prefix += "/"
}
if !opt.findFile {
if opt.directory != "" {
if opt.directory != "" && (opt.prefix == "" && !bucket.IsAllSlashes(opt.directory) || opt.prefix != "" && !strings.HasSuffix(opt.directory, "/")) {
opt.directory += "/"
}
}

@@ -4250,14 +4248,18 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
}
remote = f.opt.Enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, opt.prefix) {
fs.Logf(f, "Odd name received %q", remote)
fs.Logf(f, "Odd directory name received %q", remote)
continue
}
remote = remote[len(opt.prefix):]
// Trim one slash off the remote name
remote, _ = strings.CutSuffix(remote, "/")
if remote == "" || bucket.IsAllSlashes(remote) {
remote += "/"
}
if opt.addBucket {
remote = bucket.Join(opt.bucket, remote)
}
remote = strings.TrimSuffix(remote, "/")
err = fn(remote, &types.Object{Key: &remote}, nil, true)
if err != nil {
if err == errEndList {

@@ -6057,7 +6059,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
if mOut == nil {
err = fserrors.RetryErrorf("internal error: no info from multipart upload")
} else if mOut.UploadId == nil {
err = fserrors.RetryErrorf("internal error: no UploadId in multpart upload: %#v", *mOut)
err = fserrors.RetryErrorf("internal error: no UploadId in multipart upload: %#v", *mOut)
}
}
return f.shouldRetry(ctx, err)
@@ -31,13 +31,29 @@ func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {
}
}

d := &smb2.Dialer{
Initiator: &smb2.NTLMInitiator{
d := &smb2.Dialer{}
if f.opt.UseKerberos {
cl, err := getKerberosClient()
if err != nil {
return nil, err
}

spn := f.opt.SPN
if spn == "" {
spn = "cifs/" + f.opt.Host
}

d.Initiator = &smb2.Krb5Initiator{
Client: cl,
TargetSPN: spn,
}
} else {
d.Initiator = &smb2.NTLMInitiator{
User: f.opt.User,
Password: pass,
Domain: f.opt.Domain,
TargetSPN: f.opt.SPN,
},
}
}

session, err := d.DialConn(ctx, tconn, addr)
78 backend/smb/kerberos.go (Normal file)
@@ -0,0 +1,78 @@
package smb

import (
"fmt"
"os"
"os/user"
"path/filepath"
"strings"
"sync"

"github.com/jcmturner/gokrb5/v8/client"
"github.com/jcmturner/gokrb5/v8/config"
"github.com/jcmturner/gokrb5/v8/credentials"
)

var (
kerberosClient *client.Client
kerberosErr error
kerberosOnce sync.Once
)

// getKerberosClient returns a Kerberos client that can be used to authenticate.
func getKerberosClient() (*client.Client, error) {
if kerberosClient == nil || kerberosErr == nil {
kerberosOnce.Do(func() {
kerberosClient, kerberosErr = createKerberosClient()
})
}

return kerberosClient, kerberosErr
}

// createKerberosClient creates a new Kerberos client.
func createKerberosClient() (*client.Client, error) {
cfgPath := os.Getenv("KRB5_CONFIG")
if cfgPath == "" {
cfgPath = "/etc/krb5.conf"
}

cfg, err := config.Load(cfgPath)
if err != nil {
return nil, err
}

// Determine the ccache location from the environment, falling back to the
// default location.
ccachePath := os.Getenv("KRB5CCNAME")
switch {
case strings.Contains(ccachePath, ":"):
parts := strings.SplitN(ccachePath, ":", 2)
switch parts[0] {
case "FILE":
ccachePath = parts[1]
case "DIR":
primary, err := os.ReadFile(filepath.Join(parts[1], "primary"))
if err != nil {
return nil, err
}
ccachePath = filepath.Join(parts[1], strings.TrimSpace(string(primary)))
default:
return nil, fmt.Errorf("unsupported KRB5CCNAME: %s", ccachePath)
}
case ccachePath == "":
u, err := user.Current()
if err != nil {
return nil, err
}

ccachePath = "/tmp/krb5cc_" + u.Uid
}

ccache, err := credentials.LoadCCache(ccachePath)
if err != nil {
return nil, err
}

return client.NewFromCCache(ccache, cfg)
}
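createKerberosClient above recognises the common KRB5CCNAME forms; for illustration only (paths invented), the environment could be set like this before running rclone:

```
export KRB5_CONFIG=/etc/krb5.conf
export KRB5CCNAME=FILE:/tmp/krb5cc_1000
# or, for a directory-style cache with a "primary" file:
# export KRB5CCNAME=DIR:/run/user/1000/krb5cc
```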
@@ -76,6 +76,16 @@ authentication, and it often needs to be set for clusters. For example:

Leave blank if not sure.
`,
Sensitive: true,
}, {
Name: "use_kerberos",
Help: `Use Kerberos authentication.

If set, rclone will use Kerberos authentication instead of NTLM. This
requires a valid Kerberos configuration and credentials cache to be
available, either in the default locations or as specified by the
KRB5_CONFIG and KRB5CCNAME environment variables.
`,
Default: false,
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),

@@ -126,6 +136,7 @@ type Options struct {
Pass string `config:"pass"`
Domain string `config:"domain"`
SPN string `config:"spn"`
UseKerberos bool `config:"use_kerberos"`
HideSpecial bool `config:"hide_special_share"`
CaseInsensitive bool `config:"case_insensitive"`
IdleTimeout fs.Duration `config:"idle_timeout"`
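A hypothetical remote using the new use_kerberos option might be configured like this (host name invented; the dial code above defaults the SPN to cifs/<host> when spn is unset):

```
[TestSMBKerberos]
type = smb
host = fileserver.example.com
use_kerberos = true
```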
@@ -2,6 +2,7 @@
package smb_test

import (
"path/filepath"
"testing"

"github.com/rclone/rclone/backend/smb"

@@ -15,3 +16,13 @@ func TestIntegration(t *testing.T) {
NilObject: (*smb.Object)(nil),
})
}

func TestIntegration2(t *testing.T) {
krb5Dir := t.TempDir()
t.Setenv("KRB5_CONFIG", filepath.Join(krb5Dir, "krb5.conf"))
t.Setenv("KRB5CCNAME", filepath.Join(krb5Dir, "ccache"))
fstests.Run(t, &fstests.Opt{
RemoteName: "TestSMBKerberos:rclone",
NilObject: (*smb.Object)(nil),
})
}
@@ -7,6 +7,7 @@ conversion into man pages etc.
import os
import re
import time
import subprocess
from datetime import datetime

docpath = "docs/content"

@@ -192,13 +193,23 @@ def main():
command_docs = read_commands(docpath).replace("\\", "\\\\") # escape \ so we can use command_docs in re.sub
build_date = datetime.utcfromtimestamp(
int(os.environ.get('SOURCE_DATE_EPOCH', time.time())))
help_output = subprocess.check_output(["rclone", "help"]).decode("utf-8")
with open(outfile, "w") as out:
out.write("""\
%% rclone(1) User Manual
%% Nick Craig-Wood
%% %s

""" % build_date.strftime("%b %d, %Y"))
# NAME

rclone - manage files on cloud storage

# SYNOPSIS

```
%s
```
""" % (build_date.strftime("%b %d, %Y"), help_output))
for doc in docs:
contents = read_doc(doc)
# Substitute the commands into doc.md
@@ -29,7 +29,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
cmd := exec.Command("git", "log", "--oneline", from+".."+to)
out, err := cmd.Output()
if err != nil {
log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
}
logMap = map[string]string{}
logs = []string{}

@@ -39,7 +39,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
}
match := logRe.FindSubmatch(line)
if match == nil {
log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
}
var hash, logMessage = string(match[1]), string(match[2])
logMap[logMessage] = hash

@@ -52,12 +52,12 @@ func main() {
flag.Parse()
args := flag.Args()
if len(args) != 0 {
log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
}
// v1.54.0
versionBytes, err := os.ReadFile("VERSION")
if err != nil {
log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
}
if versionBytes[0] == 'v' {
versionBytes = versionBytes[1:]

@@ -65,7 +65,7 @@ func main() {
versionBytes = bytes.TrimSpace(versionBytes)
semver := semver.New(string(versionBytes))
stable := fmt.Sprintf("v%d.%d", semver.Major, semver.Minor-1)
log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
masterMap, masterLogs := readCommits(stable+".0", "master")
stableMap, _ := readCommits(stable+".0", stable+"-stable")
for _, logMessage := range masterLogs {
@@ -9,7 +9,7 @@ import (
"github.com/rclone/rclone/cmd/bisync/bilib"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
)

const configFile = "../../fstest/test_all/config.yaml"
@@ -63,40 +63,40 @@ func (b *bisyncRun) setCompareDefaults(ctx context.Context) error {
}

if b.opt.Compare.SlowHashSyncOnly && b.opt.Compare.SlowHashDetected && b.opt.Resync {
fs.Logf(nil, Color(terminal.Dim, "Ignoring checksums during --resync as --slow-hash-sync-only is set.")) ///nolint:govet
fs.Log(nil, Color(terminal.Dim, "Ignoring checksums during --resync as --slow-hash-sync-only is set."))
ci.CheckSum = false
// note not setting b.opt.Compare.Checksum = false as we still want to build listings on the non-slow side, if any
} else if b.opt.Compare.Checksum && !ci.CheckSum {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Checksums will be compared for deltas but not during sync as --checksum is not set.")) //nolint:govet
fs.Log(nil, Color(terminal.YellowFg, "WARNING: Checksums will be compared for deltas but not during sync as --checksum is not set."))
}
if b.opt.Compare.Modtime && (b.fs1.Precision() == fs.ModTimeNotSupported || b.fs2.Precision() == fs.ModTimeNotSupported) {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Modtime compare was requested but at least one remote does not support it. It is recommended to use --checksum or --size-only instead.")) //nolint:govet
fs.Log(nil, Color(terminal.YellowFg, "WARNING: Modtime compare was requested but at least one remote does not support it. It is recommended to use --checksum or --size-only instead."))
}
if (ci.CheckSum || b.opt.Compare.Checksum) && b.opt.IgnoreListingChecksum {
if (b.opt.Compare.HashType1 == hash.None || b.opt.Compare.HashType2 == hash.None) && !b.opt.Compare.DownloadHash {
fs.Logf(nil, Color(terminal.YellowFg, `WARNING: Checksum compare was requested but at least one remote does not support checksums (or checksums are being ignored) and --ignore-listing-checksum is set.
Ignoring Checksums globally and falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime). Path1 (%s): %s, Path2 (%s): %s`),
b.fs1.String(), b.opt.Compare.HashType1.String(), b.fs2.String(), b.opt.Compare.HashType2.String()) //nolint:govet
b.fs1.String(), b.opt.Compare.HashType1.String(), b.fs2.String(), b.opt.Compare.HashType2.String())
b.opt.Compare.Modtime = true
b.opt.Compare.Size = true
ci.CheckSum = false
b.opt.Compare.Checksum = false
} else {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksum for deltas as --ignore-listing-checksum is set")) //nolint:govet
fs.Log(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksum for deltas as --ignore-listing-checksum is set"))
// note: --checksum will still affect the internal sync calls
}
}
if !ci.CheckSum && !b.opt.Compare.Checksum && !b.opt.IgnoreListingChecksum {
fs.Infof(nil, Color(terminal.Dim, "Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.")) //nolint:govet
fs.Infoc(nil, Color(terminal.Dim, "Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set."))
b.opt.IgnoreListingChecksum = true
}
if !b.opt.Compare.Size && !b.opt.Compare.Modtime && !b.opt.Compare.Checksum {
return errors.New(Color(terminal.RedFg, "must set a Compare method. (size, modtime, and checksum can't all be false.)")) //nolint:govet
return errors.New(Color(terminal.RedFg, "must set a Compare method. (size, modtime, and checksum can't all be false.)"))
}

notSupported := func(label string, value bool, opt *bool) {
if value {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: %s is set but bisync does not support it. It will be ignored."), label) //nolint:govet
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: %s is set but bisync does not support it. It will be ignored."), label)
*opt = false
}
}

@@ -123,13 +123,13 @@ func sizeDiffers(a, b int64) bool {
func hashDiffers(a, b string, ht1, ht2 hash.Type, size1, size2 int64) bool {
if a == "" || b == "" {
if ht1 != hash.None && ht2 != hash.None && !(size1 <= 0 || size2 <= 0) {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), a, b) //nolint:govet
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), a, b)
}
return false
}
if ht1 != ht2 {
if !(downloadHash && ((ht1 == hash.MD5 && ht2 == hash.None) || (ht1 == hash.None && ht2 == hash.MD5))) {
fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String()) //nolint:govet
fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String())
return false
}
}

@@ -151,7 +151,7 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
return
}
} else if b.opt.Compare.SlowHashSyncOnly && b.opt.Compare.SlowHashDetected {
fs.Logf(b.fs2, Color(terminal.YellowFg, "Ignoring --slow-hash-sync-only and falling back to --no-slow-hash as Path1 and Path2 have no hashes in common.")) //nolint:govet
fs.Log(b.fs2, Color(terminal.YellowFg, "Ignoring --slow-hash-sync-only and falling back to --no-slow-hash as Path1 and Path2 have no hashes in common."))
b.opt.Compare.SlowHashSyncOnly = false
b.opt.Compare.NoSlowHash = true
ci.CheckSum = false

@@ -159,7 +159,7 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
}

if !b.opt.Compare.DownloadHash && !b.opt.Compare.SlowHashSyncOnly {
fs.Logf(b.fs2, Color(terminal.YellowFg, "--checksum is in use but Path1 and Path2 have no hashes in common; falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime)")) //nolint:govet
fs.Log(b.fs2, Color(terminal.YellowFg, "--checksum is in use but Path1 and Path2 have no hashes in common; falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime)"))
fs.Infof("Path1 hashes", "%v", b.fs1.Hashes().String())
fs.Infof("Path2 hashes", "%v", b.fs2.Hashes().String())
b.opt.Compare.Modtime = true

@@ -167,25 +167,25 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
ci.CheckSum = false
}
if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs1.Features().SlowHash {
fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path1. Will ignore checksum due to slow-hash settings")) //nolint:govet
fs.Infoc(nil, Color(terminal.YellowFg, "Slow hash detected on Path1. Will ignore checksum due to slow-hash settings"))
b.opt.Compare.HashType1 = hash.None
} else {
b.opt.Compare.HashType1 = b.fs1.Hashes().GetOne()
if b.opt.Compare.HashType1 != hash.None {
fs.Logf(b.fs1, Color(terminal.YellowFg, "will use %s for same-side diffs on Path1 only"), b.opt.Compare.HashType1) //nolint:govet
|
||||
fs.Logf(b.fs1, Color(terminal.YellowFg, "will use %s for same-side diffs on Path1 only"), b.opt.Compare.HashType1)
|
||||
}
|
||||
}
|
||||
if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs2.Features().SlowHash {
|
||||
fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path2. Will ignore checksum due to slow-hash settings")) //nolint:govet
|
||||
fs.Infoc(nil, Color(terminal.YellowFg, "Slow hash detected on Path2. Will ignore checksum due to slow-hash settings"))
|
||||
b.opt.Compare.HashType1 = hash.None
|
||||
} else {
|
||||
b.opt.Compare.HashType2 = b.fs2.Hashes().GetOne()
|
||||
if b.opt.Compare.HashType2 != hash.None {
|
||||
fs.Logf(b.fs2, Color(terminal.YellowFg, "will use %s for same-side diffs on Path2 only"), b.opt.Compare.HashType2) //nolint:govet
|
||||
fs.Logf(b.fs2, Color(terminal.YellowFg, "will use %s for same-side diffs on Path2 only"), b.opt.Compare.HashType2)
|
||||
}
|
||||
}
|
||||
if b.opt.Compare.HashType1 == hash.None && b.opt.Compare.HashType2 == hash.None && !b.opt.Compare.DownloadHash {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksums globally as hashes are ignored or unavailable on both sides.")) //nolint:govet
|
||||
fs.Log(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksums globally as hashes are ignored or unavailable on both sides."))
|
||||
b.opt.Compare.Checksum = false
|
||||
ci.CheckSum = false
|
||||
b.opt.IgnoreListingChecksum = true
|
||||
@@ -218,7 +218,7 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
|
||||
if b.opt.CompareFlag == "" {
|
||||
return nil
|
||||
}
|
||||
var CompareFlag CompareOpt // for exlcusions
|
||||
var CompareFlag CompareOpt // for exclusions
|
||||
opts := strings.Split(b.opt.CompareFlag, ",")
|
||||
for _, opt := range opts {
|
||||
switch strings.ToLower(strings.TrimSpace(opt)) {
|
||||
@@ -232,7 +232,7 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
|
||||
b.opt.Compare.Checksum = true
|
||||
CompareFlag.Checksum = true
|
||||
default:
|
||||
return fmt.Errorf(Color(terminal.RedFg, "unknown compare option: %s (must be size, modtime, or checksum)"), opt) //nolint:govet
|
||||
return fmt.Errorf(Color(terminal.RedFg, "unknown compare option: %s (must be size, modtime, or checksum)"), opt)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -284,14 +284,14 @@ func tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string
|
||||
}
|
||||
if o.Size() < 0 {
|
||||
downloadHashWarn.Do(func() {
|
||||
fs.Logf(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length.")) //nolint:govet
|
||||
fs.Log(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length."))
|
||||
})
|
||||
fs.Debugf(o, "Skipping hash download as checksum not reliable with files of unknown length.")
|
||||
return hashVal, hash.ErrUnsupported
|
||||
}
|
||||
|
||||
firstDownloadHash.Do(func() {
|
||||
fs.Infof(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes...")) //nolint:govet
|
||||
fs.Infoc(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes..."))
|
||||
})
|
||||
tr := accounting.Stats(ctx).NewCheckingTransfer(o, "computing hash with --download-hash")
|
||||
defer func() {
|
||||
|
||||
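The `downloadHashWarn.Do` and `firstDownloadHash.Do` calls above use the warn-once pattern: a package-level `sync.Once` so a message is logged a single time even though `tryDownloadHash` runs once per object. A minimal, self-contained sketch of that pattern (the helper and its plain `fmt` logging are illustrative, not the actual rclone code):

```go
package main

import (
	"fmt"
	"sync"
)

// downloadHashWarn mirrors the package-level sync.Once used in the hunk above.
var downloadHashWarn sync.Once

// checkFile warns once, globally, the first time it meets a file of unknown length.
func checkFile(name string, size int64) {
	if size < 0 {
		downloadHashWarn.Do(func() {
			fmt.Println("Skipping hash download as checksum not reliable with files of unknown length.")
		})
		return
	}
	fmt.Printf("would download and hash %q (%d bytes)\n", name, size)
}

func main() {
	checkFile("gdoc-1", -1) // prints the warning
	checkFile("gdoc-2", -1) // silent - the Once has already fired
	checkFile("plain.txt", 42)
}
```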
@@ -394,7 +394,7 @@ func parseHash(str string) (string, string, error) {
|
||||
return "", "", fmt.Errorf("invalid hash %q", str)
|
||||
}
|
||||
|
||||
// checkListing verifies that listing is not empty (unless resynching)
|
||||
// checkListing verifies that listing is not empty (unless resyncing)
|
||||
func (b *bisyncRun) checkListing(ls *fileList, listing, msg string) error {
|
||||
if b.opt.Resync || !ls.empty() {
|
||||
return nil
|
||||
|
||||
@@ -78,6 +78,15 @@ func Color(style string, s string) string {
|
||||
return style + s + terminal.Reset
|
||||
}
|
||||
|
||||
// ColorX handles terminal colors for bisync
|
||||
func ColorX(style string, s string) string {
|
||||
if !Colors {
|
||||
return s
|
||||
}
|
||||
terminal.Start()
|
||||
return style + s + terminal.Reset
|
||||
}
|
||||
|
||||
func encode(s string) string {
|
||||
return encoder.OS.ToStandardPath(encoder.OS.FromStandardPath(s))
|
||||
}
|
||||
|
||||
@@ -131,18 +131,18 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
|
||||
finaliseOnce.Do(func() {
|
||||
if atexit.Signalled() {
|
||||
if b.opt.Resync {
|
||||
fs.Logf(nil, Color(terminal.GreenFg, "No need to gracefully shutdown during --resync (just run it again.)")) //nolint:govet
|
||||
fs.Log(nil, Color(terminal.GreenFg, "No need to gracefully shutdown during --resync (just run it again.)"))
|
||||
} else {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "Attempting to gracefully shutdown. (Send exit signal again for immediate un-graceful shutdown.)")) //nolint:govet
|
||||
fs.Log(nil, Color(terminal.YellowFg, "Attempting to gracefully shutdown. (Send exit signal again for immediate un-graceful shutdown.)"))
|
||||
b.InGracefulShutdown = true
|
||||
if b.SyncCI != nil {
|
||||
fs.Infof(nil, Color(terminal.YellowFg, "Telling Sync to wrap up early.")) //nolint:govet
|
||||
fs.Infoc(nil, Color(terminal.YellowFg, "Telling Sync to wrap up early."))
|
||||
b.SyncCI.MaxTransfer = 1
|
||||
b.SyncCI.MaxDuration = 1 * time.Second
|
||||
b.SyncCI.CutoffMode = fs.CutoffModeSoft
|
||||
gracePeriod := 30 * time.Second // TODO: flag to customize this?
|
||||
if !waitFor("Canceling Sync if not done in", gracePeriod, func() bool { return b.CleanupCompleted }) {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "Canceling sync and cleaning up")) //nolint:govet
|
||||
fs.Log(nil, Color(terminal.YellowFg, "Canceling sync and cleaning up"))
|
||||
b.CancelSync()
|
||||
waitFor("Aborting Bisync if not done in", 60*time.Second, func() bool { return b.CleanupCompleted })
|
||||
}
|
||||
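The graceful shutdown path above leans on a `waitFor` helper that is not shown in this hunk. A rough sketch of what such a poll-with-timeout helper could look like, assuming a one-second polling interval and simple `fmt` logging (not the actual bisync implementation):

```go
package main

import (
	"fmt"
	"time"
)

// waitForSketch logs the message together with the grace period, then polls
// the condition once a second until it reports true or the deadline passes.
// It returns whether the condition was met in time.
func waitForSketch(msg string, totalWait time.Duration, done func() bool) bool {
	deadline := time.Now().Add(totalWait)
	fmt.Printf("%s %v\n", msg, totalWait)
	for time.Now().Before(deadline) {
		if done() {
			return true
		}
		time.Sleep(time.Second)
	}
	return done()
}

func main() {
	finish := time.Now().Add(3 * time.Second)
	ok := waitForSketch("Canceling Sync if not done in", 10*time.Second, func() bool {
		return time.Now().After(finish) // stand-in for b.CleanupCompleted
	})
	fmt.Println("completed in time:", ok)
}
```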
@@ -150,13 +150,13 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
|
||||
// we haven't started to sync yet, so we're good.
|
||||
// no need to worry about the listing files, as we haven't overwritten them yet.
|
||||
b.CleanupCompleted = true
|
||||
fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully.")) //nolint:govet
|
||||
fs.Log(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully."))
|
||||
}
|
||||
}
|
||||
if !b.CleanupCompleted {
|
||||
if !b.opt.Resync {
|
||||
fs.Logf(nil, Color(terminal.HiRedFg, "Graceful shutdown failed.")) //nolint:govet
|
||||
fs.Logf(nil, Color(terminal.RedFg, "Bisync interrupted. Must run --resync to recover.")) //nolint:govet
|
||||
fs.Log(nil, Color(terminal.HiRedFg, "Graceful shutdown failed."))
|
||||
fs.Log(nil, Color(terminal.RedFg, "Bisync interrupted. Must run --resync to recover."))
|
||||
}
|
||||
markFailed(b.listing1)
|
||||
markFailed(b.listing2)
|
||||
@@ -180,14 +180,14 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
|
||||
b.critical = false
|
||||
}
|
||||
if err == nil {
|
||||
fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully.")) //nolint:govet
|
||||
fs.Log(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully."))
|
||||
}
|
||||
}
|
||||
|
||||
if b.critical {
|
||||
if b.retryable && b.opt.Resilient {
|
||||
fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err) //nolint:govet
|
||||
fs.Errorf(nil, Color(terminal.YellowFg, "Bisync aborted. Error is retryable without --resync due to --resilient mode.")) //nolint:govet
|
||||
fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err)
|
||||
fs.Error(nil, Color(terminal.YellowFg, "Bisync aborted. Error is retryable without --resync due to --resilient mode."))
|
||||
} else {
|
||||
if bilib.FileExists(b.listing1) {
|
||||
_ = os.Rename(b.listing1, b.listing1+"-err")
|
||||
@@ -196,15 +196,15 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
|
||||
_ = os.Rename(b.listing2, b.listing2+"-err")
|
||||
}
|
||||
fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err)
|
||||
fs.Errorf(nil, Color(terminal.RedFg, "Bisync aborted. Must run --resync to recover.")) //nolint:govet
|
||||
fs.Error(nil, Color(terminal.RedFg, "Bisync aborted. Must run --resync to recover."))
|
||||
}
|
||||
return ErrBisyncAborted
|
||||
}
|
||||
if b.abort && !b.InGracefulShutdown {
|
||||
fs.Logf(nil, Color(terminal.RedFg, "Bisync aborted. Please try again.")) //nolint:govet
|
||||
fs.Log(nil, Color(terminal.RedFg, "Bisync aborted. Please try again."))
|
||||
}
|
||||
if err == nil {
|
||||
fs.Infof(nil, Color(terminal.GreenFg, "Bisync successful")) //nolint:govet
|
||||
fs.Infoc(nil, Color(terminal.GreenFg, "Bisync successful"))
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -270,7 +270,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
|
||||
if b.opt.Recover && bilib.FileExists(b.listing1+"-old") && bilib.FileExists(b.listing2+"-old") {
|
||||
errTip := fmt.Sprintf(Color(terminal.CyanFg, "Path1: %s\n"), Color(terminal.HiBlueFg, b.listing1))
|
||||
errTip += fmt.Sprintf(Color(terminal.CyanFg, "Path2: %s"), Color(terminal.HiBlueFg, b.listing2))
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "Listings not found. Reverting to prior backup as --recover is set. \n")+errTip) //nolint:govet
|
||||
fs.Log(nil, Color(terminal.YellowFg, "Listings not found. Reverting to prior backup as --recover is set. \n")+errTip)
|
||||
if opt.CheckSync != CheckSyncFalse {
|
||||
// Run CheckSync to ensure old listing is valid (garbage in, garbage out!)
|
||||
fs.Infof(nil, "Validating backup listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2))
|
||||
@@ -279,7 +279,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
|
||||
b.retryable = true
|
||||
return err
|
||||
}
|
||||
fs.Infof(nil, Color(terminal.GreenFg, "Backup listing is valid.")) //nolint:govet
|
||||
fs.Infoc(nil, Color(terminal.GreenFg, "Backup listing is valid."))
|
||||
}
|
||||
b.revertToOldListings()
|
||||
} else {
|
||||
@@ -299,7 +299,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
|
||||
fs.Infof(nil, "Building Path1 and Path2 listings")
|
||||
ls1, ls2, err = b.makeMarchListing(fctx)
|
||||
if err != nil || accounting.Stats(fctx).Errored() {
|
||||
fs.Errorf(nil, Color(terminal.RedFg, "There were errors while building listings. Aborting as it is too dangerous to continue.")) //nolint:govet
|
||||
fs.Error(nil, Color(terminal.RedFg, "There were errors while building listings. Aborting as it is too dangerous to continue."))
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
return err
|
||||
@@ -623,7 +623,7 @@ func (b *bisyncRun) checkSyntax() error {
|
||||
|
||||
func (b *bisyncRun) debug(nametocheck, msgiftrue string) {
|
||||
if b.DebugName != "" && b.DebugName == nametocheck {
|
||||
fs.Infof(Color(terminal.MagentaBg, "DEBUGNAME "+b.DebugName), Color(terminal.MagentaBg, msgiftrue)) //nolint:govet
|
||||
fs.Infoc(Color(terminal.MagentaBg, "DEBUGNAME "+b.DebugName), Color(terminal.MagentaBg, msgiftrue))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -161,7 +161,7 @@ func WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEn
|
||||
prettyprint(result, "writing result", fs.LogLevelDebug)
|
||||
if result.Size < 0 && result.Flags != "d" && ((queueCI.CheckSum && !downloadHash) || queueCI.SizeOnly) {
|
||||
once.Do(func() {
|
||||
fs.Logf(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. Consider using modtime instead (the default) or --drive-skip-gdocs")) //nolint:govet
|
||||
fs.Log(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. Consider using modtime instead (the default) or --drive-skip-gdocs"))
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -142,7 +142,7 @@ func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias s
|
||||
if winningPath > 0 {
|
||||
fs.Infof(file, Color(terminal.GreenFg, "The winner is: Path%d"), winningPath)
|
||||
} else {
|
||||
fs.Infof(file, Color(terminal.RedFg, "A winner could not be determined.")) //nolint:govet
|
||||
fs.Infoc(file, Color(terminal.RedFg, "A winner could not be determined."))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
// and either flag is sufficient without the other.
|
||||
func (b *bisyncRun) setResyncDefaults() {
|
||||
if b.opt.Resync && b.opt.ResyncMode == PreferNone {
|
||||
fs.Debugf(nil, Color(terminal.Dim, "defaulting to --resync-mode path1 as --resync is set")) //nolint:govet
|
||||
fs.Debug(nil, Color(terminal.Dim, "defaulting to --resync-mode path1 as --resync is set"))
|
||||
b.opt.ResyncMode = PreferPath1
|
||||
}
|
||||
if b.opt.ResyncMode != PreferNone {
|
||||
|
||||
@@ -23,7 +23,7 @@ INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
[36m(05) :[0m [34mmove-listings empty-path1[0m
|
||||
|
||||
[36m(06) :[0m [34mtest 2. resync with empty path2, resulting in synching all content to path2.[0m
|
||||
[36m(06) :[0m [34mtest 2. resync with empty path2, resulting in syncing all content to path2.[0m
|
||||
[36m(07) :[0m [34mpurge-children {path2/}[0m
|
||||
[36m(08) :[0m [34mbisync resync[0m
|
||||
INFO : [2mSetting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.[0m
|
||||
|
||||
4
cmd/bisync/testdata/test_resync/scenario.txt
vendored
@@ -1,6 +1,6 @@
|
||||
test resync
|
||||
# 1. Resync with empty Path1, resulting in copying all content FROM Path2
|
||||
# 2. Resync with empty Path2, resulting in synching all content TO Path2
|
||||
# 2. Resync with empty Path2, resulting in syncing all content TO Path2
|
||||
# 3. Exercise all of the various file difference scenarios during a resync:
|
||||
# File Path1 Path2 Expected action Who wins
|
||||
# - file1.txt Exists Missing Sync Path1 >Path2 Path1
|
||||
@@ -17,7 +17,7 @@ purge-children {path1/}
|
||||
bisync resync
|
||||
move-listings empty-path1
|
||||
|
||||
test 2. resync with empty path2, resulting in synching all content to path2.
|
||||
test 2. resync with empty path2, resulting in syncing all content to path2.
|
||||
purge-children {path2/}
|
||||
bisync resync
|
||||
move-listings empty-path2
|
||||
|
||||
11
cmd/cmd.go
@@ -429,11 +429,12 @@ func initConfig() {
|
||||
fs.Fatalf(nil, "Failed to start remote control: %v", err)
|
||||
}
|
||||
|
||||
// Start the metrics server if configured
|
||||
_, err = rcserver.MetricsStart(ctx, &rc.Opt)
|
||||
if err != nil {
|
||||
fs.Fatalf(nil, "Failed to start metrics server: %v", err)
|
||||
|
||||
// Start the metrics server if configured and not running the "rc" command
|
||||
if os.Args[1] != "rc" {
|
||||
_, err = rcserver.MetricsStart(ctx, &rc.Opt)
|
||||
if err != nil {
|
||||
fs.Fatalf(nil, "Failed to start metrics server: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Setup CPU profiling if desired
|
||||
|
||||
@@ -549,12 +549,12 @@ password to re-encrypt the config.
|
||||
|
||||
When |--password-command| is called to change the password then the
|
||||
environment variable |RCLONE_PASSWORD_CHANGE=1| will be set. So if
|
||||
changing passwords programatically you can use the environment
|
||||
changing passwords programmatically you can use the environment
|
||||
variable to distinguish which password you must supply.
|
||||
|
||||
Alternatively you can remove the password first (with |rclone config
|
||||
encryption remove|), then set it again with this command which may be
|
||||
easier if you don't mind the unecrypted config file being on the disk
|
||||
easier if you don't mind the unencrypted config file being on the disk
|
||||
briefly.
|
||||
`, "|", "`"),
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
|
||||
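The `RCLONE_PASSWORD_CHANGE=1` convention described above is what an external `--password-command` helper can key off. A purely hypothetical helper in Go (the two `MY_*_CONFIG_PASS` variable names are invented for the example and are not rclone variables):

```go
package main

import (
	"fmt"
	"os"
)

// Prints the config password on stdout for rclone to read. When rclone is
// changing the password it sets RCLONE_PASSWORD_CHANGE=1, so the helper can
// hand back the new password instead of the current one.
func main() {
	if os.Getenv("RCLONE_PASSWORD_CHANGE") == "1" {
		fmt.Println(os.Getenv("MY_NEW_CONFIG_PASS")) // hypothetical source of the new password
	} else {
		fmt.Println(os.Getenv("MY_CURRENT_CONFIG_PASS")) // hypothetical source of the current password
	}
}
```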
@@ -54,7 +54,7 @@ destination if there is one with the same name.
|
||||
Setting |--stdout| or making the output file name |-|
|
||||
will cause the output to be written to standard output.
|
||||
|
||||
### Troublshooting
|
||||
### Troubleshooting
|
||||
|
||||
If you can't get |rclone copyurl| to work then here are some things you can try:
|
||||
|
||||
|
||||
@@ -194,7 +194,7 @@ func (f *FS) Chown(name string, uid, gid int) (err error) {
|
||||
return file.Chown(uid, gid)
|
||||
}
|
||||
|
||||
// Chtimes changes the acces time and modified time
|
||||
// Chtimes changes the access time and modified time
|
||||
func (f *FS) Chtimes(name string, atime time.Time, mtime time.Time) (err error) {
|
||||
defer log.Trace(name, "atime=%v, mtime=%v", atime, mtime)("err=%v", &err)
|
||||
return f.vfs.Chtimes(name, atime, mtime)
|
||||
|
||||
@@ -145,7 +145,7 @@ that it uses an on disk cache, but the cache entries are held as
|
||||
symlinks. Rclone will use the handle of the underlying file as the NFS
|
||||
handle which improves performance. This sort of cache can't be backed
|
||||
up and restored as the underlying handles will change. This is Linux
|
||||
only. It requres running rclone as root or with |CAP_DAC_READ_SEARCH|.
|
||||
only. It requires running rclone as root or with |CAP_DAC_READ_SEARCH|.
|
||||
You can run rclone with this extra permission by doing this to the
|
||||
rclone binary |sudo setcap cap_dac_read_search+ep /path/to/rclone|.
|
||||
|
||||
|
||||
@@ -4,8 +4,10 @@ package proxy
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"crypto/sha256"
|
||||
"crypto/subtle"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -217,8 +219,13 @@ func (p *Proxy) call(user, auth string, isPublicKey bool) (value interface{}, er
|
||||
return nil, fmt.Errorf("proxy: couldn't find backend for %q: %w", fsName, err)
|
||||
}
|
||||
|
||||
// Add a config hash to ensure configs with different values have different names.
|
||||
// 5 characters length is 5*6 = 30 bits of base64
|
||||
md5sumBinary := md5.Sum([]byte(config.String()))
|
||||
configHash := base64.RawURLEncoding.EncodeToString(md5sumBinary[:])[:5]
|
||||
|
||||
// base name of config on user name. This may appear in logs
|
||||
name := "proxy-" + user
|
||||
name := "proxy-" + user + "-" + configHash
|
||||
fsString := name + ":" + root
|
||||
|
||||
// Look for fs in the VFS cache
|
||||
|
||||
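The hunk above derives a short fingerprint from the backend config so that two proxy users (or one user with changed settings) can no longer collide on the same cached Fs name. A standalone sketch of the same derivation, using a stand-in for `config.String()`:

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	// Stand-in for config.String() in the proxy code above.
	configString := "type=sftp,host=example.com,user=alice"

	// MD5 the whole config, then keep 5 URL-safe base64 characters,
	// i.e. 5*6 = 30 bits of the digest.
	sum := md5.Sum([]byte(configString))
	configHash := base64.RawURLEncoding.EncodeToString(sum[:])[:5]

	fmt.Println("proxy-alice-" + configHash)
}
```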
@@ -90,7 +90,8 @@ func TestRun(t *testing.T) {
|
||||
require.NotNil(t, entry.vfs)
|
||||
f := entry.vfs.Fs()
|
||||
require.NotNil(t, f)
|
||||
assert.Equal(t, "proxy-"+testUser, f.Name())
|
||||
assert.True(t, strings.HasPrefix(f.Name(), "proxy-"+testUser+"-"))
|
||||
assert.Equal(t, len("proxy-"+testUser+"-")+5, len(f.Name()))
|
||||
assert.True(t, strings.HasPrefix(f.String(), "Local file system"))
|
||||
|
||||
// check it is in the cache
|
||||
@@ -108,7 +109,7 @@ func TestRun(t *testing.T) {
|
||||
vfs, vfsKey, err := p.Call(testUser, testPass, false)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, vfs)
|
||||
assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
|
||||
assert.True(t, strings.HasPrefix(vfs.Fs().Name(), "proxy-"+testUser+"-"))
|
||||
assert.Equal(t, testUser, vfsKey)
|
||||
|
||||
// check it is in the cache
|
||||
@@ -129,7 +130,7 @@ func TestRun(t *testing.T) {
|
||||
vfs, vfsKey, err = p.Call(testUser, testPass, false)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, vfs)
|
||||
assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
|
||||
assert.True(t, strings.HasPrefix(vfs.Fs().Name(), "proxy-"+testUser+"-"))
|
||||
assert.Equal(t, testUser, vfsKey)
|
||||
|
||||
// check cache is at the same level
|
||||
@@ -173,7 +174,7 @@ func TestRun(t *testing.T) {
|
||||
require.NotNil(t, entry.vfs)
|
||||
f := entry.vfs.Fs()
|
||||
require.NotNil(t, f)
|
||||
assert.Equal(t, "proxy-"+testUser, f.Name())
|
||||
assert.True(t, strings.HasPrefix(f.Name(), "proxy-"+testUser+"-"))
|
||||
assert.True(t, strings.HasPrefix(f.String(), "Local file system"))
|
||||
|
||||
// check it is in the cache
|
||||
@@ -195,7 +196,7 @@ func TestRun(t *testing.T) {
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, vfs)
|
||||
assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
|
||||
assert.True(t, strings.HasPrefix(vfs.Fs().Name(), "proxy-"+testUser+"-"))
|
||||
assert.Equal(t, testUser, vfsKey)
|
||||
|
||||
// check it is in the cache
|
||||
@@ -216,7 +217,7 @@ func TestRun(t *testing.T) {
|
||||
vfs, vfsKey, err = p.Call(testUser, publicKeyString, true)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, vfs)
|
||||
assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
|
||||
assert.True(t, strings.HasPrefix(vfs.Fs().Name(), "proxy-"+testUser+"-"))
|
||||
assert.Equal(t, testUser, vfsKey)
|
||||
|
||||
// check cache is at the same level
|
||||
|
||||
@@ -52,7 +52,7 @@ func (b *s3Backend) ListBuckets(ctx context.Context) ([]gofakes3.BucketInfo, err
|
||||
for _, entry := range dirEntries {
|
||||
if entry.IsDir() {
|
||||
response = append(response, gofakes3.BucketInfo{
|
||||
Name: gofakes3.URLEncode(entry.Name()),
|
||||
Name: entry.Name(),
|
||||
CreationDate: gofakes3.NewContentTime(entry.ModTime()),
|
||||
})
|
||||
}
|
||||
@@ -158,7 +158,7 @@ func (b *s3Backend) HeadObject(ctx context.Context, bucketName, objectName strin
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetObject fetchs the object from the filesystem.
|
||||
// GetObject fetches the object from the filesystem.
|
||||
func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string, rangeRequest *gofakes3.ObjectRangeRequest) (obj *gofakes3.Object, err error) {
|
||||
_vfs, err := b.s.getVFS(ctx)
|
||||
if err != nil {
|
||||
@@ -227,7 +227,7 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string
|
||||
}
|
||||
|
||||
return &gofakes3.Object{
|
||||
Name: gofakes3.URLEncode(objectName),
|
||||
Name: objectName,
|
||||
Hash: hash,
|
||||
Metadata: meta,
|
||||
Size: size,
|
||||
@@ -400,7 +400,7 @@ func (b *s3Backend) deleteObject(ctx context.Context, bucketName, objectName str
|
||||
}
|
||||
|
||||
fp := path.Join(bucketName, objectName)
|
||||
// S3 does not report an error when attemping to delete a key that does not exist, so
|
||||
// S3 does not report an error when attempting to delete a key that does not exist, so
|
||||
// we need to skip IsNotExist errors.
|
||||
if err := _vfs.Remove(fp); err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
|
||||
@@ -19,7 +19,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
|
||||
for _, entry := range dirEntries {
|
||||
object := entry.Name()
|
||||
|
||||
// workround for control-chars detect
|
||||
// workaround for control-chars detect
|
||||
objectPath := path.Join(fdPath, object)
|
||||
|
||||
if !strings.HasPrefix(object, name) {
|
||||
@@ -28,7 +28,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
|
||||
|
||||
if entry.IsDir() {
|
||||
if addPrefix {
|
||||
response.AddPrefix(gofakes3.URLEncode(objectPath))
|
||||
response.AddPrefix(objectPath)
|
||||
continue
|
||||
}
|
||||
err := b.entryListR(_vfs, bucket, path.Join(fdPath, object), "", false, response)
|
||||
@@ -37,7 +37,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
|
||||
}
|
||||
} else {
|
||||
item := &gofakes3.Content{
|
||||
Key: gofakes3.URLEncode(objectPath),
|
||||
Key: objectPath,
|
||||
LastModified: gofakes3.NewContentTime(entry.ModTime()),
|
||||
ETag: getFileHash(entry),
|
||||
Size: entry.Size(),
|
||||
|
||||
@@ -69,7 +69,7 @@ secret_access_key = SECRET_ACCESS_KEY
|
||||
use_multipart_uploads = false
|
||||
```
|
||||
|
||||
Note that setting `disable_multipart_uploads = true` is to work around
|
||||
Note that setting `use_multipart_uploads = false` is to work around
|
||||
[a bug](#bugs) which will be fixed in due course.
|
||||
|
||||
### Bugs
|
||||
|
||||
@@ -3,10 +3,13 @@ package version
|
||||
|
||||
import (
|
||||
"context"
|
||||
"debug/buildinfo"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -20,12 +23,14 @@ import (
|
||||
|
||||
var (
|
||||
check = false
|
||||
deps = false
|
||||
)
|
||||
|
||||
func init() {
|
||||
cmd.Root.AddCommand(commandDefinition)
|
||||
cmdFlags := commandDefinition.Flags()
|
||||
flags.BoolVarP(cmdFlags, &check, "check", "", false, "Check for new version", "")
|
||||
flags.BoolVarP(cmdFlags, &deps, "deps", "", false, "Show the Go dependencies", "")
|
||||
}
|
||||
|
||||
var commandDefinition = &cobra.Command{
|
||||
@@ -67,18 +72,25 @@ Or
|
||||
beta: 1.42.0.5 (released 2018-06-17)
|
||||
upgrade: https://beta.rclone.org/v1.42-005-g56e1e820
|
||||
|
||||
If you supply the --deps flag then rclone will print a list of all the
|
||||
packages it depends on and their versions along with some other
|
||||
information about the build.
|
||||
`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.33",
|
||||
},
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
ctx := context.Background()
|
||||
cmd.CheckArgs(0, 0, command, args)
|
||||
if deps {
|
||||
return printDependencies()
|
||||
}
|
||||
if check {
|
||||
CheckVersion(ctx)
|
||||
} else {
|
||||
cmd.ShowVersion()
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
@@ -151,3 +163,36 @@ func CheckVersion(ctx context.Context) {
|
||||
fmt.Println("Your version is compiled from git so comparisons may be wrong.")
|
||||
}
|
||||
}
|
||||
|
||||
// Print info about a build module
|
||||
func printModule(module *debug.Module) {
|
||||
if module.Replace != nil {
|
||||
fmt.Printf("- %s %s (replaced by %s %s)\n",
|
||||
module.Path, module.Version, module.Replace.Path, module.Replace.Version)
|
||||
} else {
|
||||
fmt.Printf("- %s %s\n", module.Path, module.Version)
|
||||
}
|
||||
}
|
||||
|
||||
// printDependencies shows the packages we use in a format like go.mod
|
||||
func printDependencies() error {
|
||||
info, err := buildinfo.ReadFile(os.Args[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading build info: %w", err)
|
||||
}
|
||||
fmt.Println("Go Version:")
|
||||
fmt.Printf("- %s\n", info.GoVersion)
|
||||
fmt.Println("Main package:")
|
||||
printModule(&info.Main)
|
||||
fmt.Println("Binary path:")
|
||||
fmt.Printf("- %s\n", info.Path)
|
||||
fmt.Println("Settings:")
|
||||
for _, setting := range info.Settings {
|
||||
fmt.Printf("- %s: %s\n", setting.Key, setting.Value)
|
||||
}
|
||||
fmt.Println("Dependencies:")
|
||||
for _, dep := range info.Deps {
|
||||
printModule(dep)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -925,3 +925,16 @@ put them back in again.` >}}
|
||||
* ToM <thomas.faucher@bibliosansfrontieres.org>
|
||||
* TAKEI Yuya <853320+takei-yuya@users.noreply.github.com>
|
||||
* Francesco Frassinelli <fraph24@gmail.com> <francesco.frassinelli@nina.no>
|
||||
* Matt Ickstadt <mattico8@gmail.com> <matt@beckenterprises.com>
|
||||
* Spencer McCullough <mccullough.spencer@gmail.com>
|
||||
* Jonathan Giannuzzi <jonathan@giannuzzi.me>
|
||||
* Christoph Berger <github@christophberger.com>
|
||||
* Tim White <tim.white@su.org.au>
|
||||
* Robin Schneider <robin.schneider@stackit.cloud>
|
||||
* izouxv <izouxv@users.noreply.github.com>
|
||||
* Moises Lima <mozlima@users.noreply.github.com>
|
||||
* Bruno Fernandes <bruno.fernandes1996@hotmail.com>
|
||||
* Corentin Barreau <corentin@archive.org>
|
||||
* hiddenmarten <hiddenmarten@gmail.com>
|
||||
* Trevor Starick <trevor.starick@gmail.com>
|
||||
* b-wimmer <132347192+b-wimmer@users.noreply.github.com>
|
||||
|
||||
@@ -938,8 +938,9 @@ You can set custom upload headers with the `--header-upload` flag.
|
||||
- Content-Encoding
|
||||
- Content-Language
|
||||
- Content-Type
|
||||
- X-MS-Tags
|
||||
|
||||
Eg `--header-upload "Content-Type: text/potato"`
|
||||
Eg `--header-upload "Content-Type: text/potato"` or `--header-upload "X-MS-Tags: foo=bar"`
|
||||
|
||||
## Limitations
|
||||
|
||||
|
||||
@@ -206,6 +206,13 @@ If the resource has multiple user-assigned identities you will need to
|
||||
unset `env_auth` and set `use_msi` instead. See the [`use_msi`
|
||||
section](#use_msi).
|
||||
|
||||
If you are operating in disconnected clouds, or private clouds such as
|
||||
Azure Stack you may want to set `disable_instance_discovery = true`.
|
||||
This determines whether rclone requests Microsoft Entra instance
|
||||
metadata from `https://login.microsoft.com/` before authenticating.
|
||||
Setting this to `true` will skip this request, making you responsible
|
||||
for ensuring the configured authority is valid and trustworthy.
|
||||
|
||||
##### Env Auth: 3. Azure CLI credentials (as used by the az tool)
|
||||
|
||||
Credentials created with the `az` tool can be picked up using `env_auth`.
|
||||
@@ -288,6 +295,13 @@ be explicitly specified using exactly one of the `msi_object_id`,
|
||||
|
||||
If none of `msi_object_id`, `msi_client_id`, or `msi_mi_res_id` is
|
||||
set, this is is equivalent to using `env_auth`.
|
||||
|
||||
#### Azure CLI tool `az` {#use_az}
|
||||
Set to use the [Azure CLI tool `az`](https://learn.microsoft.com/en-us/cli/azure/)
|
||||
as the sole means of authentication.
|
||||
Setting this can be useful if you wish to use the `az` CLI on a host with
|
||||
a System Managed Identity that you do not want to use.
|
||||
Don't set `env_auth` at the same time.
|
||||
|
||||
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/azurefiles/azurefiles.go then run make backenddocs" >}}
|
||||
### Standard options
|
||||
|
||||
@@ -487,7 +487,7 @@ See the [bisync filters](#filtering) section and generic
|
||||
[--filter-from](/filtering/#filter-from-read-filtering-patterns-from-a-file)
|
||||
documentation.
|
||||
An [example filters file](#example-filters-file) contains filters for
|
||||
non-allowed files for synching with Dropbox.
|
||||
non-allowed files for syncing with Dropbox.
|
||||
|
||||
If you make changes to your filters file then bisync requires a run
|
||||
with `--resync`. This is a safety feature, which prevents existing files
|
||||
@@ -664,7 +664,7 @@ Using `--check-sync=false` will disable it and may significantly reduce the
|
||||
sync run times for very large numbers of files.
|
||||
|
||||
The check may be run manually with `--check-sync=only`. It runs only the
|
||||
integrity check and terminates without actually synching.
|
||||
integrity check and terminates without actually syncing.
|
||||
|
||||
Note that currently, `--check-sync` **only checks listing snapshots and NOT the
|
||||
actual files on the remotes.** Note also that the listing snapshots will not
|
||||
@@ -1141,7 +1141,7 @@ The `--include*`, `--exclude*`, and `--filter` flags are also supported.
|
||||
|
||||
### How to filter directories
|
||||
|
||||
Filtering portions of the directory tree is a critical feature for synching.
|
||||
Filtering portions of the directory tree is a critical feature for syncing.
|
||||
|
||||
Examples of directory trees (always beneath the Path1/Path2 root level)
|
||||
you may want to exclude from your sync:
|
||||
@@ -1250,7 +1250,7 @@ quashed by adding `--quiet` to the bisync command line.
|
||||
|
||||
## Example exclude-style filters files for use with Dropbox {#exclude-filters}
|
||||
|
||||
- Dropbox disallows synching the listed temporary and configuration/data files.
|
||||
- Dropbox disallows syncing the listed temporary and configuration/data files.
|
||||
The `- <filename>` filters exclude these files where ever they may occur
|
||||
in the sync tree. Consider adding similar exclusions for file types
|
||||
you don't need to sync, such as core dump and software build files.
|
||||
@@ -1584,7 +1584,7 @@ test command flags can be equally prefixed by a single `-` or double dash.
|
||||
|
||||
- `go test . -case basic -remote local -remote2 local`
|
||||
runs the `test_basic` test case using only the local filesystem,
|
||||
synching one local directory with another local directory.
|
||||
syncing one local directory with another local directory.
|
||||
Test script output is to the console, while commands within scenario.txt
|
||||
have their output sent to the `.../workdir/test.log` file,
|
||||
which is finally compared to the golden copy.
|
||||
@@ -1860,4 +1860,4 @@ causing bisync to consider more files than necessary due to overbroad filters du
|
||||
* Added [new `--ignore-listing-checksum` flag](https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=6.%20%2D%2Dignore%2Dchecksum%20should%20be%20split%20into%20two%20flags%20for%20separate%20purposes)
|
||||
to distinguish from `--ignore-checksum`
|
||||
* [Performance improvements](https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=6.%20Deletes%20take%20several%20times%20longer%20than%20copies) for large remotes
|
||||
* Documentation and testing improvements
|
||||
* Documentation and testing improvements
|
||||
|
||||
@@ -741,7 +741,7 @@ strong random number generator. The nonce is incremented for each
|
||||
chunk read making sure each nonce is unique for each block written.
|
||||
The chance of a nonce being reused is minuscule. If you wrote an
|
||||
exabyte of data (10¹⁸ bytes) you would have a probability of
|
||||
approximately 2×10⁻³² of re-using a nonce.
|
||||
approximately 2×10⁻³² of reusing a nonce.
|
||||
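As a rough sanity check of that figure (assuming the crypt format's 24-byte random nonce and 64 KiB chunks, parameters not restated in this hunk): an exabyte is about 10¹⁸ / 65536 ≈ 1.5×10¹³ chunks, and the birthday bound n²/2¹⁹³ gives roughly (1.5×10¹³)² / 1.3×10⁵⁸ ≈ 2×10⁻³², in line with the probability quoted above.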
|
||||
#### Chunk
|
||||
|
||||
|
||||
@@ -619,6 +619,11 @@ it to `false`. It is also possible to specify `--boolean=false` or
|
||||
parsed as `--boolean` and the `false` is parsed as an extra command
|
||||
line argument for rclone.
|
||||
|
||||
Options documented to take a `stringArray` parameter accept multiple
|
||||
values. To pass more than one value, repeat the option; for example:
|
||||
`--include value1 --include value2`.
|
||||
|
||||
|
||||
### Time or duration options {#time-option}
|
||||
|
||||
TIME or DURATION options can be specified as a duration string or a
|
||||
@@ -2930,7 +2935,7 @@ so they take exactly the same form.
|
||||
The options set by environment variables can be seen with the `-vv` flag, e.g. `rclone version -vv`.
|
||||
|
||||
Options that can appear multiple times (type `stringArray`) are
|
||||
treated slighly differently as environment variables can only be
|
||||
treated slightly differently as environment variables can only be
|
||||
defined once. In order to allow a simple mechanism for adding one or
|
||||
many items, the input is treated as a [CSV encoded](https://godoc.org/encoding/csv)
|
||||
string. For example
|
||||
|
||||
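Because the value is CSV decoded, one environment variable can carry several `stringArray` values, including values that themselves contain commas, by quoting them. A minimal sketch of the decoding using the standard `encoding/csv` package the documentation links to (the variable content is only an example):

```go
package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

func main() {
	// Hypothetical value for a stringArray option such as RCLONE_EXCLUDE.
	value := `*.bak,"name,with,commas"`

	// One CSV record per environment variable; quoting protects embedded commas.
	record, err := csv.NewReader(strings.NewReader(value)).Read()
	if err != nil {
		panic(err)
	}
	fmt.Println(record) // [*.bak name,with,commas]
}
```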
@@ -9,6 +9,8 @@ description: "Rclone Global Flags"
|
||||
This describes the global flags available to every rclone command
|
||||
split into groups.
|
||||
|
||||
See the [Options section](/docs/#options) for syntax and usage advice.
|
||||
|
||||
|
||||
## Copy
|
||||
|
||||
|
||||
@@ -384,7 +384,7 @@ Use the gphotosdl proxy for downloading the full resolution images
|
||||
The Google API will deliver images and video which aren't full
|
||||
resolution, and/or have EXIF data missing.
|
||||
|
||||
However if you ue the gphotosdl proxy tnen you can download original,
|
||||
However if you use the gphotosdl proxy then you can download original,
|
||||
unchanged images.
|
||||
|
||||
This runs a headless browser in the background.
|
||||
|
||||
@@ -61,7 +61,7 @@ Enter a value.
|
||||
config_2fa> 2FACODE
|
||||
Remote config
|
||||
--------------------
|
||||
[koofr]
|
||||
[iclouddrive]
|
||||
- type: iclouddrive
|
||||
- apple_id: APPLEID
|
||||
- password: *** ENCRYPTED ***
|
||||
@@ -78,6 +78,20 @@ y/e/d> y
|
||||
|
||||
ADP is currently unsupported and need to be disabled
|
||||
|
||||
On iPhone, Settings `>` Apple Account `>` iCloud `>` 'Access iCloud Data on the Web' must be ON, and 'Advanced Data Protection' OFF.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Missing PCS cookies from the request
|
||||
|
||||
This means you have Advanced Data Protection (ADP) turned on. This is not supported at the moment. If you want to use rclone you will have to turn it off. See above for how to turn it off.
|
||||
|
||||
You will need to clear the `cookies` and the `trust_token` fields in the config. Or you can delete the remote config and start again.
|
||||
|
||||
You should then run `rclone reconnect remote:`.
|
||||
|
||||
Note that changing the ADP setting may not take effect immediately - you may need to wait a few hours or a day before you can get rclone to work - keep clearing the config entry and running `rclone reconnect remote:` until rclone functions properly.
|
||||
|
||||
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/iclouddrive/iclouddrive.go then run make backenddocs" >}}
|
||||
### Standard options
|
||||
|
||||
|
||||
@@ -333,7 +333,7 @@ Note that this is controlled by [community maintainer](https://github.com/bouken
|
||||
## Source installation {#source}
|
||||
|
||||
Make sure you have git and [Go](https://golang.org/) installed.
|
||||
Go version 1.18 or newer is required, the latest release is recommended.
|
||||
Go version 1.22 or newer is required, the latest release is recommended.
|
||||
You can get it from your package manager, or download it from
|
||||
[golang.org/dl](https://golang.org/dl/). Then you can run the following:
|
||||
|
||||
|
||||
@@ -936,6 +936,28 @@ See the [metadata](/docs/#metadata) docs for more info.
|
||||
|
||||
{{< rem autogenerated options stop >}}
|
||||
|
||||
### Impersonate other users as Admin
|
||||
|
||||
Unlike Google Drive and impersonating any domain user via service accounts, OneDrive requires you to authenticate as an admin account, and manually setup a remote per user you wish to impersonate.
|
||||
|
||||
1. In [Microsoft 365 Admin Center](https://admin.microsoft.com), open each user you need to "impersonate" and go to the OneDrive section. There is a heading called "Get access to files", you need to click to create the link, this creates the link of the format `https://{tenant}-my.sharepoint.com/personal/{user_name_domain_tld}/` but also changes the permissions so you your admin user has access.
|
||||
2. Then in powershell run the following commands:
|
||||
```console
|
||||
Install-Module Microsoft.Graph -Scope CurrentUser -Repository PSGallery -Force
|
||||
Import-Module Microsoft.Graph.Files
|
||||
Connect-MgGraph -Scopes "Files.ReadWrite.All"
|
||||
# Follow the steps to allow access to your admin user
|
||||
# Then run this for each user you want to impersonate to get the Drive ID
|
||||
Get-MgUserDefaultDrive -UserId '{emailaddress}'
|
||||
# This will give you output of the format:
|
||||
# Name Id DriveType CreatedDateTime
|
||||
# ---- -- --------- ---------------
|
||||
# OneDrive b!XYZ123 business 14/10/2023 1:00:58 pm
|
||||
|
||||
```
|
||||
3. Then in rclone add a onedrive remote type, and use the `Type in driveID` with the DriveID you got in the previous step. One remote per user. It will then confirm the drive ID, and hopefully give you a message of `Found drive "root" of type "business"` and then include the URL of the format `https://{tenant}-my.sharepoint.com/personal/{user_name_domain_tld}/Documents`
|
||||
|
||||
|
||||
## Limitations
|
||||
|
||||
If you don't use rclone for 90 days the refresh token will
|
||||
|
||||
@@ -2068,7 +2068,7 @@ the `--vfs-cache-mode` is off, it will return an empty result.
|
||||
],
|
||||
}
|
||||
|
||||
The `expiry` time is the time until the file is elegible for being
|
||||
The `expiry` time is the time until the file is eligible for being
|
||||
uploaded in floating point seconds. This may go negative. As rclone
|
||||
only transfers `--transfers` files at once, only the lowest
|
||||
`--transfers` expiry times will have `uploading` as `true`. So there
|
||||
|
||||
@@ -31,7 +31,7 @@ The S3 backend can be used with a number of different providers:
|
||||
{{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}}
|
||||
{{< provider name="Qiniu Cloud Object Storage (Kodo)" home="https://www.qiniu.com/en/products/kodo" config="/s3/#qiniu" >}}
|
||||
{{< provider name="RackCorp Object Storage" home="https://www.rackcorp.com/" config="/s3/#RackCorp" >}}
|
||||
{{< provider name="Rclone Serve S3" home="/commands/rclone_serve_http/" config="/s3/#rclone" >}}
|
||||
{{< provider name="Rclone Serve S3" home="/commands/rclone_serve_s3/" config="/s3/#rclone" >}}
|
||||
{{< provider name="Scaleway" home="https://www.scaleway.com/en/object-storage/" config="/s3/#scaleway" >}}
|
||||
{{< provider name="Seagate Lyve Cloud" home="https://www.seagate.com/gb/en/services/cloud/storage/" config="/s3/#lyve" >}}
|
||||
{{< provider name="SeaweedFS" home="https://github.com/chrislusf/seaweedfs/" config="/s3/#seaweedfs" >}}
|
||||
@@ -750,7 +750,7 @@ Notes on above:
|
||||
that `USER_NAME` has been created.
|
||||
2. The Resource entry must include both resource ARNs, as one implies
|
||||
the bucket and the other implies the bucket's objects.
|
||||
3. When using [s3-no-check-bucket](#s3-no-check-bucket) and the bucket already exsits, the `"arn:aws:s3:::BUCKET_NAME"` doesn't have to be included.
|
||||
3. When using [s3-no-check-bucket](#s3-no-check-bucket) and the bucket already exists, the `"arn:aws:s3:::BUCKET_NAME"` doesn't have to be included.
|
||||
|
||||
For reference, [here's an Ansible script](https://gist.github.com/ebridges/ebfc9042dd7c756cd101cfa807b7ae2b)
|
||||
that will generate one or more buckets that will work with `rclone sync`.
|
||||
@@ -3728,7 +3728,7 @@ location_constraint = au-nsw
|
||||
### Rclone Serve S3 {#rclone}
|
||||
|
||||
Rclone can serve any remote over the S3 protocol. For details see the
|
||||
[rclone serve s3](/commands/rclone_serve_http/) documentation.
|
||||
[rclone serve s3](/commands/rclone_serve_s3/) documentation.
|
||||
|
||||
For example, to serve `remote:path` over s3, run the server like this:
|
||||
|
||||
@@ -3748,8 +3748,8 @@ secret_access_key = SECRET_ACCESS_KEY
|
||||
use_multipart_uploads = false
|
||||
```
|
||||
|
||||
Note that setting `disable_multipart_uploads = true` is to work around
|
||||
[a bug](/commands/rclone_serve_http/#bugs) which will be fixed in due course.
|
||||
Note that setting `use_multipart_uploads = false` is to work around
|
||||
[a bug](/commands/rclone_serve_s3/#bugs) which will be fixed in due course.
|
||||
|
||||
### Scaleway
|
||||
|
||||
|
||||
@@ -1 +1 @@
|
||||
v1.69.0
|
||||
v1.70.0
|
||||
4
fs/cache/cache_test.go
vendored
@@ -131,7 +131,7 @@ func TestPutErr(t *testing.T) {
|
||||
assert.Equal(t, 1, Entries())
|
||||
|
||||
fNew, err := GetFn(context.Background(), "mock:/", create)
|
||||
require.Equal(t, fs.ErrorNotFoundInConfigFile, err)
|
||||
require.True(t, errors.Is(err, fs.ErrorNotFoundInConfigFile))
|
||||
require.Equal(t, f, fNew)
|
||||
|
||||
assert.Equal(t, 1, Entries())
|
||||
@@ -141,7 +141,7 @@ func TestPutErr(t *testing.T) {
|
||||
PutErr("mock:/file.txt", f, fs.ErrorNotFoundInConfigFile)
|
||||
|
||||
fNew, err = GetFn(context.Background(), "mock:/file.txt", create)
|
||||
require.Equal(t, fs.ErrorNotFoundInConfigFile, err)
|
||||
require.True(t, errors.Is(err, fs.ErrorNotFoundInConfigFile))
|
||||
require.Equal(t, f, fNew)
|
||||
|
||||
assert.Equal(t, 1, Entries())
|
||||
|
||||
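These assertions change because, as the `ParseRemote` hunk further down shows, the sentinel `fs.ErrorNotFoundInConfigFile` is now wrapped with `fmt.Errorf("%w (...)")`, so plain equality no longer matches while `errors.Is` still does. A minimal illustration with a stand-in sentinel:

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-in for fs.ErrorNotFoundInConfigFile.
var errNotFound = errors.New("didn't find section in config file")

func main() {
	// Wrapping as ParseRemote now does adds context but changes identity.
	err := fmt.Errorf("%w (%q)", errNotFound, "notfoundremote")

	fmt.Println(err == errNotFound)          // false - direct comparison breaks
	fmt.Println(errors.Is(err, errNotFound)) // true  - unwrapping still matches
}
```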
@@ -39,6 +39,7 @@ type Features struct {
|
||||
NoMultiThreading bool // set if can't have multiplethreads on one download open
|
||||
Overlay bool // this wraps one or more backends to add functionality
|
||||
ChunkWriterDoesntSeek bool // set if the chunk writer doesn't need to read the data more than once
|
||||
DoubleSlash bool // set if backend supports double slashes in paths
|
||||
|
||||
// Purge all files in the directory specified
|
||||
//
|
||||
@@ -383,6 +384,8 @@ func (ft *Features) Mask(ctx context.Context, f Fs) *Features {
|
||||
ft.PartialUploads = ft.PartialUploads && mask.PartialUploads
|
||||
ft.NoMultiThreading = ft.NoMultiThreading && mask.NoMultiThreading
|
||||
// ft.Overlay = ft.Overlay && mask.Overlay don't propagate Overlay
|
||||
ft.ChunkWriterDoesntSeek = ft.ChunkWriterDoesntSeek && mask.ChunkWriterDoesntSeek
|
||||
ft.DoubleSlash = ft.DoubleSlash && mask.DoubleSlash
|
||||
|
||||
if mask.Purge == nil {
|
||||
ft.Purge = nil
|
||||
|
||||
@@ -133,7 +133,7 @@ func TestCertificates(t *testing.T) {
|
||||
assert.Fail(t, "Certificate expired", "Certificate expires at %s, current time is %s", cert[0].NotAfter.Sub(startTime), time.Since(startTime))
|
||||
}
|
||||
|
||||
// Write some test data to fullfil the request
|
||||
// Write some test data to fulfill the request
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
_, _ = fmt.Fprintln(w, "test data")
|
||||
}))
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/lib/bucket"
|
||||
)
|
||||
|
||||
// DirSorted reads Object and *Dir into entries for the given Fs.
|
||||
@@ -43,7 +44,10 @@ func filterAndSortDir(ctx context.Context, entries fs.DirEntries, includeAll boo
|
||||
newEntries = entries[:0] // in place filter
|
||||
prefix := ""
|
||||
if dir != "" {
|
||||
prefix = dir + "/"
|
||||
prefix = dir
|
||||
if !bucket.IsAllSlashes(dir) {
|
||||
prefix += "/"
|
||||
}
|
||||
}
|
||||
for _, entry := range entries {
|
||||
ok := true
|
||||
@@ -77,10 +81,10 @@ func filterAndSortDir(ctx context.Context, entries fs.DirEntries, includeAll boo
|
||||
case !strings.HasPrefix(remote, prefix):
|
||||
ok = false
|
||||
fs.Errorf(entry, "Entry doesn't belong in directory %q (too short) - ignoring", dir)
|
||||
case remote == prefix:
|
||||
case remote == dir:
|
||||
ok = false
|
||||
fs.Errorf(entry, "Entry doesn't belong in directory %q (same as directory) - ignoring", dir)
|
||||
case strings.ContainsRune(remote[len(prefix):], '/'):
|
||||
case strings.ContainsRune(remote[len(prefix):], '/') && !bucket.IsAllSlashes(remote[len(prefix):]):
|
||||
ok = false
|
||||
fs.Errorf(entry, "Entry doesn't belong in directory %q (contains subdir) - ignoring", dir)
|
||||
default:
|
||||
|
||||
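The new `bucket.IsAllSlashes` checks let the lister treat names made up entirely of slashes (the `//`-style directories bucket based remotes can now expose) as direct children instead of rejecting them as subdirectories. A sketch of what such a predicate has to decide; this is an assumption about its shape, not necessarily the real `lib/bucket` code:

```go
package main

import "fmt"

// isAllSlashes reports whether the name consists only of '/' characters.
func isAllSlashes(s string) bool {
	if s == "" {
		return false
	}
	for _, c := range s {
		if c != '/' {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(isAllSlashes("//")) // true  - an all-slash "directory" entry
	fmt.Println(isAllSlashes("/a")) // false - a normal nested entry
	fmt.Println(isAllSlashes(""))   // false - empty name
}
```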
@@ -49,7 +49,8 @@ func TestFilterAndSortIncludeAll(t *testing.T) {
|
||||
|
||||
func TestFilterAndSortCheckDir(t *testing.T) {
|
||||
// Check the different kinds of error when listing "dir"
|
||||
da := mockdir.New("dir/")
|
||||
da := mockdir.New("dir")
|
||||
da2 := mockdir.New("dir/") // double slash dir - allowed for bucket based remotes
|
||||
oA := mockobject.Object("diR/a")
|
||||
db := mockdir.New("dir/b")
|
||||
oB := mockobject.Object("dir/B/sub")
|
||||
@@ -57,18 +58,19 @@ func TestFilterAndSortCheckDir(t *testing.T) {
|
||||
oC := mockobject.Object("dir/C")
|
||||
dd := mockdir.New("dir/d")
|
||||
oD := mockobject.Object("dir/D")
|
||||
entries := fs.DirEntries{da, oA, db, oB, dc, oC, dd, oD}
|
||||
entries := fs.DirEntries{da, da2, oA, db, oB, dc, oC, dd, oD}
|
||||
newEntries, err := filterAndSortDir(context.Background(), entries, true, "dir", nil, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t,
|
||||
fs.DirEntries{da2, oC, oD, db, dc, dd},
|
||||
newEntries,
|
||||
fs.DirEntries{oC, oD, db, dc, dd},
|
||||
)
|
||||
}
|
||||
|
||||
func TestFilterAndSortCheckDirRoot(t *testing.T) {
|
||||
// Check the different kinds of error when listing the root ""
|
||||
da := mockdir.New("")
|
||||
da2 := mockdir.New("/") // doubleslash dir allowed on bucket based remotes
|
||||
oA := mockobject.Object("A")
|
||||
db := mockdir.New("b")
|
||||
oB := mockobject.Object("B/sub")
|
||||
@@ -76,12 +78,12 @@ func TestFilterAndSortCheckDirRoot(t *testing.T) {
|
||||
oC := mockobject.Object("C")
|
||||
dd := mockdir.New("d")
|
||||
oD := mockobject.Object("D")
|
||||
entries := fs.DirEntries{da, oA, db, oB, dc, oC, dd, oD}
|
||||
entries := fs.DirEntries{da, da2, oA, db, oB, dc, oC, dd, oD}
|
||||
newEntries, err := filterAndSortDir(context.Background(), entries, true, "", nil, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t,
|
||||
fs.DirEntries{da2, oA, oC, oD, db, dc, dd},
|
||||
newEntries,
|
||||
fs.DirEntries{oA, oC, oD, db, dc, dd},
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -95,7 +95,7 @@ func LogValueHide(key string, value interface{}) LogValueItem {
|
||||
return LogValueItem{key: key, value: value, render: false}
|
||||
}
|
||||
|
||||
// String returns the representation of value. If render is fals this
|
||||
// String returns the representation of value. If render is false this
|
||||
// is an empty string so LogValueItem entries won't show in the
|
||||
// textual representation of logs.
|
||||
func (j LogValueItem) String() string {
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@@ -104,7 +105,7 @@ func ParseRemote(path string) (fsInfo *RegInfo, configName, fsPath string, conne
|
||||
m := ConfigMap("", nil, configName, parsed.Config)
|
||||
fsName, ok = m.Get("type")
|
||||
if !ok {
|
||||
return nil, "", "", nil, ErrorNotFoundInConfigFile
|
||||
return nil, "", "", nil, fmt.Errorf("%w (%q)", ErrorNotFoundInConfigFile, configName)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
||||
@@ -297,7 +297,7 @@ func (o *MemoryObject) Open(ctx context.Context, options ...fs.OpenOption) (io.R
|
||||
|
||||
// Update in to the object with the modTime given of the given size
|
||||
//
|
||||
// This re-uses the internal buffer if at all possible.
|
||||
// This reuses the internal buffer if at all possible.
|
||||
func (o *MemoryObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
size := src.Size()
|
||||
if size == 0 {
|
||||
|
||||
@@ -142,7 +142,7 @@ func TestMemoryObject(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
checkContent(o, "Rutabaga")
|
||||
assert.Equal(t, newNow, o.ModTime(context.Background()))
|
||||
assert.Equal(t, "Rutaba", string(content)) // check we re-used the buffer
|
||||
assert.Equal(t, "Rutaba", string(content)) // check we reused the buffer
|
||||
|
||||
// not within the buffer
|
||||
newStr := "0123456789"
|
||||
|
||||
@@ -358,7 +358,7 @@ func TestRemoteServing(t *testing.T) {
|
||||
URL: "[notfoundremote:]/",
|
||||
Status: http.StatusInternalServerError,
|
||||
Expected: `{
|
||||
"error": "failed to make Fs: didn't find section in config file",
|
||||
"error": "failed to make Fs: didn't find section in config file (\"notfoundremote\")",
|
||||
"input": null,
|
||||
"path": "/",
|
||||
"status": 500
|
||||
|
||||
@@ -726,7 +726,7 @@ func (s *syncCopyMove) markParentNotEmpty(entry fs.DirEntry) {
|
||||
parentDir = ""
|
||||
}
|
||||
delete(s.srcEmptyDirs, parentDir)
|
||||
if parentDir == "" {
|
||||
if parentDir == "" || parentDir == "/" {
|
||||
break
|
||||
}
|
||||
parentDir = path.Dir(parentDir)
|
||||
|
||||
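The extra `parentDir == "/"` condition matters because `path.Dir` never reduces `"/"` any further, so a loop that only stopped on the empty string could spin forever on paths that keep a leading slash. A quick illustration:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	fmt.Println(path.Dir("/a/b")) // "/a"
	fmt.Println(path.Dir("/a"))   // "/"
	fmt.Println(path.Dir("/"))    // "/" - never shrinks, hence the extra check
}
```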
@@ -1,7 +1,7 @@
|
||||
//go:build !go1.21
|
||||
//go:build !go1.22
|
||||
|
||||
package fs
|
||||
|
||||
// Upgrade to Go version 1.21 to compile rclone - latest stable go
|
||||
// Upgrade to Go version 1.22 to compile rclone - latest stable go
|
||||
// compiler recommended.
|
||||
func init() { Go_version_1_21_required_for_compilation() }
|
||||
func init() { Go_version_1_22_required_for_compilation() }
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package fs
|
||||
|
||||
// VersionTag of rclone
|
||||
var VersionTag = "v1.69.0"
|
||||
var VersionTag = "v1.70.0"
|
||||
|
||||
@@ -459,7 +459,7 @@ func Run(t *testing.T, opt *Opt) {
|
||||
subRemoteName, subRemoteLeaf, err = fstest.RandomRemoteName(remoteName)
|
||||
require.NoError(t, err)
|
||||
f, err = fs.NewFs(context.Background(), subRemoteName)
|
||||
if err == fs.ErrorNotFoundInConfigFile {
|
||||
if errors.Is(err, fs.ErrorNotFoundInConfigFile) {
|
||||
t.Logf("Didn't find %q in config file - skipping tests", remoteName)
|
||||
return
|
||||
}
|
||||
@@ -2121,6 +2121,144 @@ func Run(t *testing.T, opt *Opt) {
}
})

// Run tests for bucket based Fs
// TestIntegration/FsMkdir/FsPutFiles/Bucket
t.Run("Bucket", func(t *testing.T) {
// Test if this Fs is bucket based - this test won't work for wrapped bucket based backends.
if !f.Features().BucketBased {
t.Skip("Not a bucket based backend")
}
if f.Features().CanHaveEmptyDirectories {
t.Skip("Can have empty directories")
}
if !f.Features().DoubleSlash {
t.Skip("Can't have // in paths")
}
// Create some troublesome file names
fileNames := []string{
file1.Path,
file2.Path,
".leadingdot",
"/.leadingdot",
"///tripleslash",
"//doubleslash",
"dir/.leadingdot",
"dir///tripleslash",
"dir//doubleslash",
}
dirNames := []string{
"hello? sausage",
"hello? sausage/êé",
"hello? sausage/êé/Hello, 世界",
"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
"/",
"//",
"///",
"dir",
"dir/",
"dir//",
}
t1 := fstest.Time("2003-02-03T04:05:06.499999999Z")
var objs []fs.Object
for _, fileName := range fileNames[2:] {
contents := "bad file name: " + fileName
file := fstest.NewItem(fileName, contents, t1)
objs = append(objs, PutTestContents(ctx, t, f, &file, contents, true))
}

// Check they arrived
// This uses walk.Walk with a max size set to make sure we don't use ListR
check := func(f fs.Fs, dir string, wantFileNames, wantDirNames []string) {
t.Helper()
var gotFileNames, gotDirNames []string
require.NoError(t, walk.Walk(ctx, f, dir, true, 100, func(path string, entries fs.DirEntries, err error) error {
if err != nil {
return err
}
for _, entry := range entries {
if _, isObj := entry.(fs.Object); isObj {
gotFileNames = append(gotFileNames, entry.Remote())
} else {
gotDirNames = append(gotDirNames, entry.Remote())
}
}
return nil
}))
sort.Strings(wantDirNames)
sort.Strings(wantFileNames)
sort.Strings(gotDirNames)
sort.Strings(gotFileNames)
assert.Equal(t, wantFileNames, gotFileNames)
assert.Equal(t, wantDirNames, gotDirNames)
}
check(f, "", fileNames, dirNames)
check(f, "/", []string{
"/.leadingdot",
"///tripleslash",
"//doubleslash",
}, []string{
"//",
"///",
})
check(f, "//", []string{
"///tripleslash",
"//doubleslash",
}, []string{
"///",
})
check(f, "dir", []string{
"dir/.leadingdot",
"dir///tripleslash",
"dir//doubleslash",
}, []string{
"dir/",
"dir//",
})
check(f, "dir/", []string{
"dir///tripleslash",
"dir//doubleslash",
}, []string{
"dir//",
})
check(f, "dir//", []string{
"dir///tripleslash",
}, nil)

// Now create a backend not at the root of a bucket
f2, err := fs.NewFs(ctx, subRemoteName+"/dir")
require.NoError(t, err)
check(f2, "", []string{
".leadingdot",
"//tripleslash",
"/doubleslash",
}, []string{
"/",
"//",
})
check(f2, "/", []string{
"//tripleslash",
"/doubleslash",
}, []string{
"//",
})
check(f2, "//", []string{
"//tripleslash",
}, []string(nil))

// Remove the objects
for _, obj := range objs {
assert.NoError(t, obj.Remove(ctx))
}

// Check they are gone
fstest.CheckListingWithPrecision(t, f, []fstest.Item{file1, file2}, []string{
"hello? sausage",
"hello? sausage/êé",
"hello? sausage/êé/Hello, 世界",
"hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠",
}, fs.GetModifyWindow(ctx, f))
})

// State of remote at the moment the internal tests are called
InternalTestFiles = []fstest.Item{file1, file2}

@@ -2391,7 +2529,7 @@ func Run(t *testing.T, opt *Opt) {
var itemCopy = item
itemCopy.Path += ".copy"

// Set copy cutoff to mininum value so we make chunks
// Set copy cutoff to minimum value so we make chunks
origCutoff, err := do.SetCopyCutoff(minChunkSize)
require.NoError(t, err)
defer func() {

@@ -9,7 +9,7 @@ import (
"slices"

"github.com/rclone/rclone/fs"
yaml "gopkg.in/yaml.v2"
yaml "gopkg.in/yaml.v3"
)

// Test describes an integration test to run with `go test`

@@ -484,6 +484,9 @@ backends:
- backend: "smb"
remote: "TestSMB:rclone"
fastlist: false
- backend: "smb"
remote: "TestSMBKerberos:rclone"
fastlist: false
- backend: "storj"
remote: "TestStorj:"
fastlist: true

@@ -26,6 +26,8 @@ They should be bound to localhost so they are not accessible externally.
| 28630 | TestSMB |
| 28631 | TestFTPProftpd |
| 28632 | TestSwiftAIOsegments |
| 28633 | TestSMBKerberos |
| 28634 | TestSMBKerberos |
| 38081 | TestWebdavOwncloud |

## Non localhost tests

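The config hunks above bump the parser from gopkg.in/yaml.v2 to gopkg.in/yaml.v3 and register the new TestSMBKerberos backend. yaml.v3 keeps the same Unmarshal signature, so the import swap is largely drop-in; a small sketch of reading entries shaped like the ones added above (the struct and its field tags are assumptions for illustration, not the real test_all types):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v3"
)

// backend mirrors the shape of the entries in the backends list above.
type backend struct {
	Backend  string `yaml:"backend"`
	Remote   string `yaml:"remote"`
	FastList bool   `yaml:"fastlist"`
}

func main() {
	doc := []byte(`
- backend: "smb"
  remote: "TestSMBKerberos:rclone"
  fastlist: false
`)
	var backends []backend
	if err := yaml.Unmarshal(doc, &backends); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", backends)
}
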
74
fstest/testserver/init.d/TestSMBKerberos
Executable file
@@ -0,0 +1,74 @@
#!/usr/bin/env bash

set -e

IMAGE=rclone/test-smb-kerberos
NAME=smb-kerberos
USER=rclone
DOMAIN=RCLONE
REALM=RCLONE.LOCAL
SMB_PORT=28633
KRB5_PORT=28634

. $(dirname "$0")/docker.bash

start() {
docker build -t ${IMAGE} - <<EOF
FROM alpine:3.21
RUN apk add --no-cache samba-dc
RUN rm -rf /etc/samba/smb.conf /var/lib/samba \
&& mkdir -p /var/lib/samba/private \
&& samba-tool domain provision \
--use-rfc2307 \
--option acl_xattr:security_acl_name=user.NTACL \
--realm=$REALM \
--domain=$DOMAIN \
--server-role=dc \
--dns-backend=SAMBA_INTERNAL \
--host-name=localhost \
&& samba-tool user add --random-password $USER \
&& mkdir -m 777 /share /rclone \
&& cat <<EOS >> /etc/samba/smb.conf
[public]
path = /share
browseable = yes
read only = yes
guest ok = yes
[rclone]
path = /rclone
browseable = yes
read only = no
guest ok = no
valid users = rclone
EOS
CMD ["samba", "-i"]
EOF

docker run --rm -d --name ${NAME} \
-p 127.0.0.1:${SMB_PORT}:445 \
-p 127.0.0.1:${SMB_PORT}:445/udp \
-p 127.0.0.1:${KRB5_PORT}:88 \
${IMAGE}

# KRB5_CONFIG and KRB5CCNAME are set by the caller
cat > ${KRB5_CONFIG} <<EOF
[libdefaults]
default_realm = ${REALM}
[realms]
${REALM} = {
kdc = localhost
}
EOF
docker cp ${KRB5_CONFIG} ${NAME}:/etc/krb5.conf
docker exec ${NAME} samba-tool user get-kerberos-ticket rclone --output-krb5-ccache=/tmp/ccache
docker cp ${NAME}:/tmp/ccache ${KRB5CCNAME}
sed -i -e "s/localhost/localhost:${KRB5_PORT}/" ${KRB5_CONFIG}

echo type=smb
echo host=localhost
echo port=$SMB_PORT
echo use_kerberos=true
echo _connect=127.0.0.1:${SMB_PORT}
}

. $(dirname "$0")/run.bash
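The init.d script above provisions a Samba AD DC in Docker, writes a krb5.conf, and exports a ticket cache; per the comment in the script, KRB5_CONFIG and KRB5CCNAME are supplied by the caller. A hedged sketch of how a test might consume that output once the harness has turned the echoed `type=smb ... use_kerberos=true` lines into a TestSMBKerberos: remote (the file paths and the List call are illustrative assumptions):

package main

import (
	"context"
	"fmt"
	"os"

	_ "github.com/rclone/rclone/backend/smb"
	"github.com/rclone/rclone/fs"
)

func main() {
	ctx := context.Background()

	// Point the Kerberos client at what the script produced. The exact
	// paths are chosen by the test harness; these are placeholders.
	os.Setenv("KRB5_CONFIG", "/tmp/krb5.conf")
	os.Setenv("KRB5CCNAME", "/tmp/ccache")

	// "TestSMBKerberos:rclone" matches the backend entry added to the
	// integration test config earlier in this diff.
	f, err := fs.NewFs(ctx, "TestSMBKerberos:rclone")
	if err != nil {
		panic(err)
	}

	entries, err := f.List(ctx, "")
	if err != nil {
		panic(err)
	}
	for _, e := range entries {
		fmt.Println(e.Remote())
	}
}
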
113
go.mod
113
go.mod
@@ -1,15 +1,15 @@
|
||||
module github.com/rclone/rclone
|
||||
|
||||
go 1.21
|
||||
go 1.22.0
|
||||
|
||||
require (
|
||||
bazil.org/fuse v0.0.0-20230120002735-62a210ff1fd5
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.4.0
|
||||
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358
|
||||
github.com/Files-com/files-sdk-go/v3 v3.2.97
|
||||
github.com/Files-com/files-sdk-go/v3 v3.2.107
|
||||
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd
|
||||
github.com/a8m/tree v0.0.0-20240104212747-2c8764a5f17e
|
||||
github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3
|
||||
@@ -17,11 +17,11 @@ require (
|
||||
github.com/anacrolix/dms v1.7.1
|
||||
github.com/anacrolix/log v0.16.0
|
||||
github.com/atotto/clipboard v0.1.4
|
||||
github.com/aws/aws-sdk-go-v2 v1.32.6
|
||||
github.com/aws/aws-sdk-go-v2/config v1.28.6
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.47
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.43
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.71.0
|
||||
github.com/aws/aws-sdk-go-v2 v1.32.8
|
||||
github.com/aws/aws-sdk-go-v2/config v1.28.10
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.51
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.48
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.72.2
|
||||
github.com/aws/smithy-go v1.22.1
|
||||
github.com/buengese/sgzip v0.1.1
|
||||
github.com/cloudinary/cloudinary-go/v2 v2.9.0
|
||||
@@ -31,11 +31,11 @@ require (
|
||||
github.com/coreos/go-systemd/v22 v22.5.0
|
||||
github.com/dop251/scsu v0.0.0-20220106150536-84ac88021d00
|
||||
github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5
|
||||
github.com/gabriel-vasile/mimetype v1.4.7
|
||||
github.com/gdamore/tcell/v2 v2.7.4
|
||||
github.com/go-chi/chi/v5 v5.1.0
|
||||
github.com/gabriel-vasile/mimetype v1.4.8
|
||||
github.com/gdamore/tcell/v2 v2.8.1
|
||||
github.com/go-chi/chi/v5 v5.2.0
|
||||
github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348
|
||||
github.com/go-git/go-billy/v5 v5.6.0
|
||||
github.com/go-git/go-billy/v5 v5.6.2
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/hanwen/go-fuse/v2 v2.7.2
|
||||
github.com/henrybear327/Proton-API-Bridge v1.0.0
|
||||
@@ -47,56 +47,56 @@ require (
|
||||
github.com/klauspost/compress v1.17.11
|
||||
github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988
|
||||
github.com/koofr/go-koofrclient v0.0.0-20221207135200-cbd7fc9ad6a6
|
||||
github.com/mattn/go-colorable v0.1.13
|
||||
github.com/mattn/go-colorable v0.1.14
|
||||
github.com/mattn/go-runewidth v0.0.16
|
||||
github.com/minio/minio-go/v7 v7.0.74
|
||||
github.com/minio/minio-go/v7 v7.0.83
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/moby/sys/mountinfo v0.7.2
|
||||
github.com/ncw/swift/v2 v2.0.3
|
||||
github.com/oracle/oci-go-sdk/v65 v65.80.0
|
||||
github.com/oracle/oci-go-sdk/v65 v65.81.1
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/pkg/sftp v1.13.7
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
|
||||
github.com/prometheus/client_golang v1.20.5
|
||||
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8
|
||||
github.com/quasilyte/go-ruleguard/dsl v0.3.22
|
||||
github.com/rclone/gofakes3 v0.0.3
|
||||
github.com/rclone/gofakes3 v0.0.4
|
||||
github.com/rfjakob/eme v1.1.2
|
||||
github.com/rivo/uniseg v0.4.7
|
||||
github.com/rogpeppe/go-internal v1.12.0
|
||||
github.com/rogpeppe/go-internal v1.13.1
|
||||
github.com/shirou/gopsutil/v4 v4.24.12
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20241213150454-ec0027fb0002
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5
|
||||
github.com/unknwon/goconfig v1.0.0
|
||||
github.com/willscott/go-nfs v0.0.3-0.20240425122109-91bc38957cc9
|
||||
github.com/winfsp/cgofuse v1.5.1-0.20221118130120-84c0898ad2e0
|
||||
github.com/xanzy/ssh-agent v0.3.3
|
||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78
|
||||
github.com/yunify/qingstor-sdk-go/v3 v3.2.0
|
||||
github.com/zeebo/blake3 v0.2.3
|
||||
go.etcd.io/bbolt v1.3.10
|
||||
github.com/zeebo/blake3 v0.2.4
|
||||
go.etcd.io/bbolt v1.3.11
|
||||
goftp.io/server/v2 v2.0.1
|
||||
golang.org/x/crypto v0.31.0
|
||||
golang.org/x/net v0.33.0
|
||||
golang.org/x/oauth2 v0.24.0
|
||||
golang.org/x/crypto v0.32.0
|
||||
golang.org/x/net v0.34.0
|
||||
golang.org/x/oauth2 v0.25.0
|
||||
golang.org/x/sync v0.10.0
|
||||
golang.org/x/sys v0.28.0
|
||||
golang.org/x/sys v0.29.0
|
||||
golang.org/x/text v0.21.0
|
||||
golang.org/x/time v0.8.0
|
||||
google.golang.org/api v0.211.0
|
||||
golang.org/x/time v0.9.0
|
||||
google.golang.org/api v0.216.0
|
||||
gopkg.in/validator.v2 v2.0.1
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
storj.io/uplink v1.13.1
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go/auth v0.12.1 // indirect
|
||||
cloud.google.com/go/auth v0.13.0 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.5.2 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.6.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
|
||||
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf // indirect
|
||||
@@ -110,18 +110,18 @@ require (
|
||||
github.com/andybalholm/cascadia v1.3.2 // indirect
|
||||
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.25 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.27 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.9 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.6 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bradenaw/juniper v0.15.2 // indirect
|
||||
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
|
||||
@@ -140,20 +140,20 @@ require (
|
||||
github.com/emersion/go-vcard v0.0.0-20230815062825-8fda7d206ec9 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/flynn/noise v1.0.1 // indirect
|
||||
github.com/gdamore/encoding v1.0.0 // indirect
|
||||
github.com/gdamore/encoding v1.0.1 // indirect
|
||||
github.com/geoffgarside/ber v1.1.0 // indirect
|
||||
github.com/go-ini/ini v1.67.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||
github.com/go-resty/resty/v2 v2.11.0 // indirect
|
||||
github.com/goccy/go-json v0.10.3 // indirect
|
||||
github.com/goccy/go-json v0.10.4 // indirect
|
||||
github.com/gofrs/flock v0.8.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
|
||||
github.com/google/s2a-go v0.1.8 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.14.0 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.14.1 // indirect
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e // indirect
|
||||
github.com/gorilla/schema v1.4.1 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
@@ -169,7 +169,7 @@ require (
|
||||
github.com/jcmturner/goidentity/v6 v6.0.1 // indirect
|
||||
github.com/jcmturner/rpc/v2 v2.0.3 // indirect
|
||||
github.com/jtolio/noiseconn v0.0.0-20231127013910-f6d9ecbf1de7 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.8 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.9 // indirect
|
||||
github.com/kr/fs v0.1.0 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/lpar/date v1.0.0 // indirect
|
||||
@@ -177,6 +177,7 @@ require (
|
||||
github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/minio/md5-simd v1.1.2 // indirect
|
||||
github.com/minio/xxml v0.0.3 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/onsi/ginkgo v1.16.5 // indirect
|
||||
github.com/panjf2000/ants/v2 v2.9.1 // indirect
|
||||
@@ -189,7 +190,7 @@ require (
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93 // indirect
|
||||
github.com/relvacode/iso8601 v1.3.0 // indirect
|
||||
github.com/rs/xid v1.5.0 // indirect
|
||||
github.com/rs/xid v1.6.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect
|
||||
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect
|
||||
@@ -203,16 +204,16 @@ require (
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
github.com/zeebo/errs v1.3.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
|
||||
go.opentelemetry.io/otel v1.29.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.29.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.29.0 // indirect
|
||||
go.opentelemetry.io/otel v1.31.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.31.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.31.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
|
||||
golang.org/x/mod v0.19.0 // indirect
|
||||
golang.org/x/tools v0.23.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241206012308-a4fef0638583 // indirect
|
||||
google.golang.org/grpc v1.67.1 // indirect
|
||||
google.golang.org/protobuf v1.35.2 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
golang.org/x/mod v0.22.0 // indirect
|
||||
golang.org/x/tools v0.29.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d // indirect
|
||||
google.golang.org/grpc v1.69.2 // indirect
|
||||
google.golang.org/protobuf v1.36.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
moul.io/http2curl/v2 v2.3.0 // indirect
|
||||
storj.io/common v0.0.0-20240812101423-26b53789c348 // indirect
|
||||
storj.io/drpc v0.0.35-0.20240709171858-0075ac871661 // indirect
|
||||
@@ -223,9 +224,9 @@ require (
|
||||
|
||||
require (
|
||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||
github.com/ProtonMail/go-crypto v1.1.3
|
||||
github.com/ProtonMail/go-crypto v1.1.4
|
||||
github.com/golang-jwt/jwt/v4 v4.5.1
|
||||
github.com/pkg/xattr v0.4.10
|
||||
golang.org/x/mobile v0.0.0-20240716161057-1ad2df20a8b6
|
||||
golang.org/x/term v0.27.0
|
||||
golang.org/x/mobile v0.0.0-20250106192035-c31d5b91ecc3
|
||||
golang.org/x/term v0.28.0
|
||||
)
|
||||
|
||||
245
go.sum
245
go.sum
@@ -15,8 +15,8 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
|
||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go/auth v0.12.1 h1:n2Bj25BUMM0nvE9D2XLTiImanwZhO3DkfWSYS/SAJP4=
|
||||
cloud.google.com/go/auth v0.12.1/go.mod h1:BFMu+TNpF3DmvfBO9ClqTR/SiqVIm7LukKF9mbendF4=
|
||||
cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs=
|
||||
cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
@@ -25,8 +25,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo=
|
||||
cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k=
|
||||
cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
|
||||
cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
@@ -39,8 +39,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw=
|
||||
@@ -61,8 +61,8 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mx
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/Files-com/files-sdk-go/v3 v3.2.97 h1:c+mQoiES/21JrHDAxJLCYICJO+bu8Clv0ZDNZe7Ndyk=
|
||||
github.com/Files-com/files-sdk-go/v3 v3.2.97/go.mod h1:Y/bCHoPJNPKz2hw1ADXjQXJP378HODwK+g/5SR2gqfU=
|
||||
github.com/Files-com/files-sdk-go/v3 v3.2.107 h1:TRDGQGYANuuMxX8JU++oQKEGitJpKWC1EB0SbNXRVqI=
|
||||
github.com/Files-com/files-sdk-go/v3 v3.2.107/go.mod h1:Y/bCHoPJNPKz2hw1ADXjQXJP378HODwK+g/5SR2gqfU=
|
||||
github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
|
||||
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
|
||||
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
|
||||
@@ -77,8 +77,8 @@ github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e h1:lCsqUUACrcM
|
||||
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e/go.mod h1:Og5/Dz1MiGpCJn51XujZwxiLG7WzvvjE5PRpZBQmAHo=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230321155629-9a39f2531310/go.mod h1:8TI4H3IbrackdNgv+92dI+rhpCaLqM0IfpgCgenFvRE=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
|
||||
github.com/ProtonMail/go-crypto v1.1.3 h1:nRBOetoydLeUb4nHajyO2bKqMLfWQ/ZPwkXqXxPxCFk=
|
||||
github.com/ProtonMail/go-crypto v1.1.3/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
|
||||
github.com/ProtonMail/go-crypto v1.1.4 h1:G5U5asvD5N/6/36oIw3k2bOfBn5XVcZrb7PBjzzKKoE=
|
||||
github.com/ProtonMail/go-crypto v1.1.4/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
|
||||
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k=
|
||||
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw=
|
||||
github.com/ProtonMail/go-srp v0.0.7 h1:Sos3Qk+th4tQR64vsxGIxYpN3rdnG9Wf9K4ZloC1JrI=
|
||||
@@ -108,42 +108,42 @@ github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc h1:LoL75er
|
||||
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc/go.mod h1:w648aMHEgFYS6xb0KVMMtZ2uMeemhiKCuD2vj6gY52A=
|
||||
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
|
||||
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
|
||||
github.com/aws/aws-sdk-go-v2 v1.32.6 h1:7BokKRgRPuGmKkFMhEg/jSul+tB9VvXhcViILtfG8b4=
|
||||
github.com/aws/aws-sdk-go-v2 v1.32.6/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U=
|
||||
github.com/aws/aws-sdk-go-v2 v1.32.8 h1:cZV+NUS/eGxKXMtmyhtYPJ7Z4YLoI/V8bkTdRZfYhGo=
|
||||
github.com/aws/aws-sdk-go-v2 v1.32.8/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 h1:lL7IfaFzngfx0ZwUGOZdsFFnQ5uLvR0hWqqhyE7Q9M8=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7/go.mod h1:QraP0UcVlQJsmHfioCrveWOC1nbiWUl3ej08h4mXWoc=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.28.6 h1:D89IKtGrs/I3QXOLNTH93NJYtDhm8SYa9Q5CsPShmyo=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.28.6/go.mod h1:GDzxJ5wyyFSCoLkS+UhGB0dArhb9mI+Co4dHtoTxbko=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.47 h1:48bA+3/fCdi2yAwVt+3COvmatZ6jUDNkDTIsqDiMUdw=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.47/go.mod h1:+KdckOejLW3Ks3b0E3b5rHsr2f9yuORBum0WPnE5o5w=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 h1:AmoU1pziydclFT/xRV+xXE/Vb8fttJCLRPv8oAkprc0=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21/go.mod h1:AjUdLYe4Tgs6kpH4Bv7uMZo7pottoyHMn4eTcIcneaY=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.43 h1:iLdpkYZ4cXIQMO7ud+cqMWR1xK5ESbt1rvN77tRi1BY=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.43/go.mod h1:OgbsKPAswXDd5kxnR4vZov69p3oYjbvUyIRBAAV0y9o=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 h1:s/fF4+yDQDoElYhfIVvSNyeCydfbuTKzhxSXDXCPasU=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25/go.mod h1:IgPfDv5jqFIzQSNbUEMoitNooSMXjRSDkhXv8jiROvU=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 h1:ZntTCl5EsYnhN/IygQEUugpdwbhdkom9uHcbCftiGgA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25/go.mod h1:DBdPrgeocww+CSl1C8cEV8PN1mHMBhuCDLpXezyvWkE=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.28.10 h1:fKODZHfqQu06pCzR69KJ3GuttraRJkhlC8g80RZ0Dfg=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.28.10/go.mod h1:PvdxRYZ5Um9QMq9PQ0zHHNdtKK+he2NHtFCUFMXWXeg=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.51 h1:F/9Sm6Y6k4LqDesZDPJCLxQGXNNHd/ZtJiWd0lCZKRk=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.51/go.mod h1:TKbzCHm43AoPyA+iLGGcruXd4AFhF8tOmLex2R9jWNQ=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23 h1:IBAoD/1d8A8/1aA8g4MBVtTRHhXRiNAgwdbo/xRM2DI=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23/go.mod h1:vfENuCM7dofkgKpYzuzf1VT1UKkA/YL3qanfBn7HCaA=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.48 h1:XnXVe2zRyPf0+fAW5L05esmngvBpC6DQZK7oZB/z/Co=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.48/go.mod h1:S3wey90OrS4f7kYxH6PT175YyEcHTORY07++HurMaRM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27 h1:jSJjSBzw8VDIbWv+mmvBSP8ezsztMYJGH+eKqi9AmNs=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27/go.mod h1:/DAhLbFRgwhmvJdOfSm+WwikZrCuUJiA4WgJG0fTNSw=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27 h1:l+X4K77Dui85pIj5foXDhPlnqcNRG2QUyvca300lXh8=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27/go.mod h1:KvZXSFEXm6x84yE8qffKvT3x8J5clWnVFXphpohhzJ8=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.25 h1:r67ps7oHCYnflpgDy2LZU0MAQtQbYIOqNNnqGO6xQkE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.25/go.mod h1:GrGY+Q4fIokYLtjCVB/aFfCVL6hhGUFl8inD18fDalE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.27 h1:AmB5QxnD+fBFrg9LcqzkgF/CaYvMyU/BTlejG4t1S7Q=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.27/go.mod h1:Sai7P3xTiyv9ZUYO3IFxMnmiIP759/67iQbU4kdmkyU=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.6 h1:HCpPsWqmYQieU7SS6E9HXfdAMSud0pteVXieJmcpIRI=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.6/go.mod h1:ngUiVRCco++u+soRRVBIvBZxSMMvOVMXA4PJ36JLfSw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 h1:50+XsN70RS7dwJ2CkVNXzj7U2L1HKP8nqTd3XWEXBN4=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6/go.mod h1:WqgLmwY7so32kG01zD8CPTJWVWM+TzJoOVHwTg4aPug=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.6 h1:BbGDtTi0T1DYlmjBiCr/le3wzhA37O8QTC5/Ab8+EXk=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.6/go.mod h1:hLMJt7Q8ePgViKupeymbqI0la+t9/iYFBjxQCFwuAwI=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.71.0 h1:nyuzXooUNJexRT0Oy0UQY6AhOzxPxhtt4DcBIHyCnmw=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.71.0/go.mod h1:sT/iQz8JK3u/5gZkT+Hmr7GzVZehUMkRZpOaAwYXeGY=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 h1:rLnYAfXQ3YAccocshIH5mzNNwZBkBo+bP6EhIxak6Hw=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.7/go.mod h1:ZHtuQJ6t9A/+YDuxOLnbryAmITtr8UysSny3qcyvJTc=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 h1:JnhTZR3PiYDNKlXy50/pNeix9aGMo6lLpXwJ1mw8MD4=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6/go.mod h1:URronUEGfXZN1VpdktPSD1EkAL9mfrV+2F4sjH38qOY=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 h1:s4074ZO1Hk8qv65GqNXqDjmkf4HSQqJukaLuuW0TpDA=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.2/go.mod h1:mVggCnIWoM09jP71Wh+ea7+5gAp53q+49wDFs1SW5z8=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.8 h1:iwYS40JnrBeA9e9aI5S6KKN4EB2zR4iUVYN0nwVivz4=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.8/go.mod h1:Fm9Mi+ApqmFiknZtGpohVcBGvpTu542VC4XO9YudRi0=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8 h1:cWno7lefSH6Pp+mSznagKCgfDGeZRin66UvYUqAkyeA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8/go.mod h1:tPD+VjU3ABTBoEJ3nctu5Nyg4P4yjqSH5bJGGkY4+XE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.8 h1:/Mn7gTedG86nbpjT4QEKsN1D/fThiYe1qvq7WsBGNHg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.8/go.mod h1:Ae3va9LPmvjj231ukHB6UeT8nS7wTPfC3tMZSZMwNYg=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.72.2 h1:a7aQ3RW+ug4IbhoQp29NZdc7vqrzKZZfWZSaQAXOZvQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.72.2/go.mod h1:xMekrnhmJ5aqmyxtmALs7mlvXw5xRh+eYjOjvrIIFJ4=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.9 h1:YqtxripbjWb2QLyzRK9pByfEDvgg95gpC2AyDq4hFE8=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.9/go.mod h1:lV8iQpg6OLOfBnqbGMBKYjilBlf633qwHnBEiMSPoHY=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8 h1:6dBT1Lz8fK11m22R+AqfRsFn8320K0T5DTGxxOQBSMw=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8/go.mod h1:/kiBvRQXBc6xeJTYzhSdGvJ5vm1tjaDEjH+MSeRJnlY=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.6 h1:VwhTrsTuVn52an4mXx29PqRzs2Dvu921NpGk7y43tAM=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.6/go.mod h1:+8h7PZb3yY5ftmVLD7ocEoE98hdc8PoKS0H3wfx1dlc=
|
||||
github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro=
|
||||
github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
@@ -237,24 +237,24 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/gabriel-vasile/mimetype v1.4.7 h1:SKFKl7kD0RiPdbht0s7hFtjl489WcQ1VyPW8ZzUMYCA=
|
||||
github.com/gabriel-vasile/mimetype v1.4.7/go.mod h1:GDlAgAyIRT27BhFl53XNAFtfjzOkLaF35JdEG0P7LtU=
|
||||
github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko=
|
||||
github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg=
|
||||
github.com/gdamore/tcell/v2 v2.7.4 h1:sg6/UnTM9jGpZU+oFYAsDahfchWAFW8Xx2yFinNSAYU=
|
||||
github.com/gdamore/tcell/v2 v2.7.4/go.mod h1:dSXtXTSK0VsW1biw65DZLZ2NKr7j0qP/0J7ONmsraWg=
|
||||
github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
|
||||
github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8=
|
||||
github.com/gdamore/encoding v1.0.1 h1:YzKZckdBL6jVt2Gc+5p82qhrGiqMdG/eNs6Wy0u3Uhw=
|
||||
github.com/gdamore/encoding v1.0.1/go.mod h1:0Z0cMFinngz9kS1QfMjCP8TY7em3bZYeeklsSDPivEo=
|
||||
github.com/gdamore/tcell/v2 v2.8.1 h1:KPNxyqclpWpWQlPLx6Xui1pMk8S+7+R37h3g07997NU=
|
||||
github.com/gdamore/tcell/v2 v2.8.1/go.mod h1:bj8ori1BG3OYMjmb3IklZVWfZUJ1UBQt9JXrOCOhGWw=
|
||||
github.com/geoffgarside/ber v1.1.0 h1:qTmFG4jJbwiSzSXoNJeHcOprVzZ8Ulde2Rrrifu5U9w=
|
||||
github.com/geoffgarside/ber v1.1.0/go.mod h1:jVPKeCbj6MvQZhwLYsGwaGI52oUorHoHKNecGT85ZCc=
|
||||
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
|
||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||
github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
|
||||
github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
|
||||
github.com/go-chi/chi/v5 v5.1.0 h1:acVI1TYaD+hhedDJ3r54HyA6sExp3HfXq7QWEEY/xMw=
|
||||
github.com/go-chi/chi/v5 v5.1.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
|
||||
github.com/go-chi/chi/v5 v5.2.0 h1:Aj1EtB0qR2Rdo2dG4O94RIU35w2lvQSj6BRA4+qwFL0=
|
||||
github.com/go-chi/chi/v5 v5.2.0/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
|
||||
github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348 h1:JnrjqG5iR07/8k7NqrLNilRsl3s1EPRQEGvbPyOce68=
|
||||
github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348/go.mod h1:Czxo/d1g948LtrALAZdL04TL/HnkopquAjxYUuI02bo=
|
||||
github.com/go-git/go-billy/v5 v5.6.0 h1:w2hPNtoehvJIxR00Vb4xX94qHQi/ApZfX+nBE2Cjio8=
|
||||
github.com/go-git/go-billy/v5 v5.6.0/go.mod h1:sFDq7xD3fn3E0GOwUSZqHo9lrkmx8xJhA0ZrfvjBRGM=
|
||||
github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM=
|
||||
github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
@@ -280,8 +280,8 @@ github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg78
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
|
||||
github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
||||
github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM=
|
||||
github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
|
||||
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
|
||||
@@ -315,6 +315,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
@@ -346,8 +348,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gT
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o=
|
||||
github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk=
|
||||
github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q=
|
||||
github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
@@ -419,9 +421,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
|
||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
|
||||
github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
|
||||
github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY=
|
||||
github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988 h1:CjEMN21Xkr9+zwPmZPaJJw+apzVbjGL5uK/6g9Q2jGU=
|
||||
github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988/go.mod h1:/agobYum3uo/8V6yPVnq+R82pyVGCeuWW5arT4Txn8A=
|
||||
@@ -447,20 +448,20 @@ github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
|
||||
github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed h1:036IscGBfJsFIgJQzlui7nK1Ncm0tp2ktmPj8xO4N/0=
|
||||
github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
|
||||
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
|
||||
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
|
||||
github.com/minio/minio-go/v6 v6.0.46/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg=
|
||||
github.com/minio/minio-go/v7 v7.0.74 h1:fTo/XlPBTSpo3BAMshlwKL5RspXRv9us5UeHEGYCFe0=
|
||||
github.com/minio/minio-go/v7 v7.0.74/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa+LDEPogjD8=
|
||||
github.com/minio/minio-go/v7 v7.0.83 h1:W4Kokksvlz3OKf3OqIlzDNKd4MERlC2oN8YptwJ0+GA=
|
||||
github.com/minio/minio-go/v7 v7.0.83/go.mod h1:57YXpvc5l3rjPdhqNrDsvVlY0qPI6UTk1bflAe+9doY=
|
||||
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
|
||||
github.com/minio/xxml v0.0.3 h1:ZIpPQpfyG5uZQnqqC0LZuWtPk/WT8G/qkxvO6jb7zMU=
|
||||
github.com/minio/xxml v0.0.3/go.mod h1:wcXErosl6IezQIMEWSK/LYC2VS7LJ1dAkgvuyIN3aH4=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
|
||||
@@ -486,8 +487,8 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
|
||||
github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
|
||||
github.com/oracle/oci-go-sdk/v65 v65.80.0 h1:Rr7QLMozd2DfDBKo6AB3DzLYQxAwuOG118+K5AAD5E8=
|
||||
github.com/oracle/oci-go-sdk/v65 v65.80.0/go.mod h1:IBEV9l1qBzUpo7zgGaRUhbB05BVfcDGYRFBCPlTcPp0=
|
||||
github.com/oracle/oci-go-sdk/v65 v65.81.1 h1:JYc47bk8n/MUchA2KHu1ggsCQzlJZQLJ+tTKfOho00E=
|
||||
github.com/oracle/oci-go-sdk/v65 v65.81.1/go.mod h1:IBEV9l1qBzUpo7zgGaRUhbB05BVfcDGYRFBCPlTcPp0=
|
||||
github.com/panjf2000/ants/v2 v2.9.1 h1:Q5vh5xohbsZXGcD6hhszzGqB7jSSc2/CRr3QKIga8Kw=
|
||||
github.com/panjf2000/ants/v2 v2.9.1/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
|
||||
@@ -529,8 +530,8 @@ github.com/quic-go/quic-go v0.40.1 h1:X3AGzUNFs0jVuO3esAGnTfvdgvL4fq655WaOi1snv1
|
||||
github.com/quic-go/quic-go v0.40.1/go.mod h1:PeN7kuVJ4xZbxSv/4OX6S1USOX8MJvydwpTx31vx60c=
|
||||
github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93 h1:UVArwN/wkKjMVhh2EQGC0tEc1+FqiLlvYXY5mQ2f8Wg=
|
||||
github.com/rasky/go-xdr v0.0.0-20170124162913-1a41d1a06c93/go.mod h1:Nfe4efndBz4TibWycNE+lqyJZiMX4ycx+QKV8Ta0f/o=
|
||||
github.com/rclone/gofakes3 v0.0.3 h1:0sKCxJ8TUUAG5KXGuc/fcDKGnzB/j6IjNQui9ntIZPo=
|
||||
github.com/rclone/gofakes3 v0.0.3/go.mod h1:z7+o2VUwitO0WuVHReQlOW9jZ03LpeJ0PUFSULyTIds=
|
||||
github.com/rclone/gofakes3 v0.0.4 h1:LswpC49VY/UJ1zucoL5ktnOEX6lq3qK7e1aFIAfqCbk=
|
||||
github.com/rclone/gofakes3 v0.0.4/go.mod h1:j/UoS+2/Mr7xAlfKhyVC58YyFQmh9uoQA5YZQXQUqmg=
|
||||
github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4=
|
||||
github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA=
|
||||
github.com/relvacode/iso8601 v1.3.0 h1:HguUjsGpIMh/zsTczGN3DVJFxTU/GX+MMmzcKoMO7ko=
|
||||
@@ -542,10 +543,10 @@ github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc
|
||||
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
|
||||
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
|
||||
github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
|
||||
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
|
||||
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=
|
||||
@@ -599,8 +600,8 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20241213150454-ec0027fb0002 h1:jevGbwKzMmHLgHAaDaMJLQX3jpXUWjUvnsrPeMgkM7o=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20241213150454-ec0027fb0002/go.mod h1:0Mv/XWQoRWF7d7jkc4DufsAJQg8xyZ5NtCkY59wECQY=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5 h1:Sa+sR8aaAMFwxhXWENEnE6ZpqhZ9d7u1RT2722Rw6hc=
|
||||
github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5/go.mod h1:UdZiFUFu6e2WjjtjxivwXWcwc1N/8zgbkBR9QNucUOY=
|
||||
github.com/tailscale/depaware v0.0.0-20210622194025-720c4b409502/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8=
|
||||
github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4=
|
||||
github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0=
|
||||
@@ -633,17 +634,16 @@ github.com/yunify/qingstor-sdk-go/v3 v3.2.0 h1:9sB2WZMgjwSUNZhrgvaNGazVltoFUUfuS
|
||||
github.com/yunify/qingstor-sdk-go/v3 v3.2.0/go.mod h1:KciFNuMu6F4WLk9nGwwK69sCGKLCdd9f97ac/wfumS4=
|
||||
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
|
||||
github.com/zeebo/assert v1.3.1 h1:vukIABvugfNMZMQO1ABsyQDJDTVQbn+LWSMy1ol1h6A=
|
||||
github.com/zeebo/assert v1.3.1/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
|
||||
github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg=
|
||||
github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ=
|
||||
github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI=
|
||||
github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE=
|
||||
github.com/zeebo/errs v1.3.0 h1:hmiaKqgYZzcVgRL1Vkc1Mn2914BbzB0IBxs+ebeutGs=
|
||||
github.com/zeebo/errs v1.3.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
|
||||
github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
|
||||
github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
|
||||
go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
|
||||
go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
|
||||
go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
|
||||
go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
@@ -651,12 +651,16 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
|
||||
go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw=
|
||||
go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8=
|
||||
go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc=
|
||||
go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8=
|
||||
go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4=
|
||||
go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ=
|
||||
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
|
||||
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
|
||||
go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
|
||||
go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
|
||||
go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
|
||||
go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
|
||||
go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
|
||||
go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
|
||||
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
|
||||
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
|
||||
go.uber.org/mock v0.3.0 h1:3mUxI1No2/60yUYax92Pt8eNOEecx2D3lcXZh2NEZJo=
|
||||
@@ -678,10 +682,14 @@ golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0
|
||||
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
||||
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -708,8 +716,8 @@ golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPI
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mobile v0.0.0-20240716161057-1ad2df20a8b6 h1:/VlmIrkuLf2wzPjkZ8imSpckHoW7Y71h66dxbLHSpi8=
golang.org/x/mobile v0.0.0-20240716161057-1ad2df20a8b6/go.mod h1:TCsc78+c4cqb8IKEosz2LwJ6YRNkIjMuAYeHYjchGDE=
golang.org/x/mobile v0.0.0-20250106192035-c31d5b91ecc3 h1:8LrYkH99trX3onYF3dT9frUSRDXokkceG+9tHBaDAFQ=
golang.org/x/mobile v0.0.0-20250106192035-c31d5b91ecc3/go.mod h1:sY92m3V/rTEa4JCJ1FkKHK978K6wxOSX1PStMYo+6wI=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
@@ -719,8 +727,11 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -761,17 +772,20 @@ golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE=
golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -784,6 +798,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -831,7 +847,6 @@ golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -839,11 +854,15 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
@@ -851,11 +870,14 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -869,14 +891,15 @@ golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -925,8 +948,10 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE=
golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -947,8 +972,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.211.0 h1:IUpLjq09jxBSV1lACO33CGY3jsRcbctfGzhj+ZSE/Bg=
google.golang.org/api v0.211.0/go.mod h1:XOloB4MXFH4UTlQSGuNUxw0UT74qdENK8d6JNsXKLi0=
google.golang.org/api v0.216.0 h1:xnEHy+xWFrtYInWPy8OdGFsyIfWJjtVnO39g7pz2BFY=
google.golang.org/api v0.216.0/go.mod h1:K9wzQMvWi47Z9IU7OgdOofvZuw75Ge3PPITImZR/UyI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -985,10 +1010,10 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 h1:g/4bk7P6TPMkAUbUhquq98xey1slwvuVJPosdBqYJlU=
google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697 h1:pgr/4QbFyktUv9CtQ/Fq4gzEE6/Xs7iCXbktaGzLHbQ=
google.golang.org/genproto/googleapis/api v0.0.0-20241118233622-e639e219e697/go.mod h1:+D9ySVjN8nY8YCVjc5O7PZDIdZporIDY3KaGfJunh88=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241206012308-a4fef0638583 h1:IfdSdTcLFy4lqUQrQJLkLt1PB+AsqVz6lwkWPzWEz10=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241206012308-a4fef0638583/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d h1:xJJRGY7TJcvIlpSrN3K6LAWgNFUILlO+OMAqtg9aqnw=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -1001,8 +1026,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1013,8 +1038,8 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=

@@ -66,8 +66,9 @@ default based on the batch_mode in use.
Advanced: true,
}, {
Name: "batch_commit_timeout",
Help: `Max time to wait for a batch to finish committing`,
Help: `Max time to wait for a batch to finish committing. (no longer used)`,
Default: fs.Duration(10 * time.Minute),
Advanced: true,
Hide: fs.OptionHideBoth,
}}
}

@@ -31,7 +31,9 @@ func Split(absPath string) (bucket, bucketPath string) {

// Join path1 and path2
//
// Like path.Join but does not clean the path - useful to preserve trailing /
// Like path.Join but does not clean the path - useful to preserve trailing /.
//
// It also does not clean multiple // in the path.
func Join(path1, path2 string) string {
if path1 == "" {
return path2
@@ -39,7 +41,22 @@ func Join(path1, path2 string) string {
if path2 == "" {
return path1
}
return strings.TrimSuffix(path1, "/") + "/" + strings.TrimPrefix(path2, "/")
return path1 + "/" + path2
}

// IsAllSlashes returns true if s is all / characters.
//
// It returns false if s is "".
func IsAllSlashes(s string) bool {
if len(s) == 0 {
return false
}
for _, c := range s {
if c != '/' {
return false
}
}
return true
}

// Cache stores whether buckets are available and their IDs

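For reference, a minimal sketch of the changed Join semantics and the new IsAllSlashes helper. It assumes the helpers are imported from rclone's lib/bucket package; the expected outputs follow the updated tests below.

    package main

    import (
        "fmt"

        "github.com/rclone/rclone/lib/bucket" // assumed import path for the Join/IsAllSlashes helpers above
    )

    func main() {
        // Join no longer trims the joining slashes, so doubled slashes are preserved.
        fmt.Println(bucket.Join("in1/", "in2")) // "in1//in2" (previously "in1/in2")
        fmt.Println(bucket.Join("in1", "in2"))  // "in1/in2"
        fmt.Println(bucket.Join("", "in2"))     // "in2"

        // IsAllSlashes reports whether a non-empty string consists only of '/' characters.
        fmt.Println(bucket.IsAllSlashes("//")) // true
        fmt.Println(bucket.IsAllSlashes(""))   // false
    }
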
@@ -34,10 +34,10 @@ func TestJoin(t *testing.T) {
{in1: "in1", in2: "", want: "in1"},
{in1: "", in2: "in2", want: "in2"},
{in1: "in1", in2: "in2", want: "in1/in2"},
{in1: "in1/", in2: "in2", want: "in1/in2"},
{in1: "in1", in2: "/in2", want: "in1/in2"},
{in1: "in1/", in2: "in2", want: "in1//in2"},
{in1: "in1", in2: "/in2", want: "in1//in2"},
{in1: "in1", in2: "in2/", want: "in1/in2/"},
{in1: "/in1", in2: "/in2", want: "/in1/in2"},
{in1: "/in1", in2: "/in2", want: "/in1//in2"},
{in1: "/in1", in2: "../in2", want: "/in1/../in2"},
} {
got := Join(test.in1, test.in2)
@@ -45,6 +45,24 @@ func TestJoin(t *testing.T) {
}
}

func TestIsAllSlashes(t *testing.T) {
for _, test := range []struct {
in string
want bool
}{
{in: "", want: false},
{in: "/", want: true},
{in: "x/", want: false},
{in: "/x", want: false},
{in: "//", want: true},
{in: "/x/", want: false},
{in: "///", want: true},
} {
got := IsAllSlashes(test.in)
assert.Equal(t, test.want, got, test.in)
}
}

func TestCache(t *testing.T) {
c := NewCache()
errBoom := errors.New("boom")

@@ -19,7 +19,11 @@ By default this will serve files without needing a login.
You can either use an htpasswd file which can take lots of users, or
set a single username and password with the ` + "`--{{ .Prefix }}user` and `--{{ .Prefix }}pass`" + ` flags.

If no static users are configured by either of the above methods, and client
Alternatively, you can have the reverse proxy manage authentication and use the
username provided in the configured header with ` + "`--user-from-header`" + ` (e.g., ` + "`--{{ .Prefix }}user-from-header=x-remote-user`" + `).
Ensure the proxy is trusted and headers cannot be spoofed, as misconfiguration may lead to unauthorized access.

If either of the above authentication methods is not configured and client
certificates are required by the ` + "`--client-ca`" + ` flag passed to the server, the
client certificate common name will be considered as the username.

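As a rough illustration of the proxied setup the paragraph above describes, the sketch below wires the new user_from_header option into a server the same way the test further down in this diff does. The lib/http import path and the surrounding main function are assumptions for the sketch, not part of this change; on the command line the same option surfaces as the --user-from-header flag registered in AddFlagsPrefix below.

    package main

    import (
        "context"
        "log"

        libhttp "github.com/rclone/rclone/lib/http" // assumed import path for the Config/AuthConfig/NewServer used in this diff
    )

    func main() {
        // The reverse proxy authenticates the request and forwards the user in X-Remote-User;
        // with no htpasswd file and no --user/--pass pair configured, header auth takes effect.
        auth := libhttp.AuthConfig{UserFromHeader: "X-Remote-User"}
        cfg := libhttp.Config{ListenAddr: []string{"127.0.0.1:8080"}}

        s, err := libhttp.NewServer(context.Background(), libhttp.WithConfig(cfg), libhttp.WithAuth(auth))
        if err != nil {
            log.Fatal(err)
        }
        s.Serve()
        select {} // block so the server keeps running in this sketch
    }
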
@@ -85,16 +89,21 @@ var AuthConfigInfo = fs.Options{{
Name: "salt",
Default: "dlPL2MqE",
Help: "Password hashing salt",
}, {
Name: "user_from_header",
Default: "",
Help: "User name from a defined HTTP header",
}}

// AuthConfig contains options for the http authentication
type AuthConfig struct {
HtPasswd string `config:"htpasswd"` // htpasswd file - if not provided no authentication is done
Realm string `config:"realm"` // realm for authentication
BasicUser string `config:"user"` // single username for basic auth if not using Htpasswd
BasicPass string `config:"pass"` // password for BasicUser
Salt string `config:"salt"` // password hashing salt
CustomAuthFn CustomAuthFn `json:"-" config:"-"` // custom Auth (not set by command line flags)
HtPasswd string `config:"htpasswd"` // htpasswd file - if not provided no authentication is done
Realm string `config:"realm"` // realm for authentication
BasicUser string `config:"user"` // single username for basic auth if not using Htpasswd
BasicPass string `config:"pass"` // password for BasicUser
Salt string `config:"salt"` // password hashing salt
UserFromHeader string `config:"user_from_header"` // retrieve user name from a defined HTTP header
CustomAuthFn CustomAuthFn `json:"-" config:"-"` // custom Auth (not set by command line flags)
}

// AddFlagsPrefix adds flags to the flag set for AuthConfig
@@ -104,6 +113,7 @@ func (cfg *AuthConfig) AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string) {
flags.StringVarP(flagSet, &cfg.BasicUser, prefix+"user", "", cfg.BasicUser, "User name for authentication", prefix)
flags.StringVarP(flagSet, &cfg.BasicPass, prefix+"pass", "", cfg.BasicPass, "Password for authentication", prefix)
flags.StringVarP(flagSet, &cfg.Salt, prefix+"salt", "", cfg.Salt, "Password hashing salt", prefix)
flags.StringVarP(flagSet, &cfg.UserFromHeader, prefix+"user-from-header", "", cfg.UserFromHeader, "Retrieve the username from a specified HTTP header if no other authentication methods are configured (ideal for proxied setups)", prefix)
}

// AddAuthFlagsPrefix adds flags to the flag set for AuthConfig

@@ -5,6 +5,7 @@ import (
"encoding/base64"
"fmt"
"net/http"
"regexp"
"strings"
"sync"

@@ -153,6 +154,26 @@ func MiddlewareAuthCustom(fn CustomAuthFn, realm string, userFromContext bool) M
}
}

var validUsernameRegexp = regexp.MustCompile(`^[\p{L}\d@._-]+$`)

// MiddlewareAuthGetUserFromHeader middleware that bypasses authentication and extracts the user via a specified HTTP header (ideal for proxied setups).
func MiddlewareAuthGetUserFromHeader(header string) Middleware {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
username := strings.TrimSpace(r.Header.Get(header))
if username != "" && validUsernameRegexp.MatchString(username) {
r = r.WithContext(context.WithValue(r.Context(), ctxKeyUser, username))
next.ServeHTTP(w, r)
return
}

code := http.StatusUnauthorized
w.Header().Set("Content-Type", "text/plain")
http.Error(w, http.StatusText(code), code)
})
}
}

var onlyOnceWarningAllowOrigin sync.Once

// MiddlewareCORS instantiates middleware that handles basic CORS protections for rcd

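A standalone sketch of the username check the middleware applies before trusting the header. The pattern is copied from validUsernameRegexp above; the sample values are illustrative, and "/test:" is the rejected value used by the tests below.

    package main

    import (
        "fmt"
        "regexp"
    )

    // Same pattern as validUsernameRegexp: one or more letters, digits, '@', '.', '_' or '-'.
    var validUsername = regexp.MustCompile(`^[\p{L}\d@._-]+$`)

    func main() {
        for _, v := range []string{"alice", "bob@example.com", "Jürgen", "/test:", ""} {
            fmt.Printf("%q -> %v\n", v, validUsername.MatchString(v))
        }
        // "alice", "bob@example.com" and "Jürgen" match and would be accepted;
        // "/test:" and "" do not, so the middleware responds 401 Unauthorized.
    }
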
@@ -14,11 +14,13 @@ import (

func TestMiddlewareAuth(t *testing.T) {
servers := []struct {
name string
http Config
auth AuthConfig
user string
pass string
name string
expectedUser string
remoteUser string
http Config
auth AuthConfig
user string
pass string
}{
{
name: "Basic",
@@ -85,9 +87,32 @@ func TestMiddlewareAuth(t *testing.T) {
},
user: "custom",
pass: "custom",
}, {
name: "UserFromHeader",
remoteUser: "remoteUser",
expectedUser: "remoteUser",
http: Config{
ListenAddr: []string{"127.0.0.1:0"},
},
auth: AuthConfig{
UserFromHeader: "X-Remote-User",
},
}, {
name: "UserFromHeader/MixedWithHtPasswd",
remoteUser: "remoteUser",
expectedUser: "md5",
http: Config{
ListenAddr: []string{"127.0.0.1:0"},
},
auth: AuthConfig{
UserFromHeader: "X-Remote-User",
Realm: "test",
HtPasswd: "./testdata/.htpasswd",
},
user: "md5",
pass: "md5",
},
}

for _, ss := range servers {
t.Run(ss.name, func(t *testing.T) {
s, err := NewServer(context.Background(), WithConfig(ss.http), WithAuth(ss.auth))
@@ -97,7 +122,12 @@ func TestMiddlewareAuth(t *testing.T) {
}()

expected := []byte("secret-page")
s.Router().Mount("/", testEchoHandler(expected))
if ss.expectedUser != "" {
s.Router().Mount("/", testAuthUserHandler())
} else {
s.Router().Mount("/", testEchoHandler(expected))
}

s.Serve()

url := testGetServerURL(t, s)
@@ -114,18 +144,24 @@ func TestMiddlewareAuth(t *testing.T) {
}()

require.Equal(t, http.StatusUnauthorized, resp.StatusCode, "using no creds should return unauthorized")

wwwAuthHeader := resp.Header.Get("WWW-Authenticate")
require.NotEmpty(t, wwwAuthHeader, "resp should contain WWW-Authtentication header")
require.Contains(t, wwwAuthHeader, fmt.Sprintf("realm=%q", ss.auth.Realm), "WWW-Authtentication header should contain relam")
if ss.auth.UserFromHeader == "" {
wwwAuthHeader := resp.Header.Get("WWW-Authenticate")
require.NotEmpty(t, wwwAuthHeader, "resp should contain WWW-Authtentication header")
require.Contains(t, wwwAuthHeader, fmt.Sprintf("realm=%q", ss.auth.Realm), "WWW-Authtentication header should contain relam")
}
})

t.Run("BadCreds", func(t *testing.T) {
client := &http.Client{}
req, err := http.NewRequest("GET", url, nil)
require.NoError(t, err)

req.SetBasicAuth(ss.user+"BAD", ss.pass+"BAD")
if ss.user != "" {
req.SetBasicAuth(ss.user+"BAD", ss.pass+"BAD")
}

if ss.auth.UserFromHeader != "" {
req.Header.Set(ss.auth.UserFromHeader, "/test:")
}

resp, err := client.Do(req)
require.NoError(t, err)
@@ -134,10 +170,11 @@ func TestMiddlewareAuth(t *testing.T) {
}()

require.Equal(t, http.StatusUnauthorized, resp.StatusCode, "using bad creds should return unauthorized")

wwwAuthHeader := resp.Header.Get("WWW-Authenticate")
require.NotEmpty(t, wwwAuthHeader, "resp should contain WWW-Authtentication header")
require.Contains(t, wwwAuthHeader, fmt.Sprintf("realm=%q", ss.auth.Realm), "WWW-Authtentication header should contain relam")
if ss.auth.UserFromHeader == "" {
wwwAuthHeader := resp.Header.Get("WWW-Authenticate")
require.NotEmpty(t, wwwAuthHeader, "resp should contain WWW-Authtentication header")
require.Contains(t, wwwAuthHeader, fmt.Sprintf("realm=%q", ss.auth.Realm), "WWW-Authtentication header should contain relam")
}
})

t.Run("GoodCreds", func(t *testing.T) {
@@ -145,7 +182,13 @@ func TestMiddlewareAuth(t *testing.T) {
req, err := http.NewRequest("GET", url, nil)
require.NoError(t, err)

req.SetBasicAuth(ss.user, ss.pass)
if ss.user != "" {
req.SetBasicAuth(ss.user, ss.pass)
}

if ss.auth.UserFromHeader != "" {
req.Header.Set(ss.auth.UserFromHeader, ss.remoteUser)
}

resp, err := client.Do(req)
require.NoError(t, err)
@@ -155,7 +198,11 @@ func TestMiddlewareAuth(t *testing.T) {

require.Equal(t, http.StatusOK, resp.StatusCode, "using good creds should return ok")

testExpectRespBody(t, resp, expected)
if ss.expectedUser != "" {
testExpectRespBody(t, resp, []byte(ss.expectedUser))
} else {
testExpectRespBody(t, resp, expected)
}
})
})
}

@@ -392,16 +392,23 @@ func NewServer(ctx context.Context, options ...Option) (*Server, error) {

func (s *Server) initAuth() {
s.usingAuth = false
altUsernameEnabled := s.auth.HtPasswd == "" && s.auth.BasicUser == ""

authCertificateUserEnabled := s.tlsConfig != nil && s.tlsConfig.ClientAuth != tls.NoClientCert && s.auth.HtPasswd == "" && s.auth.BasicUser == ""
if authCertificateUserEnabled {
if altUsernameEnabled {
s.usingAuth = true
s.mux.Use(MiddlewareAuthCertificateUser())
if s.auth.UserFromHeader != "" {
s.mux.Use(MiddlewareAuthGetUserFromHeader(s.auth.UserFromHeader))
} else if s.tlsConfig != nil && s.tlsConfig.ClientAuth != tls.NoClientCert {
s.mux.Use(MiddlewareAuthCertificateUser())
} else {
s.usingAuth = false
altUsernameEnabled = false
}
}

if s.auth.CustomAuthFn != nil {
s.usingAuth = true
s.mux.Use(MiddlewareAuthCustom(s.auth.CustomAuthFn, s.auth.Realm, authCertificateUserEnabled))
s.mux.Use(MiddlewareAuthCustom(s.auth.CustomAuthFn, s.auth.Realm, altUsernameEnabled))
return
}

@@ -66,7 +66,10 @@ func newDir(vfs *VFS, f fs.Fs, parent *Dir, fsDir fs.Directory) *Dir {
inode: newInode(),
items: make(map[string]Node),
}
d.cleanupTimer = time.AfterFunc(time.Duration(vfs.Opt.DirCacheTime*2), d.cacheCleanup)
// Set timer up like this to avoid race of d.cacheCleanup being called
// before d.cleanupTimer is assigned to
d.cleanupTimer = time.AfterFunc(time.Hour, d.cacheCleanup)
d.cleanupTimer.Reset(time.Duration(vfs.Opt.DirCacheTime * 2))
return d
}

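The comment in the hunk above describes a general pattern: create the timer with a harmless placeholder duration so the struct field is assigned before the callback can possibly fire, then Reset it to the real value. A self-contained sketch of that pattern (the names are illustrative, not rclone's):

    package main

    import (
        "fmt"
        "time"
    )

    type dir struct {
        cleanupTimer *time.Timer
    }

    func (d *dir) cacheCleanup() {
        // By the time this runs, cleanupTimer is guaranteed to have been assigned.
        fmt.Println("cleanup fired, timer set:", d.cleanupTimer != nil)
    }

    func newDir(cacheTime time.Duration) *dir {
        d := &dir{}
        // Passing cacheTime directly to AfterFunc could let cacheCleanup run before
        // the assignment to d.cleanupTimer completes if cacheTime is very small.
        d.cleanupTimer = time.AfterFunc(time.Hour, d.cacheCleanup)
        d.cleanupTimer.Reset(cacheTime)
        return d
    }

    func main() {
        _ = newDir(10 * time.Millisecond)
        time.Sleep(50 * time.Millisecond)
    }
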
@@ -464,7 +464,7 @@ the |--vfs-cache-mode| is off, it will return an empty result.
],
}

The |expiry| time is the time until the file is elegible for being
The |expiry| time is the time until the file is eligible for being
uploaded in floating point seconds. This may go negative. As rclone
only transfers |--transfers| files at once, only the lowest
|--transfers| expiry times will have |uploading| as |true|. So there

@@ -216,7 +216,7 @@ func New(f fs.Fs, opt *vfscommon.Options) *VFS {
configName := fs.ConfigString(f)
for _, activeVFS := range active[configName] {
if vfs.Opt == activeVFS.Opt {
fs.Debugf(f, "Re-using VFS from active cache")
fs.Debugf(f, "Reusing VFS from active cache")
activeVFS.inUse.Add(1)
return activeVFS
}
@@ -365,6 +365,11 @@ func (vfs *VFS) Shutdown() {
activeMu.Unlock()

vfs.shutdownCache()

if vfs.pollChan != nil {
close(vfs.pollChan)
vfs.pollChan = nil
}
}

// CleanUp deletes the contents of the on disk cache

@@ -428,7 +428,7 @@ func TestItemReloadCacheStale(t *testing.T) {
assert.Equal(t, int64(110), fi.Size())

// Write to the file to make it dirty
// This checks we aren't re-using stale data
// This checks we aren't reusing stale data
n, err := item.WriteAt([]byte("HELLO"), 0)
require.NoError(t, err)
assert.Equal(t, 5, n)