mirror of https://github.com/rclone/rclone.git synced 2026-01-22 04:13:14 +00:00

Compare commits


1 Commit

Author SHA1 Message Date
Nick Craig-Wood
bd33b0e144 local: fix permission and ownership on symlinks with --links and --metadata
Before this change, when writing to a local backend with --metadata and
--links, if the incoming metadata contained mode or ownership
information then rclone would apply the mode/ownership to the
destination of the link rather than to the link itself.

This fixes the problem by using the link-safe syscall variants
lchown/fchmodat when --links and --metadata are in use. Note that Linux
does not support setting permissions on symlinks, so rclone emits a
debug message in this case.

This also fixes setting times on symlinks on Windows, which wasn't
implemented for atime and mtime and was incorrectly setting the target
of the symlink for btime.

See: https://github.com/rclone/rclone/security/advisories/GHSA-hrxh-9w67-g4cv
2024-11-14 12:51:18 +00:00
253 changed files with 11323 additions and 16703 deletions
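As a rough, POSIX-only illustration of the approach the commit message describes (not rclone's actual code), the sketch below applies ownership and mode to a symlink itself using the link-safe lchown and fchmodat with AT_SYMLINK_NOFOLLOW. The helper name is made up, and the Linux "operation not supported" case is reported rather than treated as an error, analogous to the debug message mentioned above.

```
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

// applySymlinkMetadata is a hypothetical helper (not rclone code). It uses
// the link-safe variants lchown(2) and fchmodat(2) with AT_SYMLINK_NOFOLLOW
// so the metadata lands on the link, not on the file it points to.
func applySymlinkMetadata(path string, uid, gid int, mode os.FileMode) error {
	// lchown changes ownership of the link itself.
	if err := os.Lchown(path, uid, gid); err != nil {
		return fmt.Errorf("lchown %q: %w", path, err)
	}
	// fchmodat with AT_SYMLINK_NOFOLLOW asks the kernel to change the mode
	// of the link itself. Linux does not support this, so treat the
	// "operation not supported" error as non-fatal and just report it.
	err := unix.Fchmodat(unix.AT_FDCWD, path, uint32(mode.Perm()), unix.AT_SYMLINK_NOFOLLOW)
	if err == unix.EOPNOTSUPP || err == unix.ENOTSUP {
		fmt.Printf("debug: cannot set permissions on symlink %q: %v\n", path, err)
		return nil
	}
	if err != nil {
		return fmt.Errorf("fchmodat %q: %w", path, err)
	}
	return nil
}

func main() {
	// Example usage against a local symlink named "example-link".
	if err := applySymlinkMetadata("example-link", os.Getuid(), os.Getgid(), 0o644); err != nil {
		fmt.Println("error:", err)
	}
}
```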

View File

@@ -26,12 +26,12 @@ jobs:
strategy:
fail-fast: false
matrix:
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.22', 'go1.23']
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.21', 'go1.22']
include:
- job_name: linux
os: ubuntu-latest
go: '>=1.24.0-rc.1'
go: '>=1.23.0-rc.1'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -42,14 +42,14 @@ jobs:
- job_name: linux_386
os: ubuntu-latest
go: '>=1.24.0-rc.1'
go: '>=1.23.0-rc.1'
goarch: 386
gotags: cmount
quicktest: true
- job_name: mac_amd64
os: macos-latest
go: '>=1.24.0-rc.1'
go: '>=1.23.0-rc.1'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -58,14 +58,14 @@ jobs:
- job_name: mac_arm64
os: macos-latest
go: '>=1.24.0-rc.1'
go: '>=1.23.0-rc.1'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows
os: windows-latest
go: '>=1.24.0-rc.1'
go: '>=1.23.0-rc.1'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'
@@ -75,20 +75,20 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '>=1.24.0-rc.1'
go: '>=1.23.0-rc.1'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- job_name: go1.22
- job_name: go1.21
os: ubuntu-latest
go: '1.22'
go: '1.21'
quicktest: true
racequicktest: true
- job_name: go1.23
- job_name: go1.22
os: ubuntu-latest
go: '1.23'
go: '1.22'
quicktest: true
racequicktest: true
@@ -123,8 +123,7 @@ jobs:
sudo modprobe fuse
sudo chmod 666 /dev/fuse
sudo chown root:$USER /etc/fuse.conf
sudo apt-get update
sudo apt-get install -y fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
sudo apt-get install fuse3 libfuse-dev rpm pkg-config git-annex git-annex-remote-rclone nfs-common
if: matrix.os == 'ubuntu-latest'
- name: Install Libraries on macOS
@@ -311,7 +310,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '>=1.24.0-rc.1'
go-version: '>=1.23.0-rc.1'
- name: Set global environment variables
shell: bash

View File

@@ -12,8 +12,6 @@ RUN ./rclone version
# Begin final image
FROM alpine:latest
LABEL org.opencontainers.image.source="https://github.com/rclone/rclone"
RUN apk --no-cache add ca-certificates fuse3 tzdata && \
echo "user_allow_other" >> /etc/fuse.conf

2431
MANUAL.html generated

File diff suppressed because it is too large

2476
MANUAL.md generated

File diff suppressed because it is too large

2523
MANUAL.txt generated

File diff suppressed because it is too large

View File

@@ -47,20 +47,13 @@ Early in the next release cycle update the dependencies.
* `git commit -a -v -m "build: update all dependencies"`
If the `make updatedirect` upgrades the version of go in the `go.mod`
go 1.22.0
then go to manual mode. `go1.22` here is the lowest supported version
then go to manual mode. `go1.20` here is the lowest supported version
in the `go.mod`.
If `make updatedirect` added a `toolchain` directive then remove it.
We don't want to force a toolchain on our users. Linux packagers are
often using a version of Go that is a few versions out of date.
```
go list -m -f '{{if not (or .Main .Indirect)}}{{.Path}}{{end}}' all > /tmp/potential-upgrades
go get -d $(cat /tmp/potential-upgrades)
go mod tidy -go=1.22 -compat=1.22
go mod tidy -go=1.20 -compat=1.20
```
If the `go mod tidy` fails use the output from it to remove the
@@ -93,16 +86,6 @@ build.
Once it compiles locally, push it on a test branch and commit fixes
until the tests pass.
### Major versions
The above procedure will not upgrade major versions, so v2 to v3.
However this tool can show which major versions might need to be
upgraded:
go run github.com/icholy/gomajor@latest list -major
Expect API breakage when updating major versions.
## Tidy beta
At some point after the release run

View File

@@ -1 +1 @@
v1.70.0
v1.69.0

View File

@@ -10,7 +10,6 @@ import (
_ "github.com/rclone/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/chunker"
_ "github.com/rclone/rclone/backend/cloudinary"
_ "github.com/rclone/rclone/backend/combine"
_ "github.com/rclone/rclone/backend/compress"
_ "github.com/rclone/rclone/backend/crypt"

File diff suppressed because it is too large

View File

@@ -3,149 +3,16 @@
package azureblob
import (
"context"
"encoding/base64"
"strings"
"testing"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestBlockIDCreator(t *testing.T) {
// Check creation and random number
bic, err := newBlockIDCreator()
require.NoError(t, err)
bic2, err := newBlockIDCreator()
require.NoError(t, err)
assert.NotEqual(t, bic.random, bic2.random)
assert.NotEqual(t, bic.random, [8]byte{})
// Set random to known value for tests
bic.random = [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
chunkNumber := uint64(0xFEDCBA9876543210)
// Check creation of ID
want := base64.StdEncoding.EncodeToString([]byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10, 1, 2, 3, 4, 5, 6, 7, 8})
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", want)
got := bic.newBlockID(chunkNumber)
assert.Equal(t, want, got)
assert.Equal(t, "/ty6mHZUMhABAgMEBQYHCA==", got)
// Test checkID is working
assert.NoError(t, bic.checkID(chunkNumber, got))
assert.ErrorContains(t, bic.checkID(chunkNumber, "$"+got), "illegal base64")
assert.ErrorContains(t, bic.checkID(chunkNumber, "AAAA"+got), "bad block ID length")
assert.ErrorContains(t, bic.checkID(chunkNumber+1, got), "expecting decoded")
assert.ErrorContains(t, bic2.checkID(chunkNumber, got), "random bytes")
}
func (f *Fs) testFeatures(t *testing.T) {
// Check first feature flags are set on this remote
func (f *Fs) InternalTest(t *testing.T) {
// Check first feature flags are set on this
// remote
enabled := f.Features().SetTier
assert.True(t, enabled)
enabled = f.Features().GetTier
assert.True(t, enabled)
}
type ReadSeekCloser struct {
*strings.Reader
}
func (r *ReadSeekCloser) Close() error {
return nil
}
// Stage a block at remote but don't commit it
func (f *Fs) stageBlockWithoutCommit(ctx context.Context, t *testing.T, remote string) {
var (
containerName, blobPath = f.split(remote)
containerClient = f.cntSVC(containerName)
blobClient = containerClient.NewBlockBlobClient(blobPath)
data = "uncommitted data"
blockID = "1"
blockIDBase64 = base64.StdEncoding.EncodeToString([]byte(blockID))
)
r := &ReadSeekCloser{strings.NewReader(data)}
_, err := blobClient.StageBlock(ctx, blockIDBase64, r, nil)
require.NoError(t, err)
// Verify the block is staged but not committed
blockList, err := blobClient.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
require.NoError(t, err)
found := false
for _, block := range blockList.UncommittedBlocks {
if *block.Name == blockIDBase64 {
found = true
break
}
}
require.True(t, found, "Block ID not found in uncommitted blocks")
}
// This tests uploading a blob where it has uncommitted blocks with a different ID size.
//
// https://gauravmantri.com/2013/05/18/windows-azure-blob-storage-dealing-with-the-specified-blob-or-block-content-is-invalid-error/
//
// TestIntegration/FsMkdir/FsPutFiles/Internal/WriteUncommittedBlocks
func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
var (
ctx = context.Background()
remote = "testBlob"
)
// Multipart copy the blob please
oldUseCopyBlob, oldCopyCutoff := f.opt.UseCopyBlob, f.opt.CopyCutoff
f.opt.UseCopyBlob = false
f.opt.CopyCutoff = f.opt.ChunkSize
defer func() {
f.opt.UseCopyBlob, f.opt.CopyCutoff = oldUseCopyBlob, oldCopyCutoff
}()
// Create a blob with uncommitted blocks
f.stageBlockWithoutCommit(ctx, t, remote)
// Now attempt to overwrite the block with a different sized block ID to provoke this error
// Check the object does not exist
_, err := f.NewObject(ctx, remote)
require.Equal(t, fs.ErrorObjectNotFound, err)
// Upload a multipart file over the block with uncommitted chunks of a different ID size
size := 4*int(f.opt.ChunkSize) - 1
contents := random.String(size)
item := fstest.NewItem(remote, contents, fstest.Time("2001-05-06T04:05:06.499Z"))
o := fstests.PutTestContents(ctx, t, f, &item, contents, true)
// Check size
assert.Equal(t, int64(size), o.Size())
// Create a new blob with uncommitted blocks
newRemote := "testBlob2"
f.stageBlockWithoutCommit(ctx, t, newRemote)
// Copy over that block
dst, err := f.Copy(ctx, o, newRemote)
require.NoError(t, err)
// Check basics
assert.Equal(t, int64(size), dst.Size())
assert.Equal(t, newRemote, dst.Remote())
// Check contents
gotContents := fstests.ReadObject(ctx, t, dst, -1)
assert.Equal(t, contents, gotContents)
// Remove the object
require.NoError(t, dst.Remove(ctx))
}
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Features", f.testFeatures)
t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
}

View File

@@ -15,17 +15,13 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
name := "TestAzureBlob"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool", "Cold"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: defaultChunkSize,
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "use_copy_blob", Value: "false"},
},
})
}
@@ -44,7 +40,6 @@ func TestIntegration2(t *testing.T) {
},
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "directory_markers", Value: "true"},
{Name: name, Key: "use_copy_blob", Value: "false"},
},
})
}
@@ -53,13 +48,8 @@ func (f *Fs) SetUploadChunkSize(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setUploadChunkSize(cs)
}
func (f *Fs) SetCopyCutoff(cs fs.SizeSuffix) (fs.SizeSuffix, error) {
return f.setCopyCutoff(cs)
}
var (
_ fstests.SetUploadChunkSizer = (*Fs)(nil)
_ fstests.SetCopyCutoffer = (*Fs)(nil)
)
func TestValidateAccessTier(t *testing.T) {

View File

@@ -237,30 +237,6 @@ msi_client_id, or msi_mi_res_id parameters.`,
Help: "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
Advanced: true,
Sensitive: true,
}, {
Name: "disable_instance_discovery",
Help: `Skip requesting Microsoft Entra instance metadata
This should be set true only by applications authenticating in
disconnected clouds, or private clouds such as Azure Stack.
It determines whether rclone requests Microsoft Entra instance
metadata from ` + "`https://login.microsoft.com/`" + ` before
authenticating.
Setting this to true will skip this request, making you responsible
for ensuring the configured authority is valid and trustworthy.
`,
Default: false,
Advanced: true,
}, {
Name: "use_az",
Help: `Use Azure CLI tool az for authentication
Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/)
as the sole means of authentication.
Setting this can be useful if you wish to use the az CLI on a host with
a System Managed Identity that you do not want to use.
Don't set env_auth at the same time.
`,
Default: false,
Advanced: true,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
@@ -343,12 +319,10 @@ type Options struct {
Username string `config:"username"`
Password string `config:"password"`
ServicePrincipalFile string `config:"service_principal_file"`
DisableInstanceDiscovery bool `config:"disable_instance_discovery"`
UseMSI bool `config:"use_msi"`
MSIObjectID string `config:"msi_object_id"`
MSIClientID string `config:"msi_client_id"`
MSIResourceID string `config:"msi_mi_res_id"`
UseAZ bool `config:"use_az"`
Endpoint string `config:"endpoint"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
MaxStreamSize fs.SizeSuffix `config:"max_stream_size"`
@@ -419,10 +393,8 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
policyClientOptions := policy.ClientOptions{
Transport: newTransporter(ctx),
}
backup := service.ShareTokenIntentBackup
clientOpt := service.ClientOptions{
ClientOptions: policyClientOptions,
FileRequestIntent: &backup,
ClientOptions: policyClientOptions,
}
// Here we auth by setting one of cred, sharedKeyCred or f.client
@@ -440,8 +412,7 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
}
// Read credentials from the environment
options := azidentity.DefaultAzureCredentialOptions{
ClientOptions: policyClientOptions,
DisableInstanceDiscovery: opt.DisableInstanceDiscovery,
ClientOptions: policyClientOptions,
}
cred, err = azidentity.NewDefaultAzureCredential(&options)
if err != nil {
@@ -452,13 +423,6 @@ func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.
if err != nil {
return nil, fmt.Errorf("create new shared key credential failed: %w", err)
}
case opt.UseAZ:
var options = azidentity.AzureCLICredentialOptions{}
cred, err = azidentity.NewAzureCLICredential(&options)
fmt.Println(cred)
if err != nil {
return nil, fmt.Errorf("failed to create Azure CLI credentials: %w", err)
}
case opt.SASURL != "":
client, err = service.NewClientWithNoCredential(opt.SASURL, &clientOpt)
if err != nil {
@@ -933,7 +897,7 @@ func (o *Object) getMetadata(ctx context.Context) error {
// Hash returns the MD5 of an object returning a lowercase hex string
//
// May make a network request because the [fs.List] method does not
// May make a network request becaue the [fs.List] method does not
// return MD5 hashes for DirEntry
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
if ty != hash.MD5 {

View File

@@ -42,10 +42,9 @@ type Bucket struct {
// LifecycleRule is a single lifecycle rule
type LifecycleRule struct {
DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"`
DaysFromUploadingToHiding *int `json:"daysFromUploadingToHiding"`
DaysFromStartingToCancelingUnfinishedLargeFiles *int `json:"daysFromStartingToCancelingUnfinishedLargeFiles"`
FileNamePrefix string `json:"fileNamePrefix"`
DaysFromHidingToDeleting *int `json:"daysFromHidingToDeleting"`
DaysFromUploadingToHiding *int `json:"daysFromUploadingToHiding"`
FileNamePrefix string `json:"fileNamePrefix"`
}
// Timestamp is a UTC time when this file was uploaded. It is a base

View File

@@ -30,7 +30,6 @@ import (
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
@@ -1319,22 +1318,16 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
// Check current version of the file
if deleteHidden && object.Action == "hide" {
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
if !operations.SkipDestructive(ctx, object.Name, "remove hide marker") {
toBeDeleted <- object
}
toBeDeleted <- object
} else if deleteUnfinished && object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
if !operations.SkipDestructive(ctx, object.Name, "remove pending upload") {
toBeDeleted <- object
}
toBeDeleted <- object
} else {
fs.Debugf(remote, "Not deleting current version (id %q) %q dated %v (%v ago)", object.ID, object.Action, time.Time(object.UploadTimestamp).Local(), time.Since(time.Time(object.UploadTimestamp)))
}
} else {
fs.Debugf(remote, "Deleting (id %q)", object.ID)
if !operations.SkipDestructive(ctx, object.Name, "delete") {
toBeDeleted <- object
}
toBeDeleted <- object
}
last = remote
tr.Done(ctx, nil)
@@ -2238,7 +2231,6 @@ This will dump something like this showing the lifecycle rules.
{
"daysFromHidingToDeleting": 1,
"daysFromUploadingToHiding": null,
"daysFromStartingToCancelingUnfinishedLargeFiles": null,
"fileNamePrefix": ""
}
]
@@ -2265,9 +2257,8 @@ overwrites will still cause versions to be made.
See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
`,
Opts: map[string]string{
"daysFromHidingToDeleting": "After a file has been hidden for this many days it is deleted. 0 is off.",
"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
"daysFromStartingToCancelingUnfinishedLargeFiles": "Cancels any unfinished large file versions after this many days",
"daysFromHidingToDeleting": "After a file has been hidden for this many days it is deleted. 0 is off.",
"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
},
}
@@ -2287,23 +2278,14 @@ func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, op
}
newRule.DaysFromUploadingToHiding = &days
}
if daysStr := opt["daysFromStartingToCancelingUnfinishedLargeFiles"]; daysStr != "" {
days, err := strconv.Atoi(daysStr)
if err != nil {
return nil, fmt.Errorf("bad daysFromStartingToCancelingUnfinishedLargeFiles: %w", err)
}
newRule.DaysFromStartingToCancelingUnfinishedLargeFiles = &days
}
bucketName, _ := f.split("")
if bucketName == "" {
return nil, errors.New("bucket required")
}
skip := operations.SkipDestructive(ctx, name, "update lifecycle rules")
var bucket *api.Bucket
if !skip && (newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil || newRule.DaysFromStartingToCancelingUnfinishedLargeFiles != nil) {
if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil {
bucketID, err := f.getBucketID(ctx, bucketName)
if err != nil {
return nil, err

View File

@@ -5,7 +5,6 @@ import (
"crypto/sha1"
"fmt"
"path"
"sort"
"strings"
"testing"
"time"
@@ -14,7 +13,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/bucket"
@@ -465,161 +463,24 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
})
t.Run("Cleanup", func(t *testing.T) {
t.Run("DryRun", func(t *testing.T) {
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Listing should be unchanged after dry run
before := listAllFiles(ctx, t, f, dirName)
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
require.NoError(t, f.cleanUp(ctx, true, false, 0))
after := listAllFiles(ctx, t, f, dirName)
assert.Equal(t, before, after)
})
t.Run("RealThing", func(t *testing.T) {
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
// Listing should reflect current state after cleanup
require.NoError(t, f.cleanUp(ctx, true, false, 0))
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
fstest.CheckListing(t, f, items)
})
require.NoError(t, f.cleanUp(ctx, true, false, 0))
items := append([]fstest.Item{newItem}, fstests.InternalTestFiles...)
fstest.CheckListing(t, f, items)
// Set --b2-versions for this test
f.opt.Versions = true
defer func() {
f.opt.Versions = false
}()
fstest.CheckListing(t, f, items)
})
// Purge gets tested later
}
func (f *Fs) InternalTestCleanupUnfinished(t *testing.T) {
ctx := context.Background()
// B2CleanupHidden tests cleaning up hidden files
t.Run("CleanupUnfinished", func(t *testing.T) {
dirName := "unfinished"
fileCount := 5
expectedFiles := []string{}
for i := 1; i < fileCount; i++ {
fileName := fmt.Sprintf("%s/unfinished-%d", dirName, i)
expectedFiles = append(expectedFiles, fileName)
obj := &Object{
fs: f,
remote: fileName,
}
objInfo := object.NewStaticObjectInfo(fileName, fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil)
_, err := f.newLargeUpload(ctx, obj, nil, objInfo, f.opt.ChunkSize, false, nil)
require.NoError(t, err)
}
checkListing(ctx, t, f, dirName, expectedFiles)
t.Run("DryRun", func(t *testing.T) {
// Listing should not change after dry run
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
require.NoError(t, f.cleanUp(ctx, false, true, 0))
checkListing(ctx, t, f, dirName, expectedFiles)
})
t.Run("RealThing", func(t *testing.T) {
// Listing should be empty after real cleanup
require.NoError(t, f.cleanUp(ctx, false, true, 0))
checkListing(ctx, t, f, dirName, []string{})
})
})
}
func listAllFiles(ctx context.Context, t *testing.T, f *Fs, dirName string) []string {
bucket, directory := f.split(dirName)
foundFiles := []string{}
require.NoError(t, f.list(ctx, bucket, directory, "", false, true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
if !isDirectory {
foundFiles = append(foundFiles, object.Name)
}
return nil
}))
sort.Strings(foundFiles)
return foundFiles
}
func checkListing(ctx context.Context, t *testing.T, f *Fs, dirName string, expectedFiles []string) {
foundFiles := listAllFiles(ctx, t, f, dirName)
sort.Strings(expectedFiles)
assert.Equal(t, expectedFiles, foundFiles)
}
func (f *Fs) InternalTestLifecycleRules(t *testing.T) {
ctx := context.Background()
opt := map[string]string{}
t.Run("InitState", func(t *testing.T) {
// There should be no lifecycle rules at the outset
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
})
t.Run("DryRun", func(t *testing.T) {
// There should still be no lifecycle rules after each dry run operation
ctx, ci := fs.AddConfig(ctx)
ci.DryRun = true
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
delete(opt, "daysFromHidingToDeleting")
opt["daysFromUploadingToHiding"] = "40"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 0, len(lifecycleRules))
})
t.Run("RealThing", func(t *testing.T) {
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err := f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules := lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
delete(opt, "daysFromHidingToDeleting")
opt["daysFromUploadingToHiding"] = "40"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
opt["daysFromHidingToDeleting"] = "30"
lifecycleRulesIf, err = f.lifecycleCommand(ctx, "lifecycle", nil, opt)
lifecycleRules = lifecycleRulesIf.([]api.LifecycleRule)
require.NoError(t, err)
assert.Equal(t, 1, len(lifecycleRules))
assert.Equal(t, 30, *lifecycleRules[0].DaysFromHidingToDeleting)
assert.Equal(t, 40, *lifecycleRules[0].DaysFromUploadingToHiding)
})
}
// -run TestIntegration/FsMkdir/FsPutFiles/Internal
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Metadata", f.InternalTestMetadata)
t.Run("Versions", f.InternalTestVersions)
t.Run("CleanupUnfinished", f.InternalTestCleanupUnfinished)
t.Run("LifecycleRules", f.InternalTestLifecycleRules)
}
var _ fstests.InternalTester = (*Fs)(nil)

View File

@@ -46,6 +46,7 @@ import (
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/rest"
"github.com/youmark/pkcs8"
"golang.org/x/oauth2"
)
const (
@@ -64,10 +65,12 @@ const (
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
Scopes: nil,
AuthURL: "https://app.box.com/api/oauth2/authorize",
TokenURL: "https://app.box.com/api/oauth2/token",
oauthConfig = &oauth2.Config{
Scopes: nil,
Endpoint: oauth2.Endpoint{
AuthURL: "https://app.box.com/api/oauth2/authorize",
TokenURL: "https://app.box.com/api/oauth2/token",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
@@ -255,9 +258,6 @@ func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
if block == nil {
return nil, errors.New("box: failed to PEM decode private key")
}
if len(rest) > 0 {
return nil, fmt.Errorf("box: extra data included in private key: %w", err)
}

View File

@@ -1,48 +0,0 @@
// Package api has type definitions for cloudinary
package api
import (
"fmt"
)
// CloudinaryEncoder extends the built-in encoder
type CloudinaryEncoder interface {
// FromStandardPath takes a / separated path in Standard encoding
// and converts it to a / separated path in this encoding.
FromStandardPath(string) string
// FromStandardName takes name in Standard encoding and converts
// it in this encoding.
FromStandardName(string) string
// ToStandardPath takes a / separated path in this encoding
// and converts it to a / separated path in Standard encoding.
ToStandardPath(string) string
// ToStandardName takes name in this encoding and converts
// it in Standard encoding.
ToStandardName(string) string
// Encoded root of the remote (as passed into NewFs)
FromStandardFullPath(string) string
}
// UpdateOptions was created to pass options from Update to Put
type UpdateOptions struct {
PublicID string
ResourceType string
DeliveryType string
AssetFolder string
DisplayName string
}
// Header formats the option as a string
func (o *UpdateOptions) Header() (string, string) {
return "UpdateOption", fmt.Sprintf("%s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}
// Mandatory returns whether the option must be parsed or can be ignored
func (o *UpdateOptions) Mandatory() bool {
return false
}
// String formats the option into human-readable form
func (o *UpdateOptions) String() string {
return fmt.Sprintf("Fully qualified Public ID: %s/%s/%s", o.ResourceType, o.DeliveryType, o.PublicID)
}

View File

@@ -1,711 +0,0 @@
// Package cloudinary provides an interface to the Cloudinary DAM
package cloudinary
import (
"context"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"path"
"strconv"
"strings"
"time"
"github.com/cloudinary/cloudinary-go/v2"
SDKApi "github.com/cloudinary/cloudinary-go/v2/api"
"github.com/cloudinary/cloudinary-go/v2/api/admin"
"github.com/cloudinary/cloudinary-go/v2/api/admin/search"
"github.com/cloudinary/cloudinary-go/v2/api/uploader"
"github.com/rclone/rclone/backend/cloudinary/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"github.com/zeebo/blake3"
)
// Cloudinary shouldn't have a trailing dot if there is no path
func cldPathDir(somePath string) string {
if somePath == "" || somePath == "." {
return somePath
}
dir := path.Dir(somePath)
if dir == "." {
return ""
}
return dir
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "cloudinary",
Description: "Cloudinary",
NewFs: NewFs,
Options: []fs.Option{
{
Name: "cloud_name",
Help: "Cloudinary Environment Name",
Required: true,
Sensitive: true,
},
{
Name: "api_key",
Help: "Cloudinary API Key",
Required: true,
Sensitive: true,
},
{
Name: "api_secret",
Help: "Cloudinary API Secret",
Required: true,
Sensitive: true,
},
{
Name: "upload_prefix",
Help: "Specify the API endpoint for environments out of the US",
},
{
Name: "upload_preset",
Help: "Upload Preset to select asset manipulation on upload",
},
{
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Base | // Slash,LtGt,DoubleQuote,Question,Asterisk,Pipe,Hash,Percent,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
encoder.EncodeSlash |
encoder.EncodeLtGt |
encoder.EncodeDoubleQuote |
encoder.EncodeQuestion |
encoder.EncodeAsterisk |
encoder.EncodePipe |
encoder.EncodeHash |
encoder.EncodePercent |
encoder.EncodeBackSlash |
encoder.EncodeDel |
encoder.EncodeCtl |
encoder.EncodeRightSpace |
encoder.EncodeInvalidUtf8 |
encoder.EncodeDot),
},
{
Name: "eventually_consistent_delay",
Default: fs.Duration(0),
Advanced: true,
Help: "Wait N seconds for eventual consistency of the databases that support the backend operation",
},
},
})
}
// Options defines the configuration for this backend
type Options struct {
CloudName string `config:"cloud_name"`
APIKey string `config:"api_key"`
APISecret string `config:"api_secret"`
UploadPrefix string `config:"upload_prefix"`
UploadPreset string `config:"upload_preset"`
Enc encoder.MultiEncoder `config:"encoding"`
EventuallyConsistentDelay fs.Duration `config:"eventually_consistent_delay"`
}
// Fs represents a remote cloudinary server
type Fs struct {
name string
root string
opt Options
features *fs.Features
pacer *fs.Pacer
srv *rest.Client // For downloading assets via the Cloudinary CDN
cld *cloudinary.Cloudinary // API calls are going through the Cloudinary SDK
lastCRUD time.Time
}
// Object describes a cloudinary object
type Object struct {
fs *Fs
remote string
size int64
modTime time.Time
url string
md5sum string
publicID string
resourceType string
deliveryType string
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name string, root string, m configmap.Mapper) (fs.Fs, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Initialize the Cloudinary client
cld, err := cloudinary.NewFromParams(opt.CloudName, opt.APIKey, opt.APISecret)
if err != nil {
return nil, fmt.Errorf("failed to create Cloudinary client: %w", err)
}
cld.Admin.Client = *fshttp.NewClient(ctx)
cld.Upload.Client = *fshttp.NewClient(ctx)
if opt.UploadPrefix != "" {
cld.Config.API.UploadPrefix = opt.UploadPrefix
}
client := fshttp.NewClient(ctx)
f := &Fs{
name: name,
root: root,
opt: *opt,
cld: cld,
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(1000), pacer.MaxSleep(10000), pacer.DecayConstant(2))),
srv: rest.NewClient(client),
}
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
if root != "" {
// Check to see if the root actually an existing file
remote := path.Base(root)
f.root = cldPathDir(root)
_, err := f.NewObject(ctx, remote)
if err != nil {
if err == fs.ErrorObjectNotFound || errors.Is(err, fs.ErrorNotAFile) {
// File doesn't exist so return the previous root
f.root = root
return f, nil
}
return nil, err
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
return f, nil
}
// ------------------------------------------------------------
// FromStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardPath(s string) string {
return strings.ReplaceAll(f.opt.Enc.FromStandardPath(s), "&", "\uFF06")
}
// FromStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) FromStandardName(s string) string {
return strings.ReplaceAll(f.opt.Enc.FromStandardName(s), "&", "\uFF06")
}
// ToStandardPath implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardPath(s string) string {
return strings.ReplaceAll(f.opt.Enc.ToStandardPath(s), "\uFF06", "&")
}
// ToStandardName implementation of the api.CloudinaryEncoder
func (f *Fs) ToStandardName(s string) string {
return strings.ReplaceAll(f.opt.Enc.ToStandardName(s), "\uFF06", "&")
}
// FromStandardFullPath encodes a full path to Cloudinary standard
func (f *Fs) FromStandardFullPath(dir string) string {
return path.Join(api.CloudinaryEncoder.FromStandardPath(f, f.root), api.CloudinaryEncoder.FromStandardPath(f, dir))
}
// ToAssetFolderAPI encodes folders as expected by the Cloudinary SDK
func (f *Fs) ToAssetFolderAPI(dir string) string {
return strings.ReplaceAll(dir, "%", "%25")
}
// ToDisplayNameElastic encodes a special case of elasticsearch
func (f *Fs) ToDisplayNameElastic(dir string) string {
return strings.ReplaceAll(dir, "!", "\\!")
}
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// WaitEventuallyConsistent waits till the FS is eventually consistent
func (f *Fs) WaitEventuallyConsistent() {
if f.opt.EventuallyConsistentDelay == fs.Duration(0) {
return
}
delay := time.Duration(f.opt.EventuallyConsistentDelay)
timeSinceLastCRUD := time.Since(f.lastCRUD)
if timeSinceLastCRUD < delay {
time.Sleep(delay - timeSinceLastCRUD)
}
}
// String converts this Fs to a string
func (f *Fs) String() string {
return fmt.Sprintf("Cloudinary root '%s'", f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// List the objects and directories in dir into entries
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
remotePrefix := f.FromStandardFullPath(dir)
if remotePrefix != "" && !strings.HasSuffix(remotePrefix, "/") {
remotePrefix += "/"
}
var entries fs.DirEntries
dirs := make(map[string]struct{})
nextCursor := ""
f.WaitEventuallyConsistent()
for {
// user the folders api to list folders.
folderParams := admin.SubFoldersParams{
Folder: f.ToAssetFolderAPI(remotePrefix),
MaxResults: 500,
}
if nextCursor != "" {
folderParams.NextCursor = nextCursor
}
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
if err != nil {
return nil, fmt.Errorf("failed to list sub-folders: %w", err)
}
if results.Error.Message != "" {
if strings.HasPrefix(results.Error.Message, "Can't find folder with path") {
return nil, fs.ErrorDirNotFound
}
return nil, fmt.Errorf("failed to list sub-folders: %s", results.Error.Message)
}
for _, folder := range results.Folders {
relativePath := api.CloudinaryEncoder.ToStandardPath(f, strings.TrimPrefix(folder.Path, remotePrefix))
parts := strings.Split(relativePath, "/")
// It's a directory
dirName := parts[len(parts)-1]
if _, found := dirs[dirName]; !found {
d := fs.NewDir(path.Join(dir, dirName), time.Time{})
entries = append(entries, d)
dirs[dirName] = struct{}{}
}
}
// Break if there are no more results
if results.NextCursor == "" {
break
}
nextCursor = results.NextCursor
}
for {
// Use the assets.AssetsByAssetFolder API to list assets
assetsParams := admin.AssetsByAssetFolderParams{
AssetFolder: remotePrefix,
MaxResults: 500,
}
if nextCursor != "" {
assetsParams.NextCursor = nextCursor
}
results, err := f.cld.Admin.AssetsByAssetFolder(ctx, assetsParams)
if err != nil {
return nil, fmt.Errorf("failed to list assets: %w", err)
}
for _, asset := range results.Assets {
remote := api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName)
if dir != "" {
remote = path.Join(dir, api.CloudinaryEncoder.ToStandardName(f, asset.DisplayName))
}
o := &Object{
fs: f,
remote: remote,
size: int64(asset.Bytes),
modTime: asset.CreatedAt,
url: asset.SecureURL,
publicID: asset.PublicID,
resourceType: asset.AssetType,
deliveryType: asset.Type,
}
entries = append(entries, o)
}
// Break if there are no more results
if results.NextCursor == "" {
break
}
nextCursor = results.NextCursor
}
return entries, nil
}
// NewObject finds the Object at remote. If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
searchParams := search.Query{
Expression: fmt.Sprintf("asset_folder:\"%s\" AND display_name:\"%s\"",
f.FromStandardFullPath(cldPathDir(remote)),
f.ToDisplayNameElastic(api.CloudinaryEncoder.FromStandardName(f, path.Base(remote)))),
SortBy: []search.SortByField{{"uploaded_at": "desc"}},
MaxResults: 2,
}
var results *admin.SearchResult
f.WaitEventuallyConsistent()
err := f.pacer.Call(func() (bool, error) {
var err1 error
results, err1 = f.cld.Admin.Search(ctx, searchParams)
if err1 == nil && results.TotalCount != len(results.Assets) {
err1 = errors.New("partial response so waiting for eventual consistency")
}
return shouldRetry(ctx, nil, err1)
})
if err != nil {
return nil, fs.ErrorObjectNotFound
}
if results.TotalCount == 0 || len(results.Assets) == 0 {
return nil, fs.ErrorObjectNotFound
}
asset := results.Assets[0]
o := &Object{
fs: f,
remote: remote,
size: int64(asset.Bytes),
modTime: asset.UploadedAt,
url: asset.SecureURL,
md5sum: asset.Etag,
publicID: asset.PublicID,
resourceType: asset.ResourceType,
deliveryType: asset.Type,
}
return o, nil
}
func (f *Fs) getSuggestedPublicID(assetFolder string, displayName string, modTime time.Time) string {
payload := []byte(path.Join(assetFolder, displayName))
hash := blake3.Sum256(payload)
return hex.EncodeToString(hash[:])
}
// Put uploads content to Cloudinary
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if src.Size() == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}
params := uploader.UploadParams{
UploadPreset: f.opt.UploadPreset,
}
updateObject := false
var modTime time.Time
for _, option := range options {
if updateOptions, ok := option.(*api.UpdateOptions); ok {
if updateOptions.PublicID != "" {
updateObject = true
params.Overwrite = SDKApi.Bool(true)
params.Invalidate = SDKApi.Bool(true)
params.PublicID = updateOptions.PublicID
params.ResourceType = updateOptions.ResourceType
params.Type = SDKApi.DeliveryType(updateOptions.DeliveryType)
params.AssetFolder = updateOptions.AssetFolder
params.DisplayName = updateOptions.DisplayName
modTime = src.ModTime(ctx)
}
}
}
if !updateObject {
params.AssetFolder = f.FromStandardFullPath(cldPathDir(src.Remote()))
params.DisplayName = api.CloudinaryEncoder.FromStandardName(f, path.Base(src.Remote()))
// We want to conform to the unique asset ID of rclone, which is (asset_folder,display_name,last_modified).
// We also want to enable customers to choose their own public_id, in case duplicate names are not a crucial use case.
// Upload_presets that apply randomness to the public ID would not work well with rclone duplicate assets support.
params.FilenameOverride = f.getSuggestedPublicID(params.AssetFolder, params.DisplayName, src.ModTime(ctx))
}
uploadResult, err := f.cld.Upload.Upload(ctx, in, params)
f.lastCRUD = time.Now()
if err != nil {
return nil, fmt.Errorf("failed to upload to Cloudinary: %w", err)
}
if !updateObject {
modTime = uploadResult.CreatedAt
}
if uploadResult.Error.Message != "" {
return nil, errors.New(uploadResult.Error.Message)
}
o := &Object{
fs: f,
remote: src.Remote(),
size: int64(uploadResult.Bytes),
modTime: modTime,
url: uploadResult.SecureURL,
md5sum: uploadResult.Etag,
publicID: uploadResult.PublicID,
resourceType: uploadResult.ResourceType,
deliveryType: uploadResult.Type,
}
return o, nil
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
return fs.ModTimeNotSupported
}
// Hashes returns the supported hash sets
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// Mkdir creates empty folders
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
params := admin.CreateFolderParams{Folder: f.ToAssetFolderAPI(f.FromStandardFullPath(dir))}
res, err := f.cld.Admin.CreateFolder(ctx, params)
f.lastCRUD = time.Now()
if err != nil {
return err
}
if res.Error.Message != "" {
return errors.New(res.Error.Message)
}
return nil
}
// Rmdir deletes empty folders
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
// Additional test because Cloudinary will delete folders without
// assets, regardless of empty sub-folders
folder := f.ToAssetFolderAPI(f.FromStandardFullPath(dir))
folderParams := admin.SubFoldersParams{
Folder: folder,
MaxResults: 1,
}
results, err := f.cld.Admin.SubFolders(ctx, folderParams)
if err != nil {
return err
}
if results.TotalCount > 0 {
return fs.ErrorDirectoryNotEmpty
}
params := admin.DeleteFolderParams{Folder: folder}
res, err := f.cld.Admin.DeleteFolder(ctx, params)
f.lastCRUD = time.Now()
if err != nil {
return err
}
if res.Error.Message != "" {
if strings.HasPrefix(res.Error.Message, "Can't find folder with path") {
return fs.ErrorDirNotFound
}
return errors.New(res.Error.Message)
}
return nil
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
420, // Too Many Requests (legacy)
429, // Too Many Requests
500, // Internal Server Error
502, // Bad Gateway
503, // Service Unavailable
504, // Gateway Timeout
509, // Bandwidth Limit Exceeded
}
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if err != nil {
tryAgain := "Try again on "
if idx := strings.Index(err.Error(), tryAgain); idx != -1 {
layout := "2006-01-02 15:04:05 UTC"
dateStr := err.Error()[idx+len(tryAgain) : idx+len(tryAgain)+len(layout)]
timestamp, err2 := time.Parse(layout, dateStr)
if err2 == nil {
return true, fserrors.NewErrorRetryAfter(time.Until(timestamp))
}
}
fs.Debugf(nil, "Retrying API error %v", err)
return true, err
}
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// ------------------------------------------------------------
// Hash returns the MD5 of an object
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
if ty != hash.MD5 {
return "", hash.ErrUnsupported
}
return o.md5sum, nil
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
return o.modTime
}
// Size of object in bytes
func (o *Object) Size() int64 {
return o.size
}
// Storable returns if this object is storable
func (o *Object) Storable() bool {
return true
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
return fs.ErrorCantSetModTime
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
var resp *http.Response
opts := rest.Opts{
Method: "GET",
RootURL: o.url,
Options: options,
}
var offset int64
var count int64
var key string
var value string
fs.FixRangeOption(options, o.size)
for _, option := range options {
switch x := option.(type) {
case *fs.RangeOption:
offset, count = x.Decode(o.size)
if count < 0 {
count = o.size - offset
}
key, value = option.Header()
case *fs.SeekOption:
offset = x.Offset
count = o.size - offset
key, value = option.Header()
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
if key != "" && value != "" {
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders[key] = value
}
// Make sure that the asset is fully available
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)
if err == nil {
cl, clErr := strconv.Atoi(resp.Header.Get("content-length"))
if clErr == nil && count == int64(cl) {
return false, nil
}
}
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed download of \"%s\": %w", o.url, err)
}
return resp.Body, err
}
// Update the object with the contents of the io.Reader
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
options = append(options, &api.UpdateOptions{
PublicID: o.publicID,
ResourceType: o.resourceType,
DeliveryType: o.deliveryType,
DisplayName: api.CloudinaryEncoder.FromStandardName(o.fs, path.Base(o.Remote())),
AssetFolder: o.fs.FromStandardFullPath(cldPathDir(o.Remote())),
})
updatedObj, err := o.fs.Put(ctx, in, src, options...)
if err != nil {
return err
}
if uo, ok := updatedObj.(*Object); ok {
o.size = uo.size
o.modTime = time.Now() // Skipping uo.modTime because the API returns the create time
o.url = uo.url
o.md5sum = uo.md5sum
o.publicID = uo.publicID
o.resourceType = uo.resourceType
o.deliveryType = uo.deliveryType
}
return nil
}
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
params := uploader.DestroyParams{
PublicID: o.publicID,
ResourceType: o.resourceType,
Type: o.deliveryType,
}
res, dErr := o.fs.cld.Upload.Destroy(ctx, params)
o.fs.lastCRUD = time.Now()
if dErr != nil {
return dErr
}
if res.Error.Message != "" {
return errors.New(res.Error.Message)
}
if res.Result != "ok" {
return errors.New(res.Result)
}
return nil
}

View File

@@ -1,23 +0,0 @@
// Test Cloudinary filesystem interface
package cloudinary_test
import (
"testing"
"github.com/rclone/rclone/backend/cloudinary"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
name := "TestCloudinary"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*cloudinary.Object)(nil),
SkipInvalidUTF8: true,
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "eventually_consistent_delay", Value: "7"},
},
})
}

View File

@@ -80,10 +80,9 @@ const (
// Globals
var (
// Description of how to auth for this app
driveConfig = &oauthutil.Config{
driveConfig = &oauth2.Config{
Scopes: []string{scopePrefix + "drive"},
AuthURL: google.Endpoint.AuthURL,
TokenURL: google.Endpoint.TokenURL,
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
@@ -3525,14 +3524,14 @@ func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTras
return f.unTrash(ctx, dir, directoryID, true)
}
// copy or move file with id to dest
func (f *Fs) copyOrMoveID(ctx context.Context, operation string, id, dest string) (err error) {
// copy file with id to dest
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
info, err := f.getFile(ctx, id, f.getFileFields(ctx))
if err != nil {
return fmt.Errorf("couldn't find id: %w", err)
}
if info.MimeType == driveFolderType {
return fmt.Errorf("can't %s directory use: rclone %s --drive-root-folder-id %s %s %s", operation, operation, id, fs.ConfigString(f), dest)
return fmt.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
}
info.Name = f.opt.Enc.ToStandardName(info.Name)
o, err := f.newObjectWithInfo(ctx, info.Name, info)
@@ -3553,15 +3552,9 @@ func (f *Fs) copyOrMoveID(ctx context.Context, operation string, id, dest string
if err != nil {
return err
}
var opErr error
if operation == "moveid" {
_, opErr = operations.Move(ctx, dstFs, nil, destLeaf, o)
} else {
_, opErr = operations.Copy(ctx, dstFs, nil, destLeaf, o)
}
if opErr != nil {
return fmt.Errorf("%s failed: %w", operation, opErr)
_, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
if err != nil {
return fmt.Errorf("copy failed: %w", err)
}
return nil
}
@@ -3798,28 +3791,6 @@ attempted if possible.
Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
`,
}, {
Name: "moveid",
Short: "Move files by ID",
Long: `This command moves files by ID
Usage:
rclone backend moveid drive: ID path
rclone backend moveid drive: ID1 path1 ID2 path2
It moves the drive file with ID given to the path (an rclone path which
will be passed internally to rclone moveto).
The path should end with a / to indicate move the file as named to
this directory. If it doesn't end with a / then the last path
component will be used as the file name.
If the destination is a drive backend then server-side moving will be
attempted if possible.
Use the --interactive/-i or --dry-run flag to see what would be moved beforehand.
`,
}, {
Name: "exportformats",
Short: "Dump the export formats for debug purposes",
@@ -3998,16 +3969,16 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
dir = arg[0]
}
return f.unTrashDir(ctx, dir, true)
case "copyid", "moveid":
case "copyid":
if len(arg)%2 != 0 {
return nil, errors.New("need an even number of arguments")
}
for len(arg) > 0 {
id, dest := arg[0], arg[1]
arg = arg[2:]
err = f.copyOrMoveID(ctx, name, id, dest)
err = f.copyID(ctx, id, dest)
if err != nil {
return nil, fmt.Errorf("failed %s %q to %q: %w", name, id, dest, err)
return nil, fmt.Errorf("failed copying %q to %q: %w", id, dest, err)
}
}
return nil, nil

View File

@@ -479,8 +479,8 @@ func (f *Fs) InternalTestUnTrash(t *testing.T) {
require.NoError(t, f.Purge(ctx, "trashDir"))
}
// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyOrMoveID
func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
// TestIntegration/FsMkdir/FsPutFiles/Internal/CopyID
func (f *Fs) InternalTestCopyID(t *testing.T) {
ctx := context.Background()
obj, err := f.NewObject(ctx, existingFile)
require.NoError(t, err)
@@ -498,7 +498,7 @@ func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
}
t.Run("BadID", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "moveid", "ID-NOT-FOUND", dir+"/")
err = f.copyID(ctx, "ID-NOT-FOUND", dir+"/")
require.Error(t, err)
assert.Contains(t, err.Error(), "couldn't find id")
})
@@ -506,31 +506,19 @@ func (f *Fs) InternalTestCopyOrMoveID(t *testing.T) {
t.Run("Directory", func(t *testing.T) {
rootID, err := f.dirCache.RootID(ctx, false)
require.NoError(t, err)
err = f.copyOrMoveID(ctx, "moveid", rootID, dir+"/")
err = f.copyID(ctx, rootID, dir+"/")
require.Error(t, err)
assert.Contains(t, err.Error(), "can't moveid directory")
assert.Contains(t, err.Error(), "can't copy directory")
})
t.Run("MoveWithoutDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/")
t.Run("WithoutDestName", func(t *testing.T) {
err = f.copyID(ctx, o.id, dir+"/")
require.NoError(t, err)
checkFile(path.Base(existingFile))
})
t.Run("CopyWithoutDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/")
require.NoError(t, err)
checkFile(path.Base(existingFile))
})
t.Run("MoveWithDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "moveid", o.id, dir+"/potato.txt")
require.NoError(t, err)
checkFile("potato.txt")
})
t.Run("CopyWithDestName", func(t *testing.T) {
err = f.copyOrMoveID(ctx, "copyid", o.id, dir+"/potato.txt")
t.Run("WithDestName", func(t *testing.T) {
err = f.copyID(ctx, o.id, dir+"/potato.txt")
require.NoError(t, err)
checkFile("potato.txt")
})
@@ -659,7 +647,7 @@ func (f *Fs) InternalTest(t *testing.T) {
})
t.Run("Shortcuts", f.InternalTestShortcuts)
t.Run("UnTrash", f.InternalTestUnTrash)
t.Run("CopyOrMoveID", f.InternalTestCopyOrMoveID)
t.Run("CopyID", f.InternalTestCopyID)
t.Run("Query", f.InternalTestQuery)
t.Run("AgeQuery", f.InternalTestAgeQuery)
t.Run("ShouldRetry", f.InternalTestShouldRetry)

View File

@@ -11,6 +11,7 @@ import (
"fmt"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fs/fserrors"
)
// finishBatch commits the batch, returning a batch status to poll or maybe complete
@@ -20,10 +21,14 @@ func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinish
}
err = f.pacer.Call(func() (bool, error) {
complete, err = f.srv.UploadSessionFinishBatchV2(arg)
if retry, err := shouldRetryExclude(ctx, err); !retry {
return retry, err
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
}
// after the first chunk is uploaded, we retry everything except the excluded errors
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
if err != nil {

View File

@@ -94,7 +94,7 @@ const (
var (
// Description of how to auth for this app
dropboxConfig = &oauthutil.Config{
dropboxConfig = &oauth2.Config{
Scopes: []string{
"files.metadata.write",
"files.content.write",
@@ -109,8 +109,7 @@ var (
// AuthURL: "https://www.dropbox.com/1/oauth2/authorize",
// TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
// },
AuthURL: dropbox.OAuthEndpoint("").AuthURL,
TokenURL: dropbox.OAuthEndpoint("").TokenURL,
Endpoint: dropbox.OAuthEndpoint(""),
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -135,7 +134,7 @@ var (
)
// Gets an oauth config with the right scopes
func getOauthConfig(m configmap.Mapper) *oauthutil.Config {
func getOauthConfig(m configmap.Mapper) *oauth2.Config {
// If not impersonating, use standard scopes
if impersonate, _ := m.Get("impersonate"); impersonate == "" {
return dropboxConfig
@@ -318,46 +317,32 @@ func (f *Fs) Features() *fs.Features {
return f.features
}
// Some specific errors which should be excluded from retries
func shouldRetryExclude(ctx context.Context, err error) (bool, error) {
if err == nil {
return false, err
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
// First check for specific errors
//
// These come back from the SDK in a whole host of different
// error types, but there doesn't seem to be a consistent way
// of reading the error cause, so here we just check using the
// error string which isn't perfect but does the job.
if err == nil {
return false, err
}
errString := err.Error()
// First check for specific errors
if strings.Contains(errString, "insufficient_space") {
return false, fserrors.FatalError(err)
} else if strings.Contains(errString, "malformed_path") {
return false, fserrors.NoRetryError(err)
}
return true, err
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if retry, err := shouldRetryExclude(ctx, err); !retry {
return retry, err
}
// Then handle any official Retry-After header from Dropbox's SDK
switch e := err.(type) {
case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 {
fs.Logf(nil, "Error %v. Too many requests or write operations. Trying again in %d seconds.", err, e.RateLimitError.RetryAfter)
fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter)
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
}
return true, err
}
// Keep old behavior for backward compatibility
errString := err.Error()
if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
return true, err
}
@@ -1714,10 +1699,14 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
err = o.fs.pacer.Call(func() (bool, error) {
entry, err = o.fs.srv.UploadSessionFinish(args, nil)
if retry, err := shouldRetryExclude(ctx, err); !retry {
return retry, err
// If error is insufficient space then don't retry
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
}
// after the first chunk is uploaded, we retry everything except the excluded errors
// after the first chunk is uploaded, we retry everything
return err != nil, err
})
if err != nil {



@@ -60,17 +60,14 @@ const (
minSleep = 10 * time.Millisecond
)
var (
// Description of how to auth for this app
storageConfig = &oauthutil.Config{
Scopes: []string{storage.DevstorageReadWriteScope},
AuthURL: google.Endpoint.AuthURL,
TokenURL: google.Endpoint.TokenURL,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
}
)
// Description of how to auth for this app
var storageConfig = &oauth2.Config{
Scopes: []string{storage.DevstorageReadWriteScope},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
}
// Register with Fs
func init() {
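
A pattern that repeats through the rest of this comparison: one side of each hunk embeds a stock oauth2.Config with a nested oauth2.Endpoint, the other uses rclone's own oauthutil.Config, which carries AuthURL and TokenURL as plain fields (and exposes MakeOauth2Config() where a real oauth2.Config is still needed, as one of the later hunks does). A minimal sketch of the two shapes, assuming an rclone tree where lib/oauthutil defines those fields; all values are placeholders:

```go
// Sketch of the two OAuth config shapes seen throughout this diff.
package example

import (
	"github.com/rclone/rclone/lib/oauthutil"
	"golang.org/x/oauth2"
)

const (
	exampleAuthURL  = "https://example.com/oauth2/authorize" // placeholder
	exampleTokenURL = "https://example.com/oauth2/token"     // placeholder
	exampleClientID = "example-client-id"                    // placeholder
)

// Old shape: stock oauth2.Config with the URLs nested in an Endpoint.
var oldConf = &oauth2.Config{
	Scopes:      []string{"example.scope"},
	Endpoint:    oauth2.Endpoint{AuthURL: exampleAuthURL, TokenURL: exampleTokenURL},
	ClientID:    exampleClientID,
	RedirectURL: oauthutil.RedirectLocalhostURL,
}

// New shape: rclone's oauthutil.Config carries AuthURL/TokenURL directly,
// so code that rewrites endpoints (regions, tenants, custom hosts) can set
// the fields without reaching into Endpoint.
var newConf = &oauthutil.Config{
	Scopes:      []string{"example.scope"},
	AuthURL:     exampleAuthURL,
	TokenURL:    exampleTokenURL,
	ClientID:    exampleClientID,
	RedirectURL: oauthutil.RedirectLocalhostURL,
}
```
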


@@ -33,6 +33,7 @@ import (
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
)
@@ -59,14 +60,13 @@ const (
var (
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
oauthConfig = &oauth2.Config{
Scopes: []string{
"openid",
"profile",
scopeReadWrite, // this must be at position scopeAccess
},
AuthURL: google.Endpoint.AuthURL,
TokenURL: google.Endpoint.TokenURL,
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
@@ -1168,7 +1168,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
errors := make([]error, 1)
results := make([]*api.MediaItem, 1)
err = o.fs.commitBatch(ctx, []uploadedItem{uploaded}, results, errors)
if err == nil {
if err != nil {
err = errors[0]
info = results[0]
}


@@ -2,7 +2,6 @@ package googlephotos
import (
"context"
"errors"
"fmt"
"io"
"net/http"
@@ -36,7 +35,7 @@ func TestIntegration(t *testing.T) {
*fstest.RemoteName = "TestGooglePhotos:"
}
f, err := fs.NewFs(ctx, *fstest.RemoteName)
if errors.Is(err, fs.ErrorNotFoundInConfigFile) {
if err == fs.ErrorNotFoundInConfigFile {
t.Skipf("Couldn't create google photos backend - skipping tests: %v", err)
}
require.NoError(t, err)


@@ -31,6 +31,7 @@ import (
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
)
const (
@@ -47,9 +48,11 @@ const (
// Globals
var (
// Description of how to auth for this app.
oauthConfig = &oauthutil.Config{
AuthURL: "https://my.hidrive.com/client/authorize",
TokenURL: "https://my.hidrive.com/oauth2/token",
oauthConfig = &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: "https://my.hidrive.com/client/authorize",
TokenURL: "https://my.hidrive.com/oauth2/token",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.TitleBarRedirectURL,


@@ -331,13 +331,12 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// Join's the remote onto the base URL
func (f *Fs) url(remote string) string {
trimmedRemote := strings.TrimLeft(remote, "/") // remove leading "/" since we always have it in f.endpointURL
if f.opt.NoEscape {
// Directly concatenate without escaping, no_escape behavior
return f.endpointURL + trimmedRemote
return f.endpointURL + remote
}
// Default behavior
return f.endpointURL + rest.URLPathEscape(trimmedRemote)
return f.endpointURL + rest.URLPathEscape(remote)
}
// Errors returned by parseName


@@ -191,33 +191,6 @@ func TestNewObject(t *testing.T) {
assert.Equal(t, fs.ErrorObjectNotFound, err)
}
func TestNewObjectWithLeadingSlash(t *testing.T) {
f := prepare(t)
o, err := f.NewObject(context.Background(), "/four/under four.txt")
require.NoError(t, err)
assert.Equal(t, "/four/under four.txt", o.Remote())
assert.Equal(t, int64(8+lineEndSize), o.Size())
_, ok := o.(*Object)
assert.True(t, ok)
// Test the time is correct on the object
tObj := o.ModTime(context.Background())
fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
require.NoError(t, err)
tFile := fi.ModTime()
fstest.AssertTimeEqualWithPrecision(t, o.Remote(), tFile, tObj, time.Second)
// check object not found
o, err = f.NewObject(context.Background(), "/not found.txt")
assert.Nil(t, o)
assert.Equal(t, fs.ErrorObjectNotFound, err)
}
func TestOpen(t *testing.T) {
m := prepareServer(t)


@@ -445,7 +445,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
}
// build request
// can't use normal rename as file needs to be "activated" first
// cant use normal rename as file needs to be "activated" first
r := api.NewUpdateFileInfo()
r.DocumentID = doc.DocumentID


@@ -75,7 +75,7 @@ type MoveFolderParam struct {
DestinationPath string `validate:"nonzero" json:"destinationPath"`
}
// JobIDResponse represents response struct with JobID for folder operations
// JobIDResponse respresents response struct with JobID for folder operations
type JobIDResponse struct {
JobID string `json:"jobId"`
}


@@ -151,19 +151,6 @@ Owner is able to add custom keys. Metadata feature grabs all the keys including
Help: "Host of InternetArchive Frontend.\n\nLeave blank for default value.",
Default: "https://archive.org",
Advanced: true,
}, {
Name: "item_metadata",
Help: `Metadata to be set on the IA item, this is different from file-level metadata that can be set using --metadata-set.
Format is key=value and the 'x-archive-meta-' prefix is automatically added.`,
Default: []string{},
Hide: fs.OptionHideConfigurator,
Advanced: true,
}, {
Name: "item_derive",
Help: `Whether to trigger derive on the IA item or not. If set to false, the item will not be derived by IA upon upload.
The derive process produces a number of secondary files from an upload to make an upload more usable on the web.
Setting this to false is useful for uploading files that are already in a format that IA can display or reduce burden on IA's infrastructure.`,
Default: true,
}, {
Name: "disable_checksum",
Help: `Don't ask the server to test against MD5 checksum calculated by rclone.
@@ -214,8 +201,6 @@ type Options struct {
Endpoint string `config:"endpoint"`
FrontEndpoint string `config:"front_endpoint"`
DisableChecksum bool `config:"disable_checksum"`
ItemMetadata []string `config:"item_metadata"`
ItemDerive bool `config:"item_derive"`
WaitArchive fs.Duration `config:"wait_archive"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -805,23 +790,17 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
"x-amz-filemeta-rclone-update-track": updateTracker,
// we add some more headers for intuitive actions
"x-amz-auto-make-bucket": "1", // create an item if does not exist, do nothing if already
"x-archive-auto-make-bucket": "1", // same as above in IAS3 original way
"x-archive-keep-old-version": "0", // do not keep old versions (a.k.a. trashes in other clouds)
"x-archive-cascade-delete": "1", // enable "cascate delete" (delete all derived files in addition to the file itself)
"x-amz-auto-make-bucket": "1", // create an item if does not exist, do nothing if already
"x-archive-auto-make-bucket": "1", // same as above in IAS3 original way
"x-archive-keep-old-version": "0", // do not keep old versions (a.k.a. trashes in other clouds)
"x-archive-meta-mediatype": "data", // mark media type of the uploading file as "data"
"x-archive-queue-derive": "0", // skip derivation process (e.g. encoding to smaller files, OCR on PDFs)
"x-archive-cascade-delete": "1", // enable "cascate delete" (delete all derived files in addition to the file itself)
}
if size >= 0 {
headers["Content-Length"] = fmt.Sprintf("%d", size)
headers["x-archive-size-hint"] = fmt.Sprintf("%d", size)
}
// This is IA's ITEM metadata, not file metadata
headers, err = o.appendItemMetadataHeaders(headers, o.fs.opt)
if err != nil {
return err
}
var mdata fs.Metadata
mdata, err = fs.GetMetadataOptions(ctx, o.fs, src, options)
if err == nil && mdata != nil {
@@ -884,51 +863,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
func (o *Object) appendItemMetadataHeaders(headers map[string]string, options Options) (newHeaders map[string]string, err error) {
metadataCounter := make(map[string]int)
metadataValues := make(map[string][]string)
// First pass: count occurrences and collect values
for _, v := range options.ItemMetadata {
parts := strings.SplitN(v, "=", 2)
if len(parts) != 2 {
return newHeaders, errors.New("item metadata key=value should be in the form key=value")
}
key, value := parts[0], parts[1]
metadataCounter[key]++
metadataValues[key] = append(metadataValues[key], value)
}
// Second pass: add headers with appropriate prefixes
for key, count := range metadataCounter {
if count == 1 {
// Only one occurrence, use x-archive-meta-
headers[fmt.Sprintf("x-archive-meta-%s", key)] = metadataValues[key][0]
} else {
// Multiple occurrences, use x-archive-meta01-, x-archive-meta02-, etc.
for i, value := range metadataValues[key] {
headers[fmt.Sprintf("x-archive-meta%02d-%s", i+1, key)] = value
}
}
}
if o.fs.opt.ItemDerive {
headers["x-archive-queue-derive"] = "1"
} else {
headers["x-archive-queue-derive"] = "0"
}
fs.Debugf(o, "Setting IA item derive: %t", o.fs.opt.ItemDerive)
for k, v := range headers {
if strings.HasPrefix(k, "x-archive-meta") {
fs.Debugf(o, "Setting IA item metadata: %s=%s", k, v)
}
}
return headers, nil
}
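
On the command line the new option would normally be reached through rclone's generated backend flags (something like --internetarchive-item-metadata title=Foo; the exact flag name is rclone's usual derivation, not stated in this diff). The numbering rule applied above can be exercised on its own: a key given once becomes a plain x-archive-meta- header, while a repeated key fans out to x-archive-meta01-, x-archive-meta02-, and so on. A standalone sketch of just that transformation:

```go
package main

import (
	"fmt"
	"strings"
)

// expand mirrors the prefixing rule above for a list of "key=value" pairs:
// single keys get x-archive-meta-, repeated keys get numbered prefixes.
func expand(items []string) map[string]string {
	values := map[string][]string{}
	for _, kv := range items {
		parts := strings.SplitN(kv, "=", 2)
		if len(parts) != 2 {
			continue // the real code returns an error here
		}
		values[parts[0]] = append(values[parts[0]], parts[1])
	}
	headers := map[string]string{}
	for key, vs := range values {
		if len(vs) == 1 {
			headers["x-archive-meta-"+key] = vs[0]
			continue
		}
		for i, v := range vs {
			headers[fmt.Sprintf("x-archive-meta%02d-%s", i+1, key)] = v
		}
	}
	return headers
}

func main() {
	h := expand([]string{"title=Foo", "collection=a", "collection=b"})
	fmt.Println(h["x-archive-meta-title"])        // Foo
	fmt.Println(h["x-archive-meta01-collection"]) // a
	fmt.Println(h["x-archive-meta02-collection"]) // b
}
```
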
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split()


@@ -277,9 +277,11 @@ machines.`)
m.Set(configClientID, teliaseCloudClientID)
m.Set(configTokenURL, teliaseCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauthutil.Config{
AuthURL: teliaseCloudAuthURL,
TokenURL: teliaseCloudTokenURL,
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: teliaseCloudAuthURL,
TokenURL: teliaseCloudTokenURL,
},
ClientID: teliaseCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -290,9 +292,11 @@ machines.`)
m.Set(configClientID, telianoCloudClientID)
m.Set(configTokenURL, telianoCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauthutil.Config{
AuthURL: telianoCloudAuthURL,
TokenURL: telianoCloudTokenURL,
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: telianoCloudAuthURL,
TokenURL: telianoCloudTokenURL,
},
ClientID: telianoCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -303,9 +307,11 @@ machines.`)
m.Set(configClientID, tele2CloudClientID)
m.Set(configTokenURL, tele2CloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauthutil.Config{
AuthURL: tele2CloudAuthURL,
TokenURL: tele2CloudTokenURL,
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: tele2CloudAuthURL,
TokenURL: tele2CloudTokenURL,
},
ClientID: tele2CloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -316,9 +322,11 @@ machines.`)
m.Set(configClientID, onlimeCloudClientID)
m.Set(configTokenURL, onlimeCloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauthutil.Config{
AuthURL: onlimeCloudAuthURL,
TokenURL: onlimeCloudTokenURL,
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: onlimeCloudAuthURL,
TokenURL: onlimeCloudTokenURL,
},
ClientID: onlimeCloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -916,17 +924,19 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
}
baseClient := fshttp.NewClient(ctx)
oauthConfig := &oauthutil.Config{
AuthURL: defaultTokenURL,
TokenURL: defaultTokenURL,
oauthConfig := &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: defaultTokenURL,
TokenURL: defaultTokenURL,
},
}
if ver == configVersion {
oauthConfig.ClientID = defaultClientID
// if custom endpoints are set use them else stick with defaults
if tokenURL, ok := m.Get(configTokenURL); ok {
oauthConfig.TokenURL = tokenURL
oauthConfig.Endpoint.TokenURL = tokenURL
// jottacloud is weird. we need to use the tokenURL as authURL
oauthConfig.AuthURL = tokenURL
oauthConfig.Endpoint.AuthURL = tokenURL
}
} else if ver == legacyConfigVersion {
clientID, ok := m.Get(configClientID)
@@ -940,8 +950,8 @@ func getOAuthClient(ctx context.Context, name string, m configmap.Mapper) (oAuth
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
oauthConfig.TokenURL = legacyTokenURL
oauthConfig.AuthURL = legacyTokenURL
oauthConfig.Endpoint.TokenURL = legacyTokenURL
oauthConfig.Endpoint.AuthURL = legacyTokenURL
// add the request filter to fix token refresh
if do, ok := baseClient.Transport.(interface {


@@ -5,18 +5,18 @@ package local
import (
"context"
"fmt"
"syscall"
"unsafe"
"github.com/rclone/rclone/fs"
"golang.org/x/sys/windows"
)
var getFreeDiskSpace = windows.NewLazySystemDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var available, total, free int64
root, e := windows.UTF16PtrFromString(f.root)
root, e := syscall.UTF16PtrFromString(f.root)
if e != nil {
return nil, fmt.Errorf("failed to read disk usage: %w", e)
}
@@ -26,7 +26,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes
uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes
)
if e1 != windows.Errno(0) {
if e1 != syscall.Errno(0) {
return nil, fmt.Errorf("failed to read disk usage: %w", e1)
}
usage := &fs.Usage{


@@ -34,6 +34,7 @@ import (
// Constants
const (
devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
)
@@ -100,8 +101,10 @@ Metadata is supported on files and directories.
},
{
Name: "links",
Help: "Translate symlinks to/from regular files with a '" + fs.LinkSuffix + "' extension for the local backend.",
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
Default: false,
NoPrefix: true,
ShortOpt: "l",
Advanced: true,
},
{
@@ -376,22 +379,17 @@ type Directory struct {
var (
errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
errLinksNeedsSuffix = errors.New("need \"" + fs.LinkSuffix + "\" suffix to refer to symlink when using -l/--links")
errLinksNeedsSuffix = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
)
// NewFs constructs an Fs from the path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
ci := fs.GetConfig(ctx)
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
// Override --local-links with --links if set
if ci.Links {
opt.TranslateSymlinks = true
}
if opt.TranslateSymlinks && opt.FollowSymlinks {
return nil, errLinksAndCopyLinks
}
@@ -437,9 +435,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.dev = readDevice(fi, f.opt.OneFileSystem)
}
// Check to see if this is a .rclonelink if not found
hasLinkSuffix := strings.HasSuffix(f.root, fs.LinkSuffix)
hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
fi, err = f.lstat(strings.TrimSuffix(f.root, fs.LinkSuffix))
fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
}
if err == nil && f.isRegular(fi.Mode()) {
// Handle the odd case, that a symlink was specified by name without the link suffix
@@ -510,8 +508,8 @@ func (f *Fs) caseInsensitive() bool {
//
// for regular files, localPath is returned unchanged
func translateLink(remote, localPath string) (newLocalPath string, isTranslatedLink bool) {
isTranslatedLink = strings.HasSuffix(remote, fs.LinkSuffix)
newLocalPath = strings.TrimSuffix(localPath, fs.LinkSuffix)
isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
newLocalPath = strings.TrimSuffix(localPath, linkSuffix)
return newLocalPath, isTranslatedLink
}
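
The helper is the inverse of what listing does below: listings append the link suffix to symlinks, and object lookup strips it again to find the real path on disk. A small standalone illustration of the mapping, reusing the ".rclonelink" value shown in this hunk:

```go
package main

import (
	"fmt"
	"strings"
)

const linkSuffix = ".rclonelink" // suffix value shown in the diff above

// translateLink is reproduced here only to illustrate the mapping.
func translateLink(remote, localPath string) (string, bool) {
	isLink := strings.HasSuffix(remote, linkSuffix)
	return strings.TrimSuffix(localPath, linkSuffix), isLink
}

func main() {
	p, isLink := translateLink("symlink.txt"+linkSuffix, "/data/symlink.txt"+linkSuffix)
	fmt.Println(p, isLink) // /data/symlink.txt true

	p, isLink = translateLink("file.txt", "/data/file.txt")
	fmt.Println(p, isLink) // /data/file.txt false
}
```
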
@@ -694,7 +692,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
} else {
// Check whether this link should be translated
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
newRemote += fs.LinkSuffix
newRemote += linkSuffix
}
// Don't include non directory if not included
// we leave directory filtering to the layer above


@@ -110,7 +110,7 @@ func TestSymlink(t *testing.T) {
require.NoError(t, lChtimes(symlinkPath, modTime2, modTime2))
// Object viewed as symlink
file2 := fstest.NewItem("symlink.txt"+fs.LinkSuffix, "file.txt", modTime2)
file2 := fstest.NewItem("symlink.txt"+linkSuffix, "file.txt", modTime2)
// Object viewed as destination
file2d := fstest.NewItem("symlink.txt", "hello", modTime1)
@@ -139,7 +139,7 @@ func TestSymlink(t *testing.T) {
// Create a symlink
modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+fs.LinkSuffix, "file.txt", modTime3, false)
file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
fstest.CheckListingWithPrecision(t, r.Flocal, []fstest.Item{file1, file2, file3}, nil, fs.ModTimeNotSupported)
if haveLChtimes {
r.CheckLocalItems(t, file1, file2, file3)
@@ -155,9 +155,9 @@ func TestSymlink(t *testing.T) {
assert.Equal(t, "file.txt", linkText)
// Check that NewObject gets the correct object
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+fs.LinkSuffix)
o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
require.NoError(t, err)
assert.Equal(t, "symlink2.txt"+fs.LinkSuffix, o.Remote())
assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
assert.Equal(t, int64(8), o.Size())
// Check that NewObject doesn't see the non suffixed version
@@ -165,7 +165,7 @@ func TestSymlink(t *testing.T) {
require.Equal(t, fs.ErrorObjectNotFound, err)
// Check that NewFs works with the suffixed version and --links
f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+fs.LinkSuffix), configmap.Simple{
f2, err := NewFs(ctx, "local", filepath.Join(dir, "symlink2.txt"+linkSuffix), configmap.Simple{
"links": "true",
})
require.Equal(t, fs.ErrorIsFile, err)
@@ -277,7 +277,7 @@ func TestMetadata(t *testing.T) {
// Write a symlink to the file
symlinkPath := "metafile-link.txt"
osSymlinkPath := filepath.Join(f.root, symlinkPath)
symlinkPath += fs.LinkSuffix
symlinkPath += linkSuffix
require.NoError(t, os.Symlink(filePath, osSymlinkPath))
symlinkModTime := fstest.Time("2002-02-03T04:05:10.123123123Z")
require.NoError(t, lChtimes(osSymlinkPath, symlinkModTime, symlinkModTime))


@@ -68,12 +68,14 @@ var (
)
// Description of how to authorize
var oauthConfig = &oauthutil.Config{
var oauthConfig = &oauth2.Config{
ClientID: api.OAuthClientID,
ClientSecret: "",
AuthURL: api.OAuthURL,
TokenURL: api.OAuthURL,
AuthStyle: oauth2.AuthStyleInParams,
Endpoint: oauth2.Endpoint{
AuthURL: api.OAuthURL,
TokenURL: api.OAuthURL,
AuthStyle: oauth2.AuthStyleInParams,
},
}
// Register with Fs
@@ -436,9 +438,7 @@ func (f *Fs) authorize(ctx context.Context, force bool) (err error) {
if err != nil || !tokenIsValid(t) {
fs.Infof(f, "Valid token not found, authorizing.")
ctx := oauthutil.Context(ctx, f.cli)
oauth2Conf := oauthConfig.MakeOauth2Config()
t, err = oauth2Conf.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
t, err = oauthConfig.PasswordCredentialsToken(ctx, f.opt.Username, f.opt.Password)
}
if err == nil && !tokenIsValid(t) {
err = errors.New("invalid token")


@@ -40,6 +40,7 @@ import (
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
)
const (
@@ -64,21 +65,14 @@ const (
// Globals
var (
// Define the paths used for token operations
commonPathPrefix = "/common" // prefix for the paths if tenant isn't known
authPath = "/oauth2/v2.0/authorize"
tokenPath = "/oauth2/v2.0/token"
authPath = "/common/oauth2/v2.0/authorize"
tokenPath = "/common/oauth2/v2.0/token"
scopeAccess = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "Sites.Read.All", "offline_access"}
scopeAccessWithoutSites = fs.SpaceSepList{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"}
// When using client credential OAuth flow, scope of .default is required in order
// to use the permissions configured for the application within the tenant
scopeAccessClientCred = fs.SpaceSepList{".default"}
// Base config for how to auth
oauthConfig = &oauthutil.Config{
// Description of how to auth for this app for a business account
oauthConfig = &oauth2.Config{
Scopes: scopeAccess,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
@@ -131,7 +125,7 @@ func init() {
Help: "Microsoft Cloud for US Government",
}, {
Value: regionDE,
Help: "Microsoft Cloud Germany (deprecated - try " + regionGlobal + " region first).",
Help: "Microsoft Cloud Germany",
}, {
Value: regionCN,
Help: "Azure and Office 365 operated by Vnet Group in China",
@@ -189,14 +183,6 @@ Choose or manually enter a custom space separated list with all scopes, that rcl
Help: "Read and write access to all resources, without the ability to browse SharePoint sites. \nSame as if disable_site_permission was set to true",
},
},
}, {
Name: "tenant",
Help: `ID of the service principal's tenant. Also called its directory ID.
Set this if using
- Client Credential flow
`,
Sensitive: true,
}, {
Name: "disable_site_permission",
Help: `Disable the request for Sites.Read.All permission.
@@ -541,54 +527,28 @@ func chooseDrive(ctx context.Context, name string, m configmap.Mapper, srv *rest
})
}
// Make the oauth config for the backend
func makeOauthConfig(ctx context.Context, opt *Options) (*oauthutil.Config, error) {
// Copy the default oauthConfig
oauthConfig := *oauthConfig
// Set the scopes
oauthConfig.Scopes = opt.AccessScopes
if opt.DisableSitePermission {
oauthConfig.Scopes = scopeAccessWithoutSites
}
// Construct the auth URLs
prefix := commonPathPrefix
if opt.Tenant != "" {
prefix = "/" + opt.Tenant
}
oauthConfig.TokenURL = authEndpoint[opt.Region] + prefix + tokenPath
oauthConfig.AuthURL = authEndpoint[opt.Region] + prefix + authPath
// Check to see if we are using client credentials flow
if opt.ClientCredentials {
// Override scope to .default
oauthConfig.Scopes = scopeAccessClientCred
if opt.Tenant == "" {
return nil, fmt.Errorf("tenant parameter must be set when using %s", config.ConfigClientCredentials)
}
}
return &oauthConfig, nil
}
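
The new tenant option only changes the path prefix of the two OAuth URLs: with no tenant the shared /common endpoints are kept, otherwise the tenant (directory) ID is substituted, and the client-credentials flow additionally requires a tenant and swaps the scopes for ".default". A standalone sketch of the URL construction, with a placeholder host standing in for authEndpoint[opt.Region]:

```go
package main

import "fmt"

const (
	authPath  = "/oauth2/v2.0/authorize"
	tokenPath = "/oauth2/v2.0/token"
)

// endpointURLs mirrors the prefix logic of makeOauthConfig above.
func endpointURLs(regionEndpoint, tenant string) (authURL, tokenURL string) {
	prefix := "/common"
	if tenant != "" {
		prefix = "/" + tenant
	}
	return regionEndpoint + prefix + authPath, regionEndpoint + prefix + tokenPath
}

func main() {
	// placeholder host - the real value comes from authEndpoint[opt.Region]
	a, t := endpointURLs("https://login.example.com", "")
	fmt.Println(a) // https://login.example.com/common/oauth2/v2.0/authorize
	fmt.Println(t) // https://login.example.com/common/oauth2/v2.0/token

	a, t = endpointURLs("https://login.example.com", "00000000-0000-0000-0000-000000000000")
	fmt.Println(a, t) // .../<tenant-id>/oauth2/v2.0/...
}
```
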
// Config the backend
func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.ConfigIn) (*fs.ConfigOut, error) {
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
_, graphURL := getRegionURL(m)
func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
region, graphURL := getRegionURL(m)
// Check to see if this is the start of the state machine execution
if conf.State == "" {
conf, err := makeOauthConfig(ctx, opt)
if config.State == "" {
var accessScopes fs.SpaceSepList
accessScopesString, _ := m.Get("access_scopes")
err := accessScopes.Set(accessScopesString)
if err != nil {
return nil, err
return nil, fmt.Errorf("failed to parse access_scopes: %w", err)
}
oauthConfig.Scopes = []string(accessScopes)
disableSitePermission, _ := m.Get("disable_site_permission")
if disableSitePermission == "true" {
oauthConfig.Scopes = scopeAccessWithoutSites
}
oauthConfig.Endpoint = oauth2.Endpoint{
AuthURL: authEndpoint[region] + authPath,
TokenURL: authEndpoint[region] + tokenPath,
}
return oauthutil.ConfigOut("choose_type", &oauthutil.Options{
OAuth2Config: conf,
OAuth2Config: oauthConfig,
})
}
@@ -596,11 +556,9 @@ func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.Config
if err != nil {
return nil, fmt.Errorf("failed to configure OneDrive: %w", err)
}
// Create a REST client, build on the OAuth client created above
srv := rest.NewClient(oAuthClient)
switch conf.State {
switch config.State {
case "choose_type":
return fs.ConfigChooseExclusiveFixed("choose_type_done", "config_type", "Type of connection", []fs.OptionExample{{
Value: "onedrive",
@@ -626,7 +584,7 @@ func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.Config
}})
case "choose_type_done":
// Jump to next state according to config chosen
return fs.ConfigGoto(conf.Result)
return fs.ConfigGoto(config.Result)
case "onedrive":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
opts: rest.Opts{
@@ -644,22 +602,16 @@ func Config(ctx context.Context, name string, m configmap.Mapper, conf fs.Config
},
})
case "driveid":
out, err := fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
if err != nil {
return out, err
}
// Default the drive_id to the previous version in the config
out.Option.Default, _ = m.Get("drive_id")
return out, nil
return fs.ConfigInput("driveid_end", "config_driveid_fixed", "Drive ID")
case "driveid_end":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
finalDriveID: conf.Result,
finalDriveID: config.Result,
})
case "siteid":
return fs.ConfigInput("siteid_end", "config_siteid", "Site ID")
case "siteid_end":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
siteID: conf.Result,
siteID: config.Result,
})
case "url":
return fs.ConfigInput("url_end", "config_site_url", `Site URL
@@ -670,7 +622,7 @@ Examples:
- "https://XXX.sharepoint.com/teams/ID"
`)
case "url_end":
siteURL := conf.Result
siteURL := config.Result
re := regexp.MustCompile(`https://.*\.sharepoint\.com(/.*)`)
match := re.FindStringSubmatch(siteURL)
if len(match) == 2 {
@@ -685,12 +637,12 @@ Examples:
return fs.ConfigInput("path_end", "config_sharepoint_url", `Server-relative URL`)
case "path_end":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
relativePath: conf.Result,
relativePath: config.Result,
})
case "search":
return fs.ConfigInput("search_end", "config_search_term", `Search term`)
case "search_end":
searchTerm := conf.Result
searchTerm := config.Result
opts := rest.Opts{
Method: "GET",
RootURL: graphURL,
@@ -712,10 +664,10 @@ Examples:
})
case "search_sites":
return chooseDrive(ctx, name, m, srv, chooseDriveOpt{
siteID: conf.Result,
siteID: config.Result,
})
case "driveid_final":
finalDriveID := conf.Result
finalDriveID := config.Result
// Test the driveID and get drive type
opts := rest.Opts{
@@ -734,12 +686,12 @@ Examples:
return fs.ConfigConfirm("driveid_final_end", true, "config_drive_ok", fmt.Sprintf("Drive OK?\n\nFound drive %q of type %q\nURL: %s\n", rootItem.Name, rootItem.ParentReference.DriveType, rootItem.WebURL))
case "driveid_final_end":
if conf.Result == "true" {
if config.Result == "true" {
return nil, nil
}
return fs.ConfigGoto("choose_type")
}
return nil, fmt.Errorf("unknown state %q", conf.State)
return nil, fmt.Errorf("unknown state %q", config.State)
}
// Options defines the configuration for this backend
@@ -750,9 +702,7 @@ type Options struct {
DriveType string `config:"drive_type"`
RootFolderID string `config:"root_folder_id"`
DisableSitePermission bool `config:"disable_site_permission"`
ClientCredentials bool `config:"client_credentials"`
AccessScopes fs.SpaceSepList `config:"access_scopes"`
Tenant string `config:"tenant"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ListChunk int64 `config:"list_chunk"`
@@ -1040,10 +990,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
rootURL := graphAPIEndpoint[opt.Region] + "/v1.0" + "/drives/" + opt.DriveID
oauthConfig, err := makeOauthConfig(ctx, opt)
if err != nil {
return nil, err
oauthConfig.Scopes = opt.AccessScopes
if opt.DisableSitePermission {
oauthConfig.Scopes = scopeAccessWithoutSites
}
oauthConfig.Endpoint = oauth2.Endpoint{
AuthURL: authEndpoint[opt.Region] + authPath,
TokenURL: authEndpoint[opt.Region] + tokenPath,
}
client := fshttp.NewClient(ctx)
@@ -2610,11 +2563,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errors.New("can't upload content to a OneNote file")
}
// Only start the renewer if we have a valid one
if o.fs.tokenRenewer != nil {
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
}
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()
size := src.Size()


@@ -22,7 +22,6 @@ import (
"github.com/oracle/oci-go-sdk/v65/objectstorage"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
)
@@ -184,9 +183,6 @@ func (w *objectChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, rea
if ossPartNumber <= 8 {
return shouldRetry(ctx, resp.HTTPResponse(), err)
}
if fserrors.ContextError(ctx, &err) {
return false, err
}
// retry all chunks once have done the first few
return true, err
}


@@ -106,9 +106,9 @@ func newOptions() []fs.Option {
Sensitive: true,
}, {
Name: "compartment",
Help: "Specify compartment OCID, if you need to list buckets.\n\nList objects works without compartment OCID.",
Help: "Object storage compartment OCID",
Provider: "!no_auth",
Required: false,
Required: true,
Sensitive: true,
}, {
Name: "region",


@@ -48,10 +48,12 @@ const (
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
Scopes: nil,
AuthURL: "https://my.pcloud.com/oauth2/authorize",
// TokenURL: "https://api.pcloud.com/oauth2_token", set by updateTokenURL
oauthConfig = &oauth2.Config{
Scopes: nil,
Endpoint: oauth2.Endpoint{
AuthURL: "https://my.pcloud.com/oauth2/authorize",
// TokenURL: "https://api.pcloud.com/oauth2_token", set by updateTokenURL
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -59,8 +61,8 @@ var (
)
// Update the TokenURL with the actual hostname
func updateTokenURL(oauthConfig *oauthutil.Config, hostname string) {
oauthConfig.TokenURL = "https://" + hostname + "/oauth2_token"
func updateTokenURL(oauthConfig *oauth2.Config, hostname string) {
oauthConfig.Endpoint.TokenURL = "https://" + hostname + "/oauth2_token"
}
// Register with Fs
@@ -77,7 +79,7 @@ func init() {
fs.Errorf(nil, "Failed to read config: %v", err)
}
updateTokenURL(oauthConfig, optc.Hostname)
checkAuth := func(oauthConfig *oauthutil.Config, auth *oauthutil.AuthResult) error {
checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
if auth == nil || auth.Form == nil {
return errors.New("form not found in response")
}
@@ -397,15 +399,14 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
if err != nil {
return nil, fmt.Errorf("open file: %w", err)
}
if _, err := fileClose(ctx, client, f.pacer, openResult.FileDescriptor); err != nil {
return nil, fmt.Errorf("close file: %w", err)
}
writer := &writerAt{
ctx: ctx,
client: client,
fs: f,
size: size,
remote: remote,
fd: openResult.FileDescriptor,
fileID: openResult.Fileid,
}
@@ -424,7 +425,7 @@ func (f *Fs) newSingleConnClient(ctx context.Context) (*rest.Client, error) {
})
// Set our own http client in the context
ctx = oauthutil.Context(ctx, baseClient)
// create a new oauth client, reuse the token source
// create a new oauth client, re-use the token source
oAuthClient := oauth2.NewClient(ctx, f.ts)
return rest.NewClient(oAuthClient).SetRoot("https://" + f.opt.Hostname), nil
}


@@ -18,14 +18,21 @@ import (
// writerAt implements fs.WriterAtCloser, adding the OpenWrtierAt feature to pcloud.
type writerAt struct {
ctx context.Context
client *rest.Client
fs *Fs
size int64
remote string
fd int64
fileID int64
}
// Close implements WriterAt.Close.
func (c *writerAt) Close() error {
// close fd
if _, err := c.fileClose(c.ctx); err != nil {
return fmt.Errorf("close fd: %w", err)
}
// Avoiding race conditions: Depending on the tcp connection, there might be
// caching issues when checking the size immediately after write.
// Hence we try avoiding them by checking the resulting size on a different connection.
@@ -65,18 +72,8 @@ func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) {
inSHA1Bytes := sha1.Sum(buffer)
inSHA1 := hex.EncodeToString(inSHA1Bytes[:])
client, err := c.fs.newSingleConnClient(c.ctx)
if err != nil {
return 0, fmt.Errorf("create client: %w", err)
}
openResult, err := fileOpen(c.ctx, client, c.fs, c.fileID)
if err != nil {
return 0, fmt.Errorf("open file: %w", err)
}
// get target hash
outChecksum, err := fileChecksum(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, int64(contentLength))
outChecksum, err := c.fileChecksum(c.ctx, offset, int64(contentLength))
if err != nil {
return 0, err
}
@@ -92,15 +89,10 @@ func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) {
}
// upload buffer with offset if necessary
if _, err := filePWrite(c.ctx, client, c.fs.pacer, openResult.FileDescriptor, offset, buffer); err != nil {
if _, err := c.filePWrite(c.ctx, offset, buffer); err != nil {
return 0, err
}
// close fd
if _, err := fileClose(c.ctx, client, c.fs.pacer, openResult.FileDescriptor); err != nil {
return contentLength, fmt.Errorf("close fd: %w", err)
}
return contentLength, nil
}
@@ -133,40 +125,11 @@ func fileOpenNew(ctx context.Context, c *rest.Client, srcFs *Fs, directoryID, fi
return result, nil
}
// Call pcloud file_open using fileid with O_WRITE flags, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_open.html
func fileOpen(ctx context.Context, c *rest.Client, srcFs *Fs, fileID int64) (*api.FileOpenResponse, error) {
opts := rest.Opts{
Method: "PUT",
Path: "/file_open",
Parameters: url.Values{},
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
ExtraHeaders: map[string]string{
"Connection": "keep-alive",
},
}
opts.Parameters.Set("fileid", strconv.FormatInt(fileID, 10))
opts.Parameters.Set("flags", "0x0002") // O_WRITE
result := &api.FileOpenResponse{}
err := srcFs.pacer.CallNoRetry(func() (bool, error) {
resp, err := c.CallJSON(ctx, &opts, nil, result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("open new file descriptor: %w", err)
}
return result, nil
}
// Call pcloud file_checksum, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_checksum.html
func fileChecksum(
func (c *writerAt) fileChecksum(
ctx context.Context,
client *rest.Client,
pacer *fs.Pacer,
fd, offset, count int64,
offset, count int64,
) (*api.FileChecksumResponse, error) {
opts := rest.Opts{
Method: "PUT",
@@ -177,29 +140,26 @@ func fileChecksum(
"Connection": "keep-alive",
},
}
opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
opts.Parameters.Set("count", strconv.FormatInt(count, 10))
result := &api.FileChecksumResponse{}
err := pacer.CallNoRetry(func() (bool, error) {
resp, err := client.CallJSON(ctx, &opts, nil, result)
err := c.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err := c.client.CallJSON(ctx, &opts, nil, result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", fd, offset, count, err)
return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", c.fd, offset, count, err)
}
return result, nil
}
// Call pcloud file_pwrite, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_pwrite.html
func filePWrite(
func (c *writerAt) filePWrite(
ctx context.Context,
client *rest.Client,
pacer *fs.Pacer,
fd int64,
offset int64,
buf []byte,
) (*api.FilePWriteResponse, error) {
@@ -216,29 +176,24 @@ func filePWrite(
"Connection": "keep-alive",
},
}
opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
result := &api.FilePWriteResponse{}
err := pacer.CallNoRetry(func() (bool, error) {
resp, err := client.CallJSON(ctx, &opts, nil, result)
err := c.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err := c.client.CallJSON(ctx, &opts, nil, result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, fd, offset, err)
return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, c.fd, offset, err)
}
return result, nil
}
// Call pcloud file_close, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_close.html
func fileClose(
ctx context.Context,
client *rest.Client,
pacer *fs.Pacer,
fd int64,
) (*api.FileCloseResponse, error) {
func (c *writerAt) fileClose(ctx context.Context) (*api.FileCloseResponse, error) {
opts := rest.Opts{
Method: "PUT",
Path: "/file_close",
@@ -246,11 +201,11 @@ func fileClose(
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
Close: true,
}
opts.Parameters.Set("fd", strconv.FormatInt(fd, 10))
opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
result := &api.FileCloseResponse{}
err := pacer.CallNoRetry(func() (bool, error) {
resp, err := client.CallJSON(ctx, &opts, nil, result)
err := c.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err := c.client.CallJSON(ctx, &opts, nil, result)
err = result.Error.Update(err)
return shouldRetry(ctx, resp, err)
})


@@ -82,11 +82,13 @@ const (
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
Scopes: nil,
AuthURL: "https://user.mypikpak.com/v1/auth/signin",
TokenURL: "https://user.mypikpak.com/v1/auth/token",
AuthStyle: oauth2.AuthStyleInParams,
oauthConfig = &oauth2.Config{
Scopes: nil,
Endpoint: oauth2.Endpoint{
AuthURL: "https://user.mypikpak.com/v1/auth/signin",
TokenURL: "https://user.mypikpak.com/v1/auth/token",
AuthStyle: oauth2.AuthStyleInParams,
},
ClientID: clientID,
RedirectURL: oauthutil.RedirectURL,
}
@@ -213,11 +215,6 @@ Fill in for rclone to use a non root folder as its starting point.
Default: false,
Help: "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.",
Advanced: true,
}, {
Name: "no_media_link",
Default: false,
Help: "Use original file links instead of media links.\n\nThis avoids issues caused by invalid media links, but may reduce download speeds.",
Advanced: true,
}, {
Name: "hash_memory_limit",
Help: "Files bigger than this will be cached on disk to calculate hash if required.",
@@ -291,7 +288,6 @@ type Options struct {
RootFolderID string `config:"root_folder_id"`
UseTrash bool `config:"use_trash"`
TrashedOnly bool `config:"trashed_only"`
NoMediaLink bool `config:"no_media_link"`
HashMemoryThreshold fs.SizeSuffix `config:"hash_memory_limit"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
@@ -1581,14 +1577,15 @@ func (o *Object) setMetaData(info *api.File) (err error) {
o.md5sum = info.Md5Checksum
if info.Links.ApplicationOctetStream != nil {
o.link = info.Links.ApplicationOctetStream
if !o.fs.opt.NoMediaLink {
if fid := parseFileID(o.link.URL); fid != "" {
for _, media := range info.Medias {
if media.Link != nil && parseFileID(media.Link.URL) == fid {
fs.Debugf(o, "Using a media link")
o.link = media.Link
break
}
if fid := parseFileID(o.link.URL); fid != "" {
for mid, media := range info.Medias {
if media.Link == nil {
continue
}
if mfid := parseFileID(media.Link.URL); fid == mfid {
fs.Debugf(o, "Using a media link from Medias[%d]", mid)
o.link = media.Link
break
}
}
}


@@ -43,6 +43,7 @@ import (
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
)
const (
@@ -58,10 +59,12 @@ const (
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
Scopes: nil,
AuthURL: "https://www.premiumize.me/authorize",
TokenURL: "https://www.premiumize.me/token",
oauthConfig = &oauth2.Config{
Scopes: nil,
Endpoint: oauth2.Endpoint{
AuthURL: "https://www.premiumize.me/authorize",
TokenURL: "https://www.premiumize.me/token",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,


@@ -13,6 +13,7 @@ import (
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"golang.org/x/oauth2"
)
/*
@@ -40,10 +41,12 @@ const (
var (
// Description of how to auth for this app
putioConfig = &oauthutil.Config{
Scopes: []string{},
AuthURL: "https://api.put.io/v2/oauth2/authenticate",
TokenURL: "https://api.put.io/v2/oauth2/access_token",
putioConfig = &oauth2.Config{
Scopes: []string{},
Endpoint: oauth2.Endpoint{
AuthURL: "https://api.put.io/v2/oauth2/authenticate",
TokenURL: "https://api.put.io/v2/oauth2/access_token",
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneObscuredClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,


@@ -1343,7 +1343,7 @@ func init() {
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,Magalu,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,MagaluCloud,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint",
@@ -1476,6 +1476,14 @@ func init() {
Value: "s3.ir-tbz-sh1.arvanstorage.ir",
Help: "ArvanCloud Tabriz Iran (Shahriar) endpoint",
Provider: "ArvanCloud",
}, {
Value: "br-se1.magaluobjects.com",
Help: "Magalu BR Southeast 1 endpoint",
Provider: "Magalu",
}, {
Value: "br-ne1.magaluobjects.com",
Help: "Magalu BR Northeast 1 endpoint",
Provider: "Magalu",
}},
}, {
Name: "location_constraint",
@@ -2048,7 +2056,7 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
Help: "One Zone Infrequent Access storage class",
}, {
Value: "GLACIER",
Help: "Glacier Flexible Retrieval storage class",
Help: "Glacier storage class",
}, {
Value: "DEEP_ARCHIVE",
Help: "Glacier Deep Archive storage class",
@@ -2114,16 +2122,13 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
Help: "Standard storage class",
}},
}, {
// Mapping from here: https://docs.magalu.cloud/docs/storage/object-storage/Classes-de-Armazenamento/standard
// Mapping from here: #todo
Name: "storage_class",
Help: "The storage class to use when storing new objects in Magalu.",
Provider: "Magalu",
Examples: []fs.OptionExample{{
Value: "STANDARD",
Help: "Standard storage class",
}, {
Value: "GLACIER_IR",
Help: "Glacier Instant Retrieval storage class",
}},
}, {
// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
@@ -3339,7 +3344,7 @@ func setQuirks(opt *Options) {
listObjectsV2 = true // Always use ListObjectsV2 instead of ListObjects
virtualHostStyle = true // Use bucket.provider.com instead of putting the bucket in the URL
urlEncodeListings = true // URL encode the listings to help with control characters
useMultipartEtag = true // Set if Etags for multipart uploads are compatible with AWS
useMultipartEtag = true // Set if Etags for multpart uploads are compatible with AWS
useAcceptEncodingGzip = true // Set Accept-Encoding: gzip
mightGzip = true // assume all providers might use content encoding gzip until proven otherwise
useAlreadyExists = true // Set if provider returns AlreadyOwnedByYou or no error if you try to remake your own bucket
@@ -3677,9 +3682,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.Provider == "IDrive" {
f.features.SetTier = false
}
if opt.Provider == "AWS" {
f.features.DoubleSlash = true
}
if opt.DirectoryMarkers {
f.features.CanHaveEmptyDirectories = true
}
@@ -4151,7 +4153,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
opt.prefix += "/"
}
if !opt.findFile {
if opt.directory != "" && (opt.prefix == "" && !bucket.IsAllSlashes(opt.directory) || opt.prefix != "" && !strings.HasSuffix(opt.directory, "/")) {
if opt.directory != "" {
opt.directory += "/"
}
}
@@ -4248,18 +4250,14 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
}
remote = f.opt.Enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, opt.prefix) {
fs.Logf(f, "Odd directory name received %q", remote)
fs.Logf(f, "Odd name received %q", remote)
continue
}
remote = remote[len(opt.prefix):]
// Trim one slash off the remote name
remote, _ = strings.CutSuffix(remote, "/")
if remote == "" || bucket.IsAllSlashes(remote) {
remote += "/"
}
if opt.addBucket {
remote = bucket.Join(opt.bucket, remote)
}
remote = strings.TrimSuffix(remote, "/")
err = fn(remote, &types.Object{Key: &remote}, nil, true)
if err != nil {
if err == errEndList {
@@ -5868,25 +5866,6 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options
return resp.Body, err
}
// middleware to stop the SDK adding `Accept-Encoding: identity`
func removeDisableGzip() func(*middleware.Stack) error {
return func(stack *middleware.Stack) error {
_, err := stack.Finalize.Remove("DisableAcceptEncodingGzip")
return err
}
}
// middleware to set Accept-Encoding to how we want it
//
// This make sure we download compressed files as-is from all platforms
func (f *Fs) acceptEncoding() (APIOptions []func(*middleware.Stack) error) {
APIOptions = append(APIOptions, removeDisableGzip())
if f.opt.UseAcceptEncodingGzip.Value {
APIOptions = append(APIOptions, smithyhttp.AddHeaderValue("Accept-Encoding", "gzip"))
}
return APIOptions
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
bucket, bucketPath := o.split()
@@ -5920,8 +5899,11 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var APIOptions []func(*middleware.Stack) error
// Set the SDK to always download compressed files as-is
APIOptions = append(APIOptions, o.fs.acceptEncoding()...)
// Override the automatic decompression in the transport to
// download compressed files as-is
if o.fs.opt.UseAcceptEncodingGzip.Value {
APIOptions = append(APIOptions, smithyhttp.AddHeaderValue("Accept-Encoding", "gzip"))
}
for _, option := range options {
switch option.(type) {
@@ -6059,7 +6041,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
if mOut == nil {
err = fserrors.RetryErrorf("internal error: no info from multipart upload")
} else if mOut.UploadId == nil {
err = fserrors.RetryErrorf("internal error: no UploadId in multipart upload: %#v", *mOut)
err = fserrors.RetryErrorf("internal error: no UploadId in multpart upload: %#v", *mOut)
}
}
return f.shouldRetry(ctx, err)
@@ -6072,8 +6054,8 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
chunkSize: int64(chunkSize),
size: size,
f: f,
bucket: ui.req.Bucket,
key: ui.req.Key,
bucket: mOut.Bucket,
key: mOut.Key,
uploadID: mOut.UploadId,
multiPartUploadInput: &mReq,
completedParts: make([]types.CompletedPart, 0),
@@ -6177,9 +6159,6 @@ func (w *s3ChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
if chunkNumber <= 8 {
return w.f.shouldRetry(ctx, err)
}
if fserrors.ContextError(ctx, &err) {
return false, err
}
// retry all chunks once have done the first few
return true, err
}


@@ -23,20 +23,14 @@ func SetupS3Test(t *testing.T) (context.Context, *Options, *http.Client) {
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
opt := &fstests.Opt{
fstests.Run(t, &fstests.Opt{
RemoteName: "TestS3:",
NilObject: (*Object)(nil),
TiersToTest: []string{"STANDARD"},
TiersToTest: []string{"STANDARD", "STANDARD_IA"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MinChunkSize: minChunkSize,
},
}
// Test wider range of tiers on AWS
if *fstest.RemoteName == "" || *fstest.RemoteName == "TestS3:" {
opt.TiersToTest = []string{"STANDARD", "STANDARD_IA"}
}
fstests.Run(t, opt)
})
}
func TestIntegration2(t *testing.T) {


@@ -97,6 +97,7 @@ import (
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
)
const (
@@ -114,11 +115,13 @@ const (
)
// Generate a new oauth2 config which we will update when we know the TokenURL
func newOauthConfig(tokenURL string) *oauthutil.Config {
return &oauthutil.Config{
Scopes: nil,
AuthURL: "https://secure.sharefile.com/oauth/authorize",
TokenURL: tokenURL,
func newOauthConfig(tokenURL string) *oauth2.Config {
return &oauth2.Config{
Scopes: nil,
Endpoint: oauth2.Endpoint{
AuthURL: "https://secure.sharefile.com/oauth/authorize",
TokenURL: tokenURL,
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectPublicSecureURL,
@@ -133,7 +136,7 @@ func init() {
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
oauthConfig := newOauthConfig("")
checkAuth := func(oauthConfig *oauthutil.Config, auth *oauthutil.AuthResult) error {
checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
if auth == nil || auth.Form == nil {
return errors.New("endpoint not found in response")
}
@@ -144,7 +147,7 @@ func init() {
}
endpoint := "https://" + subdomain + "." + apicp
m.Set("endpoint", endpoint)
oauthConfig.TokenURL = endpoint + tokenPath
oauthConfig.Endpoint.TokenURL = endpoint + tokenPath
return nil
}
return oauthutil.ConfigOut("", &oauthutil.Options{


@@ -31,29 +31,13 @@ func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {
}
}
d := &smb2.Dialer{}
if f.opt.UseKerberos {
cl, err := getKerberosClient()
if err != nil {
return nil, err
}
spn := f.opt.SPN
if spn == "" {
spn = "cifs/" + f.opt.Host
}
d.Initiator = &smb2.Krb5Initiator{
Client: cl,
TargetSPN: spn,
}
} else {
d.Initiator = &smb2.NTLMInitiator{
d := &smb2.Dialer{
Initiator: &smb2.NTLMInitiator{
User: f.opt.User,
Password: pass,
Domain: f.opt.Domain,
TargetSPN: f.opt.SPN,
}
},
}
session, err := d.DialConn(ctx, tconn, addr)


@@ -1,78 +0,0 @@
package smb
import (
"fmt"
"os"
"os/user"
"path/filepath"
"strings"
"sync"
"github.com/jcmturner/gokrb5/v8/client"
"github.com/jcmturner/gokrb5/v8/config"
"github.com/jcmturner/gokrb5/v8/credentials"
)
var (
kerberosClient *client.Client
kerberosErr error
kerberosOnce sync.Once
)
// getKerberosClient returns a Kerberos client that can be used to authenticate.
func getKerberosClient() (*client.Client, error) {
if kerberosClient == nil || kerberosErr == nil {
kerberosOnce.Do(func() {
kerberosClient, kerberosErr = createKerberosClient()
})
}
return kerberosClient, kerberosErr
}
// createKerberosClient creates a new Kerberos client.
func createKerberosClient() (*client.Client, error) {
cfgPath := os.Getenv("KRB5_CONFIG")
if cfgPath == "" {
cfgPath = "/etc/krb5.conf"
}
cfg, err := config.Load(cfgPath)
if err != nil {
return nil, err
}
// Determine the ccache location from the environment, falling back to the
// default location.
ccachePath := os.Getenv("KRB5CCNAME")
switch {
case strings.Contains(ccachePath, ":"):
parts := strings.SplitN(ccachePath, ":", 2)
switch parts[0] {
case "FILE":
ccachePath = parts[1]
case "DIR":
primary, err := os.ReadFile(filepath.Join(parts[1], "primary"))
if err != nil {
return nil, err
}
ccachePath = filepath.Join(parts[1], strings.TrimSpace(string(primary)))
default:
return nil, fmt.Errorf("unsupported KRB5CCNAME: %s", ccachePath)
}
case ccachePath == "":
u, err := user.Current()
if err != nil {
return nil, err
}
ccachePath = "/tmp/krb5cc_" + u.Uid
}
ccache, err := credentials.LoadCCache(ccachePath)
if err != nil {
return nil, err
}
return client.NewFromCCache(ccache, cfg)
}
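
The cache location logic follows common MIT Kerberos conventions: KRB5CCNAME may name the cache directly or carry a FILE: or DIR: prefix, and when it is unset the code falls back to /tmp/krb5cc_<uid>. A standalone sketch of just that resolution step (mirroring the function above rather than importing it):

```go
package main

import (
	"fmt"
	"os"
	"os/user"
	"path/filepath"
	"strings"
)

// resolveCCache mirrors the KRB5CCNAME handling shown above.
func resolveCCache() (string, error) {
	ccache := os.Getenv("KRB5CCNAME")
	switch {
	case strings.Contains(ccache, ":"):
		parts := strings.SplitN(ccache, ":", 2)
		switch parts[0] {
		case "FILE":
			return parts[1], nil
		case "DIR":
			// a DIR cache stores the active cache name in a "primary" file
			primary, err := os.ReadFile(filepath.Join(parts[1], "primary"))
			if err != nil {
				return "", err
			}
			return filepath.Join(parts[1], strings.TrimSpace(string(primary))), nil
		default:
			return "", fmt.Errorf("unsupported KRB5CCNAME: %s", ccache)
		}
	case ccache == "":
		u, err := user.Current()
		if err != nil {
			return "", err
		}
		return "/tmp/krb5cc_" + u.Uid, nil
	}
	return ccache, nil
}

func main() {
	path, err := resolveCCache()
	fmt.Println(path, err)
}
```
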


@@ -76,16 +76,6 @@ authentication, and it often needs to be set for clusters. For example:
Leave blank if not sure.
`,
Sensitive: true,
}, {
Name: "use_kerberos",
Help: `Use Kerberos authentication.
If set, rclone will use Kerberos authentication instead of NTLM. This
requires a valid Kerberos configuration and credentials cache to be
available, either in the default locations or as specified by the
KRB5_CONFIG and KRB5CCNAME environment variables.
`,
Default: false,
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),
@@ -136,7 +126,6 @@ type Options struct {
Pass string `config:"pass"`
Domain string `config:"domain"`
SPN string `config:"spn"`
UseKerberos bool `config:"use_kerberos"`
HideSpecial bool `config:"hide_special_share"`
CaseInsensitive bool `config:"case_insensitive"`
IdleTimeout fs.Duration `config:"idle_timeout"`
@@ -612,10 +601,9 @@ func (o *Object) SetModTime(ctx context.Context, t time.Time) (err error) {
}
fi, err := cn.smbShare.Stat(reqDir)
if err != nil {
return fmt.Errorf("SetModTime: stat: %w", err)
if err == nil {
o.statResult = fi
}
o.statResult = fi
return err
}
@@ -697,6 +685,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
defer func() {
o.statResult, _ = cn.smbShare.Stat(filename)
o.fs.putConnection(&cn)
}()
@@ -734,7 +723,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return fmt.Errorf("Update Close failed: %w", err)
}
// Set the modified time and also o.statResult
// Set the modified time
err = o.SetModTime(ctx, src.ModTime(ctx))
if err != nil {
return fmt.Errorf("Update SetModTime failed: %w", err)


@@ -2,7 +2,6 @@
package smb_test
import (
"path/filepath"
"testing"
"github.com/rclone/rclone/backend/smb"
@@ -16,13 +15,3 @@ func TestIntegration(t *testing.T) {
NilObject: (*smb.Object)(nil),
})
}
func TestIntegration2(t *testing.T) {
krb5Dir := t.TempDir()
t.Setenv("KRB5_CONFIG", filepath.Join(krb5Dir, "krb5.conf"))
t.Setenv("KRB5CCNAME", filepath.Join(krb5Dir, "ccache"))
fstests.Run(t, &fstests.Opt{
RemoteName: "TestSMBKerberos:rclone",
NilObject: (*smb.Object)(nil),
})
}


@@ -161,24 +161,7 @@ Set to 0 to disable chunked uploading.
Default: false,
},
fshttp.UnixSocketConfig,
{
Name: "auth_redirect",
Help: `Preserve authentication on redirect.
If the server redirects rclone to a new domain when it is trying to
read a file then normally rclone will drop the Authorization: header
from the request.
This is standard security practice to avoid sending your credentials
to an unknown webserver.
However this is desirable in some circumstances. If you are getting
an error like "401 Unauthorized" when rclone is attempting to read
files from the webdav server then you can try this option.
`,
Advanced: true,
Default: false,
}},
},
})
}
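
For context, dropping the Authorization header when a redirect crosses to a different host is the default, safe behaviour of Go's HTTP client, and the option above opts back into forwarding it. A generic sketch of what "preserve authentication on redirect" means with plain net/http - this is not how the webdav backend implements it, only an illustration of the trade-off the help text describes:

```go
package main

import "net/http"

// newAuthPreservingClient returns an *http.Client that re-attaches the
// Authorization header on every redirect hop. Generic sketch only: it
// forwards credentials to whatever host the server redirects to, which is
// exactly the risk the help text warns about.
func newAuthPreservingClient(authHeader string) *http.Client {
	return &http.Client{
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			req.Header.Set("Authorization", authHeader)
			return nil
		},
	}
}

func main() {
	_ = newAuthPreservingClient("Bearer example-token")
}
```
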
@@ -197,7 +180,6 @@ type Options struct {
ExcludeShares bool `config:"owncloud_exclude_shares"`
ExcludeMounts bool `config:"owncloud_exclude_mounts"`
UnixSocket string `config:"unix_socket"`
AuthRedirect bool `config:"auth_redirect"`
}
// Fs represents a remote webdav
@@ -1474,7 +1456,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
ExtraHeaders: map[string]string{
"Depth": "0",
},
AuthRedirect: o.fs.opt.AuthRedirect, // allow redirects to preserve Auth
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(ctx, &opts)


@@ -29,6 +29,7 @@ import (
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
)
// oAuth
@@ -46,9 +47,11 @@ const (
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
AuthURL: "https://oauth.yandex.com/authorize", //same as https://oauth.yandex.ru/authorize
TokenURL: "https://oauth.yandex.com/token", //same as https://oauth.yandex.ru/token
oauthConfig = &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: "https://oauth.yandex.com/authorize", //same as https://oauth.yandex.ru/authorize
TokenURL: "https://oauth.yandex.com/token", //same as https://oauth.yandex.ru/token
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,

View File

@@ -47,7 +47,7 @@ const (
// Globals
var (
// Description of how to auth for this app
oauthConfig = &oauthutil.Config{
oauthConfig = &oauth2.Config{
Scopes: []string{
"aaaserver.profile.read",
"WorkDrive.team.READ",
@@ -55,10 +55,11 @@ var (
"WorkDrive.files.ALL",
"ZohoFiles.files.ALL",
},
AuthURL: "https://accounts.zoho.eu/oauth/v2/auth",
TokenURL: "https://accounts.zoho.eu/oauth/v2/token",
AuthStyle: oauth2.AuthStyleInParams,
Endpoint: oauth2.Endpoint{
AuthURL: "https://accounts.zoho.eu/oauth/v2/auth",
TokenURL: "https://accounts.zoho.eu/oauth/v2/token",
AuthStyle: oauth2.AuthStyleInParams,
},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -275,8 +276,8 @@ func setupRegion(m configmap.Mapper) error {
downloadURL = fmt.Sprintf("https://download.zoho.%s/v1/workdrive", region)
uploadURL = fmt.Sprintf("https://upload.zoho.%s/workdrive-api/v1", region)
accountsURL = fmt.Sprintf("https://accounts.zoho.%s", region)
oauthConfig.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
oauthConfig.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)
oauthConfig.Endpoint.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
oauthConfig.Endpoint.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)
return nil
}

View File

@@ -7,11 +7,11 @@ for backend in $( find backend -maxdepth 1 -type d ); do
continue
fi
commit=$(git log --oneline -- $backend | tail -n 1 | cut -d' ' -f1)
commit=$(git log --oneline -- $backend | tail -1 | cut -d' ' -f1)
if [ "$commit" == "" ]; then
commit=$(git log --oneline -- backend/$backend | tail -n 1 | cut -d' ' -f1)
commit=$(git log --oneline -- backend/$backend | tail -1 | cut -d' ' -f1)
fi
version=$(git tag --contains $commit | grep ^v | sort -n | head -n 1)
version=$(git tag --contains $commit | grep ^v | sort -n | head -1)
echo $backend $version
sed -i~ "4i versionIntroduced: \"$version\"" docs/content/${backend}.md
done

View File

@@ -7,7 +7,6 @@ conversion into man pages etc.
import os
import re
import time
import subprocess
from datetime import datetime
docpath = "docs/content"
@@ -36,7 +35,6 @@ docs = [
"box.md",
"cache.md",
"chunker.md",
"cloudinary.md",
"sharefile.md",
"crypt.md",
"compress.md",
@@ -193,23 +191,13 @@ def main():
command_docs = read_commands(docpath).replace("\\", "\\\\") # escape \ so we can use command_docs in re.sub
build_date = datetime.utcfromtimestamp(
int(os.environ.get('SOURCE_DATE_EPOCH', time.time())))
help_output = subprocess.check_output(["rclone", "help"]).decode("utf-8")
with open(outfile, "w") as out:
out.write("""\
%% rclone(1) User Manual
%% Nick Craig-Wood
%% %s
# NAME
rclone - manage files on cloud storage
# SYNOPSIS
```
%s
```
""" % (build_date.strftime("%b %d, %Y"), help_output))
""" % build_date.strftime("%b %d, %Y"))
for doc in docs:
contents = read_doc(doc)
# Substitute the commands into doc.md

View File

@@ -29,7 +29,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
cmd := exec.Command("git", "log", "--oneline", from+".."+to)
out, err := cmd.Output()
if err != nil {
log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
}
logMap = map[string]string{}
logs = []string{}
@@ -39,7 +39,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
}
match := logRe.FindSubmatch(line)
if match == nil {
log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
}
var hash, logMessage = string(match[1]), string(match[2])
logMap[logMessage] = hash
@@ -52,12 +52,12 @@ func main() {
flag.Parse()
args := flag.Args()
if len(args) != 0 {
log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
}
// v1.54.0
versionBytes, err := os.ReadFile("VERSION")
if err != nil {
log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
}
if versionBytes[0] == 'v' {
versionBytes = versionBytes[1:]
@@ -65,7 +65,7 @@ func main() {
versionBytes = bytes.TrimSpace(versionBytes)
semver := semver.New(string(versionBytes))
stable := fmt.Sprintf("v%d.%d", semver.Major, semver.Minor-1)
log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
masterMap, masterLogs := readCommits(stable+".0", "master")
stableMap, _ := readCommits(stable+".0", stable+"-stable")
for _, logMessage := range masterLogs {

View File

@@ -7,18 +7,15 @@ Run with no arguments to test all backends or a supply a list of
backends to test.
"""
import os
import re
import sys
import subprocess
all_backends = "backend/all/all.go"
# compile command which is more or less like the production builds
compile_command = ["go", "build", "--ldflags", "-s", "-trimpath"]
# disable CGO as that makes a lot of difference to binary size
os.environ["CGO_ENABLED"]="0"
import os
import re
import sys
import subprocess
match_backend = re.compile(r'"github.com/rclone/rclone/backend/(.*?)"')
@@ -46,9 +43,6 @@ def write_all(orig_all, backend):
# Comment out line matching backend
if match and match.group(1) == backend:
line = "// " + line
# s3 and pikpak depend on each other
if backend == "s3" and "pikpak" in line:
line = "// " + line
fd.write(line+"\n")
def compile():

View File

@@ -13,7 +13,7 @@ if [ "$1" == "" ]; then
exit 1
fi
VERSION="$1"
ANCHOR=$(grep '^## v' docs/content/changelog.md | head -n 1 | sed 's/^## //; s/[^A-Za-z0-9-]/-/g; s/--*/-/g')
ANCHOR=$(grep '^## v' docs/content/changelog.md | head -1 | sed 's/^## //; s/[^A-Za-z0-9-]/-/g; s/--*/-/g')
cat > "/tmp/${VERSION}-release-notes" <<EOF
This is the ${VERSION} release of rclone.

View File

@@ -9,7 +9,7 @@ import (
"github.com/rclone/rclone/cmd/bisync/bilib"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v3"
"gopkg.in/yaml.v2"
)
const configFile = "../../fstest/test_all/config.yaml"

View File

@@ -63,40 +63,40 @@ func (b *bisyncRun) setCompareDefaults(ctx context.Context) error {
}
if b.opt.Compare.SlowHashSyncOnly && b.opt.Compare.SlowHashDetected && b.opt.Resync {
fs.Log(nil, Color(terminal.Dim, "Ignoring checksums during --resync as --slow-hash-sync-only is set."))
fs.Logf(nil, Color(terminal.Dim, "Ignoring checksums during --resync as --slow-hash-sync-only is set.")) ///nolint:govet
ci.CheckSum = false
// note not setting b.opt.Compare.Checksum = false as we still want to build listings on the non-slow side, if any
} else if b.opt.Compare.Checksum && !ci.CheckSum {
fs.Log(nil, Color(terminal.YellowFg, "WARNING: Checksums will be compared for deltas but not during sync as --checksum is not set."))
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Checksums will be compared for deltas but not during sync as --checksum is not set.")) //nolint:govet
}
if b.opt.Compare.Modtime && (b.fs1.Precision() == fs.ModTimeNotSupported || b.fs2.Precision() == fs.ModTimeNotSupported) {
fs.Log(nil, Color(terminal.YellowFg, "WARNING: Modtime compare was requested but at least one remote does not support it. It is recommended to use --checksum or --size-only instead."))
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Modtime compare was requested but at least one remote does not support it. It is recommended to use --checksum or --size-only instead.")) //nolint:govet
}
if (ci.CheckSum || b.opt.Compare.Checksum) && b.opt.IgnoreListingChecksum {
if (b.opt.Compare.HashType1 == hash.None || b.opt.Compare.HashType2 == hash.None) && !b.opt.Compare.DownloadHash {
fs.Logf(nil, Color(terminal.YellowFg, `WARNING: Checksum compare was requested but at least one remote does not support checksums (or checksums are being ignored) and --ignore-listing-checksum is set.
Ignoring Checksums globally and falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime). Path1 (%s): %s, Path2 (%s): %s`),
b.fs1.String(), b.opt.Compare.HashType1.String(), b.fs2.String(), b.opt.Compare.HashType2.String())
b.fs1.String(), b.opt.Compare.HashType1.String(), b.fs2.String(), b.opt.Compare.HashType2.String()) //nolint:govet
b.opt.Compare.Modtime = true
b.opt.Compare.Size = true
ci.CheckSum = false
b.opt.Compare.Checksum = false
} else {
fs.Log(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksum for deltas as --ignore-listing-checksum is set"))
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksum for deltas as --ignore-listing-checksum is set")) //nolint:govet
// note: --checksum will still affect the internal sync calls
}
}
if !ci.CheckSum && !b.opt.Compare.Checksum && !b.opt.IgnoreListingChecksum {
fs.Infoc(nil, Color(terminal.Dim, "Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set."))
fs.Infof(nil, Color(terminal.Dim, "Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.")) //nolint:govet
b.opt.IgnoreListingChecksum = true
}
if !b.opt.Compare.Size && !b.opt.Compare.Modtime && !b.opt.Compare.Checksum {
return errors.New(Color(terminal.RedFg, "must set a Compare method. (size, modtime, and checksum can't all be false.)"))
return errors.New(Color(terminal.RedFg, "must set a Compare method. (size, modtime, and checksum can't all be false.)")) //nolint:govet
}
notSupported := func(label string, value bool, opt *bool) {
if value {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: %s is set but bisync does not support it. It will be ignored."), label)
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: %s is set but bisync does not support it. It will be ignored."), label) //nolint:govet
*opt = false
}
}
@@ -123,13 +123,13 @@ func sizeDiffers(a, b int64) bool {
func hashDiffers(a, b string, ht1, ht2 hash.Type, size1, size2 int64) bool {
if a == "" || b == "" {
if ht1 != hash.None && ht2 != hash.None && !(size1 <= 0 || size2 <= 0) {
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), a, b)
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), a, b) //nolint:govet
}
return false
}
if ht1 != ht2 {
if !(downloadHash && ((ht1 == hash.MD5 && ht2 == hash.None) || (ht1 == hash.None && ht2 == hash.MD5))) {
fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String())
fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String()) //nolint:govet
return false
}
}
@@ -151,7 +151,7 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
return
}
} else if b.opt.Compare.SlowHashSyncOnly && b.opt.Compare.SlowHashDetected {
fs.Log(b.fs2, Color(terminal.YellowFg, "Ignoring --slow-hash-sync-only and falling back to --no-slow-hash as Path1 and Path2 have no hashes in common."))
fs.Logf(b.fs2, Color(terminal.YellowFg, "Ignoring --slow-hash-sync-only and falling back to --no-slow-hash as Path1 and Path2 have no hashes in common.")) //nolint:govet
b.opt.Compare.SlowHashSyncOnly = false
b.opt.Compare.NoSlowHash = true
ci.CheckSum = false
@@ -159,7 +159,7 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
}
if !b.opt.Compare.DownloadHash && !b.opt.Compare.SlowHashSyncOnly {
fs.Log(b.fs2, Color(terminal.YellowFg, "--checksum is in use but Path1 and Path2 have no hashes in common; falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime)"))
fs.Logf(b.fs2, Color(terminal.YellowFg, "--checksum is in use but Path1 and Path2 have no hashes in common; falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime)")) //nolint:govet
fs.Infof("Path1 hashes", "%v", b.fs1.Hashes().String())
fs.Infof("Path2 hashes", "%v", b.fs2.Hashes().String())
b.opt.Compare.Modtime = true
@@ -167,25 +167,25 @@ func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
ci.CheckSum = false
}
if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs1.Features().SlowHash {
fs.Infoc(nil, Color(terminal.YellowFg, "Slow hash detected on Path1. Will ignore checksum due to slow-hash settings"))
fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path1. Will ignore checksum due to slow-hash settings")) //nolint:govet
b.opt.Compare.HashType1 = hash.None
} else {
b.opt.Compare.HashType1 = b.fs1.Hashes().GetOne()
if b.opt.Compare.HashType1 != hash.None {
fs.Logf(b.fs1, Color(terminal.YellowFg, "will use %s for same-side diffs on Path1 only"), b.opt.Compare.HashType1)
fs.Logf(b.fs1, Color(terminal.YellowFg, "will use %s for same-side diffs on Path1 only"), b.opt.Compare.HashType1) //nolint:govet
}
}
if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs2.Features().SlowHash {
fs.Infoc(nil, Color(terminal.YellowFg, "Slow hash detected on Path2. Will ignore checksum due to slow-hash settings"))
fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path2. Will ignore checksum due to slow-hash settings")) //nolint:govet
b.opt.Compare.HashType1 = hash.None
} else {
b.opt.Compare.HashType2 = b.fs2.Hashes().GetOne()
if b.opt.Compare.HashType2 != hash.None {
fs.Logf(b.fs2, Color(terminal.YellowFg, "will use %s for same-side diffs on Path2 only"), b.opt.Compare.HashType2)
fs.Logf(b.fs2, Color(terminal.YellowFg, "will use %s for same-side diffs on Path2 only"), b.opt.Compare.HashType2) //nolint:govet
}
}
if b.opt.Compare.HashType1 == hash.None && b.opt.Compare.HashType2 == hash.None && !b.opt.Compare.DownloadHash {
fs.Log(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksums globally as hashes are ignored or unavailable on both sides."))
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksums globally as hashes are ignored or unavailable on both sides.")) //nolint:govet
b.opt.Compare.Checksum = false
ci.CheckSum = false
b.opt.IgnoreListingChecksum = true
@@ -218,7 +218,7 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
if b.opt.CompareFlag == "" {
return nil
}
var CompareFlag CompareOpt // for exclusions
var CompareFlag CompareOpt // for exlcusions
opts := strings.Split(b.opt.CompareFlag, ",")
for _, opt := range opts {
switch strings.ToLower(strings.TrimSpace(opt)) {
@@ -232,7 +232,7 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
b.opt.Compare.Checksum = true
CompareFlag.Checksum = true
default:
return fmt.Errorf(Color(terminal.RedFg, "unknown compare option: %s (must be size, modtime, or checksum)"), opt)
return fmt.Errorf(Color(terminal.RedFg, "unknown compare option: %s (must be size, modtime, or checksum)"), opt) //nolint:govet
}
}
@@ -284,14 +284,14 @@ func tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string
}
if o.Size() < 0 {
downloadHashWarn.Do(func() {
fs.Log(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length."))
fs.Logf(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length.")) //nolint:govet
})
fs.Debugf(o, "Skipping hash download as checksum not reliable with files of unknown length.")
return hashVal, hash.ErrUnsupported
}
firstDownloadHash.Do(func() {
fs.Infoc(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes..."))
fs.Infof(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes...")) //nolint:govet
})
tr := accounting.Stats(ctx).NewCheckingTransfer(o, "computing hash with --download-hash")
defer func() {

View File

@@ -394,7 +394,7 @@ func parseHash(str string) (string, string, error) {
return "", "", fmt.Errorf("invalid hash %q", str)
}
// checkListing verifies that listing is not empty (unless resyncing)
// checkListing verifies that listing is not empty (unless resynching)
func (b *bisyncRun) checkListing(ls *fileList, listing, msg string) error {
if b.opt.Resync || !ls.empty() {
return nil

View File

@@ -78,15 +78,6 @@ func Color(style string, s string) string {
return style + s + terminal.Reset
}
// ColorX handles terminal colors for bisync
func ColorX(style string, s string) string {
if !Colors {
return s
}
terminal.Start()
return style + s + terminal.Reset
}
func encode(s string) string {
return encoder.OS.ToStandardPath(encoder.OS.FromStandardPath(s))
}

View File

@@ -131,18 +131,18 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
finaliseOnce.Do(func() {
if atexit.Signalled() {
if b.opt.Resync {
fs.Log(nil, Color(terminal.GreenFg, "No need to gracefully shutdown during --resync (just run it again.)"))
fs.Logf(nil, Color(terminal.GreenFg, "No need to gracefully shutdown during --resync (just run it again.)")) //nolint:govet
} else {
fs.Log(nil, Color(terminal.YellowFg, "Attempting to gracefully shutdown. (Send exit signal again for immediate un-graceful shutdown.)"))
fs.Logf(nil, Color(terminal.YellowFg, "Attempting to gracefully shutdown. (Send exit signal again for immediate un-graceful shutdown.)")) //nolint:govet
b.InGracefulShutdown = true
if b.SyncCI != nil {
fs.Infoc(nil, Color(terminal.YellowFg, "Telling Sync to wrap up early."))
fs.Infof(nil, Color(terminal.YellowFg, "Telling Sync to wrap up early.")) //nolint:govet
b.SyncCI.MaxTransfer = 1
b.SyncCI.MaxDuration = 1 * time.Second
b.SyncCI.CutoffMode = fs.CutoffModeSoft
gracePeriod := 30 * time.Second // TODO: flag to customize this?
if !waitFor("Canceling Sync if not done in", gracePeriod, func() bool { return b.CleanupCompleted }) {
fs.Log(nil, Color(terminal.YellowFg, "Canceling sync and cleaning up"))
fs.Logf(nil, Color(terminal.YellowFg, "Canceling sync and cleaning up")) //nolint:govet
b.CancelSync()
waitFor("Aborting Bisync if not done in", 60*time.Second, func() bool { return b.CleanupCompleted })
}
@@ -150,13 +150,13 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
// we haven't started to sync yet, so we're good.
// no need to worry about the listing files, as we haven't overwritten them yet.
b.CleanupCompleted = true
fs.Log(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully."))
fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully.")) //nolint:govet
}
}
if !b.CleanupCompleted {
if !b.opt.Resync {
fs.Log(nil, Color(terminal.HiRedFg, "Graceful shutdown failed."))
fs.Log(nil, Color(terminal.RedFg, "Bisync interrupted. Must run --resync to recover."))
fs.Logf(nil, Color(terminal.HiRedFg, "Graceful shutdown failed.")) //nolint:govet
fs.Logf(nil, Color(terminal.RedFg, "Bisync interrupted. Must run --resync to recover.")) //nolint:govet
}
markFailed(b.listing1)
markFailed(b.listing2)
@@ -180,14 +180,14 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
b.critical = false
}
if err == nil {
fs.Log(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully."))
fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully.")) //nolint:govet
}
}
if b.critical {
if b.retryable && b.opt.Resilient {
fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err)
fs.Error(nil, Color(terminal.YellowFg, "Bisync aborted. Error is retryable without --resync due to --resilient mode."))
fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err) //nolint:govet
fs.Errorf(nil, Color(terminal.YellowFg, "Bisync aborted. Error is retryable without --resync due to --resilient mode.")) //nolint:govet
} else {
if bilib.FileExists(b.listing1) {
_ = os.Rename(b.listing1, b.listing1+"-err")
@@ -196,15 +196,15 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
_ = os.Rename(b.listing2, b.listing2+"-err")
}
fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err)
fs.Error(nil, Color(terminal.RedFg, "Bisync aborted. Must run --resync to recover."))
fs.Errorf(nil, Color(terminal.RedFg, "Bisync aborted. Must run --resync to recover.")) //nolint:govet
}
return ErrBisyncAborted
}
if b.abort && !b.InGracefulShutdown {
fs.Log(nil, Color(terminal.RedFg, "Bisync aborted. Please try again."))
fs.Logf(nil, Color(terminal.RedFg, "Bisync aborted. Please try again.")) //nolint:govet
}
if err == nil {
fs.Infoc(nil, Color(terminal.GreenFg, "Bisync successful"))
fs.Infof(nil, Color(terminal.GreenFg, "Bisync successful")) //nolint:govet
}
return err
}
@@ -270,7 +270,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
if b.opt.Recover && bilib.FileExists(b.listing1+"-old") && bilib.FileExists(b.listing2+"-old") {
errTip := fmt.Sprintf(Color(terminal.CyanFg, "Path1: %s\n"), Color(terminal.HiBlueFg, b.listing1))
errTip += fmt.Sprintf(Color(terminal.CyanFg, "Path2: %s"), Color(terminal.HiBlueFg, b.listing2))
fs.Log(nil, Color(terminal.YellowFg, "Listings not found. Reverting to prior backup as --recover is set. \n")+errTip)
fs.Logf(nil, Color(terminal.YellowFg, "Listings not found. Reverting to prior backup as --recover is set. \n")+errTip) //nolint:govet
if opt.CheckSync != CheckSyncFalse {
// Run CheckSync to ensure old listing is valid (garbage in, garbage out!)
fs.Infof(nil, "Validating backup listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2))
@@ -279,7 +279,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
b.retryable = true
return err
}
fs.Infoc(nil, Color(terminal.GreenFg, "Backup listing is valid."))
fs.Infof(nil, Color(terminal.GreenFg, "Backup listing is valid.")) //nolint:govet
}
b.revertToOldListings()
} else {
@@ -299,7 +299,7 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
fs.Infof(nil, "Building Path1 and Path2 listings")
ls1, ls2, err = b.makeMarchListing(fctx)
if err != nil || accounting.Stats(fctx).Errored() {
fs.Error(nil, Color(terminal.RedFg, "There were errors while building listings. Aborting as it is too dangerous to continue."))
fs.Errorf(nil, Color(terminal.RedFg, "There were errors while building listings. Aborting as it is too dangerous to continue.")) //nolint:govet
b.critical = true
b.retryable = true
return err
@@ -623,7 +623,7 @@ func (b *bisyncRun) checkSyntax() error {
func (b *bisyncRun) debug(nametocheck, msgiftrue string) {
if b.DebugName != "" && b.DebugName == nametocheck {
fs.Infoc(Color(terminal.MagentaBg, "DEBUGNAME "+b.DebugName), Color(terminal.MagentaBg, msgiftrue))
fs.Infof(Color(terminal.MagentaBg, "DEBUGNAME "+b.DebugName), Color(terminal.MagentaBg, msgiftrue)) //nolint:govet
}
}

View File

@@ -161,7 +161,7 @@ func WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEn
prettyprint(result, "writing result", fs.LogLevelDebug)
if result.Size < 0 && result.Flags != "d" && ((queueCI.CheckSum && !downloadHash) || queueCI.SizeOnly) {
once.Do(func() {
fs.Log(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. Consider using modtime instead (the default) or --drive-skip-gdocs"))
fs.Logf(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. Consider using modtime instead (the default) or --drive-skip-gdocs")) //nolint:govet
})
}

View File

@@ -142,7 +142,7 @@ func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias s
if winningPath > 0 {
fs.Infof(file, Color(terminal.GreenFg, "The winner is: Path%d"), winningPath)
} else {
fs.Infoc(file, Color(terminal.RedFg, "A winner could not be determined."))
fs.Infof(file, Color(terminal.RedFg, "A winner could not be determined.")) //nolint:govet
}
}

View File

@@ -15,7 +15,7 @@ import (
// and either flag is sufficient without the other.
func (b *bisyncRun) setResyncDefaults() {
if b.opt.Resync && b.opt.ResyncMode == PreferNone {
fs.Debug(nil, Color(terminal.Dim, "defaulting to --resync-mode path1 as --resync is set"))
fs.Debugf(nil, Color(terminal.Dim, "defaulting to --resync-mode path1 as --resync is set")) //nolint:govet
b.opt.ResyncMode = PreferPath1
}
if b.opt.ResyncMode != PreferNone {

View File

@@ -80,7 +80,6 @@ INFO : Path2 checking for diffs
INFO : Applying changes
INFO : - Path1 Queue copy to Path2 - {path2/}subdir
INFO : - Path1 Do queued copies to - Path2
INFO : subdir: Making directory
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

View File

@@ -23,7 +23,7 @@ INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
(05) : move-listings empty-path1
(06) : test 2. resync with empty path2, resulting in syncing all content to path2.
(06) : test 2. resync with empty path2, resulting in synching all content to path2.
(07) : purge-children {path2/}
(08) : bisync resync
INFO : Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.

View File

@@ -1,6 +1,6 @@
test resync
# 1. Resync with empty Path1, resulting in copying all content FROM Path2
# 2. Resync with empty Path2, resulting in syncing all content TO Path2
# 2. Resync with empty Path2, resulting in synching all content TO Path2
# 3. Exercise all of the various file difference scenarios during a resync:
# File Path1 Path2 Expected action Who wins
# - file1.txt Exists Missing Sync Path1 >Path2 Path1
@@ -17,7 +17,7 @@ purge-children {path1/}
bisync resync
move-listings empty-path1
test 2. resync with empty path2, resulting in syncing all content to path2.
test 2. resync with empty path2, resulting in synching all content to path2.
purge-children {path2/}
bisync resync
move-listings empty-path2

View File

@@ -429,12 +429,11 @@ func initConfig() {
fs.Fatalf(nil, "Failed to start remote control: %v", err)
}
// Start the metrics server if configured and not running the "rc" command
if os.Args[1] != "rc" {
_, err = rcserver.MetricsStart(ctx, &rc.Opt)
if err != nil {
fs.Fatalf(nil, "Failed to start metrics server: %v", err)
}
// Start the metrics server if configured
_, err = rcserver.MetricsStart(ctx, &rc.Opt)
if err != nil {
fs.Fatalf(nil, "Failed to start metrics server: %v", err)
}
// Setup CPU profiling if desired

View File

@@ -121,6 +121,19 @@ func (fsys *FS) lookupParentDir(filePath string) (leaf string, dir *vfs.Dir, err
return leaf, dir, errc
}
// lookup a File given a path
func (fsys *FS) lookupFile(path string) (file *vfs.File, errc int) {
node, errc := fsys.lookupNode(path)
if errc != 0 {
return nil, errc
}
file, ok := node.(*vfs.File)
if !ok {
return nil, -fuse.EISDIR
}
return file, 0
}
// get a node and handle from the path or from the fh if not fhUnset
//
// handle may be nil
@@ -141,9 +154,15 @@ func (fsys *FS) stat(node vfs.Node, stat *fuse.Stat_t) (errc int) {
Size := uint64(node.Size())
Blocks := (Size + 511) / 512
modTime := node.ModTime()
Mode := node.Mode().Perm()
if node.IsDir() {
Mode |= fuse.S_IFDIR
} else {
Mode |= fuse.S_IFREG
}
//stat.Dev = 1
stat.Ino = node.Inode() // FIXME do we need to set the inode number?
stat.Mode = getMode(node)
stat.Mode = uint32(Mode)
stat.Nlink = 1
stat.Uid = fsys.VFS.Opt.UID
stat.Gid = fsys.VFS.Opt.GID
@@ -490,15 +509,14 @@ func (fsys *FS) Link(oldpath string, newpath string) (errc int) {
// Symlink creates a symbolic link.
func (fsys *FS) Symlink(target string, newpath string) (errc int) {
defer log.Trace(target, "newpath=%q, target=%q", newpath, target)("errc=%d", &errc)
return translateError(fsys.VFS.Symlink(target, newpath))
defer log.Trace(target, "newpath=%q", newpath)("errc=%d", &errc)
return -fuse.ENOSYS
}
// Readlink reads the target of a symbolic link.
func (fsys *FS) Readlink(path string) (errc int, linkPath string) {
defer log.Trace(path, "")("errc=%v, linkPath=%q", &errc, linkPath)
linkPath, err := fsys.VFS.Readlink(path)
return translateError(err), linkPath
defer log.Trace(path, "")("linkPath=%q, errc=%d", &linkPath, &errc)
return -fuse.ENOSYS, ""
}
// Chmod changes the permission bits of a file.
@@ -562,7 +580,7 @@ func (fsys *FS) Getpath(path string, fh uint64) (errc int, normalisedPath string
return errc, ""
}
normalisedPath = node.Path()
if !strings.HasPrefix(normalisedPath, "/") {
if !strings.HasPrefix("/", normalisedPath) {
normalisedPath = "/" + normalisedPath
}
return 0, normalisedPath
@@ -597,8 +615,6 @@ func translateError(err error) (errc int) {
return -fuse.ENOSYS
case vfs.EINVAL:
return -fuse.EINVAL
case vfs.ELOOP:
return -fuse.ELOOP
}
fs.Errorf(nil, "IO error: %v", err)
return -fuse.EIO
@@ -630,22 +646,6 @@ func translateOpenFlags(inFlags int) (outFlags int) {
return outFlags
}
// get the Mode from a vfs Node
func getMode(node os.FileInfo) uint32 {
vfsMode := node.Mode()
Mode := vfsMode.Perm()
if vfsMode&os.ModeDir != 0 {
Mode |= fuse.S_IFDIR
} else if vfsMode&os.ModeSymlink != 0 {
Mode |= fuse.S_IFLNK
} else if vfsMode&os.ModeNamedPipe != 0 {
Mode |= fuse.S_IFIFO
} else {
Mode |= fuse.S_IFREG
}
return uint32(Mode)
}
// Make sure interfaces are satisfied
var (
_ fuse.FileSystemInterface = (*FS)(nil)

View File

@@ -10,6 +10,7 @@ import (
"fmt"
"os"
"runtime"
"strings"
"time"
"github.com/rclone/rclone/cmd/mountlib"
@@ -34,6 +35,19 @@ func init() {
buildinfo.Tags = append(buildinfo.Tags, "cmount")
}
// Find the option string in the current options
func findOption(name string, options []string) (found bool) {
for _, option := range options {
if option == "-o" {
continue
}
if strings.Contains(option, name) {
return true
}
}
return false
}
// mountOptions configures the options from the command line flags
func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.Options) (options []string) {
// Options
@@ -79,9 +93,9 @@ func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.
if VFS.Opt.ReadOnly {
options = append(options, "-o", "ro")
}
//if opt.WritebackCache {
// FIXME? options = append(options, "-o", WritebackCache())
//}
if opt.WritebackCache {
// FIXME? options = append(options, "-o", WritebackCache())
}
if runtime.GOOS == "darwin" {
if opt.VolumeName != "" {
options = append(options, "-o", "volname="+opt.VolumeName)
@@ -97,7 +111,9 @@ func mountOptions(VFS *vfs.VFS, device string, mountpoint string, opt *mountlib.
for _, option := range opt.ExtraOptions {
options = append(options, "-o", option)
}
options = append(options, opt.ExtraFlags...)
for _, option := range opt.ExtraFlags {
options = append(options, option)
}
return options
}

View File

@@ -549,12 +549,12 @@ password to re-encrypt the config.
When |--password-command| is called to change the password then the
environment variable |RCLONE_PASSWORD_CHANGE=1| will be set. So if
changing passwords programmatically you can use the environment
changing passwords programatically you can use the environment
variable to distinguish which password you must supply.
Alternatively you can remove the password first (with |rclone config
encryption remove|), then set it again with this command which may be
easier if you don't mind the unencrypted config file being on the disk
easier if you don't mind the unecrypted config file being on the disk
briefly.
`, "|", "`"),
RunE: func(command *cobra.Command, args []string) error {
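
(Aside, purely for illustration: the text above notes that `RCLONE_PASSWORD_CHANGE=1` is set when rclone calls `--password-command` while changing the config password. Below is a hypothetical helper, not part of rclone, showing how such a command might use that variable to decide which secret to print; the file locations are invented for the example.)

```go
// Hypothetical --password-command helper (not rclone code). rclone sets
// RCLONE_PASSWORD_CHANGE=1 when it asks for the password to re-encrypt the
// config, so the helper can return a different secret in that case. The
// password is printed on stdout, which is where rclone reads it from.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	home, err := os.UserHomeDir()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	name := "current-password" // assumed location, for illustration only
	if os.Getenv("RCLONE_PASSWORD_CHANGE") == "1" {
		name = "new-password"
	}
	b, err := os.ReadFile(filepath.Join(home, ".config", "rclone-pass", name))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(strings.TrimSpace(string(b)))
}
```

Such a helper would be wired in via `--password-command /path/to/helper`; this is just one possible arrangement.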

View File

@@ -54,7 +54,7 @@ destination if there is one with the same name.
Setting |--stdout| or making the output file name |-|
will cause the output to be written to standard output.
### Troubleshooting
### Troublshooting
If you can't get |rclone copyurl| to work then here are some things you can try:

View File

@@ -7,7 +7,6 @@ import (
"fmt"
"io"
"os"
"path"
"syscall"
"time"
@@ -34,7 +33,7 @@ func (d *Dir) Attr(ctx context.Context, a *fuse.Attr) (err error) {
a.Valid = time.Duration(d.fsys.opt.AttrTimeout)
a.Gid = d.VFS().Opt.GID
a.Uid = d.VFS().Opt.UID
a.Mode = d.Mode()
a.Mode = os.ModeDir | os.FileMode(d.VFS().Opt.DirPerms)
modTime := d.ModTime()
a.Atime = modTime
a.Mtime = modTime
@@ -141,13 +140,11 @@ var _ fusefs.NodeCreater = (*Dir)(nil)
// Create makes a new file
func (d *Dir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (node fusefs.Node, handle fusefs.Handle, err error) {
defer log.Trace(d, "name=%q", req.Name)("node=%v, handle=%v, err=%v", &node, &handle, &err)
// translate the fuse flags to os flags
osFlags := int(req.Flags) | os.O_CREATE
file, err := d.Dir.Create(req.Name, osFlags)
file, err := d.Dir.Create(req.Name, int(req.Flags))
if err != nil {
return nil, nil, translateError(err)
}
fh, err := file.Open(osFlags)
fh, err := file.Open(int(req.Flags) | os.O_CREATE)
if err != nil {
return nil, nil, translateError(err)
}
@@ -203,6 +200,7 @@ func (d *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fusefs
if !ok {
return fmt.Errorf("unknown Dir type %T", newDir)
}
err = d.Dir.Rename(req.OldName, req.NewName, destDir.Dir)
if err != nil {
return translateError(err)
@@ -241,24 +239,6 @@ func (d *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fusefs.Node)
return nil, syscall.ENOSYS
}
var _ fusefs.NodeSymlinker = (*Dir)(nil)
// Symlink create a symbolic link.
func (d *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (node fusefs.Node, err error) {
defer log.Trace(d, "newname=%v, target=%v", req.NewName, req.Target)("node=%v, err=%v", &node, &err)
newName := path.Join(d.Path(), req.NewName)
target := req.Target
n, err := d.VFS().CreateSymlink(target, newName)
if err != nil {
return nil, err
}
node = &File{n.(*vfs.File), d.fsys}
return node, nil
}
// Check interface satisfied
var _ fusefs.NodeMknoder = (*Dir)(nil)

View File

@@ -32,7 +32,7 @@ func (f *File) Attr(ctx context.Context, a *fuse.Attr) (err error) {
Blocks := (Size + 511) / 512
a.Gid = f.VFS().Opt.GID
a.Uid = f.VFS().Opt.UID
a.Mode = f.File.Mode() &^ os.ModeAppend
a.Mode = os.FileMode(f.VFS().Opt.FilePerms)
a.Size = Size
a.Atime = modTime
a.Mtime = modTime
@@ -129,11 +129,3 @@ func (f *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) er
}
var _ fusefs.NodeRemovexattrer = (*File)(nil)
var _ fusefs.NodeReadlinker = (*File)(nil)
// Readlink read symbolic link target.
func (f *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (ret string, err error) {
defer log.Trace(f, "")("ret=%v, err=%v", &ret, &err)
return f.VFS().Readlink(f.Path())
}

View File

@@ -100,8 +100,6 @@ func translateError(err error) error {
return syscall.ENOSYS
case vfs.EINVAL:
return fuse.Errno(syscall.EINVAL)
case vfs.ELOOP:
return fuse.Errno(syscall.ELOOP)
}
fs.Errorf(nil, "IO error: %v", err)
return err

View File

@@ -51,14 +51,9 @@ func (f *FS) SetDebug(debug bool) {
// get the Mode from a vfs Node
func getMode(node os.FileInfo) uint32 {
vfsMode := node.Mode()
Mode := vfsMode.Perm()
if vfsMode&os.ModeDir != 0 {
Mode := node.Mode().Perm()
if node.IsDir() {
Mode |= fuse.S_IFDIR
} else if vfsMode&os.ModeSymlink != 0 {
Mode |= fuse.S_IFLNK
} else if vfsMode&os.ModeNamedPipe != 0 {
Mode |= fuse.S_IFIFO
} else {
Mode |= fuse.S_IFREG
}
@@ -133,8 +128,6 @@ func translateError(err error) syscall.Errno {
return syscall.ENOSYS
case vfs.EINVAL:
return syscall.EINVAL
case vfs.ELOOP:
return syscall.ELOOP
}
fs.Errorf(nil, "IO error: %v", err)
return syscall.EIO

View File

@@ -227,7 +227,7 @@ type dirStream struct {
// HasNext indicates if there are further entries. HasNext
// might be called on already closed streams.
func (ds *dirStream) HasNext() bool {
return ds.i < len(ds.nodes)+2
return ds.i < len(ds.nodes)
}
// Next retrieves the next entry. It is only called if HasNext
@@ -235,22 +235,7 @@ func (ds *dirStream) HasNext() bool {
// indicate I/O errors
func (ds *dirStream) Next() (de fuse.DirEntry, errno syscall.Errno) {
// defer log.Trace(nil, "")("de=%+v, errno=%v", &de, &errno)
if ds.i == 0 {
ds.i++
return fuse.DirEntry{
Mode: fuse.S_IFDIR,
Name: ".",
Ino: 0, // FIXME
}, 0
} else if ds.i == 1 {
ds.i++
return fuse.DirEntry{
Mode: fuse.S_IFDIR,
Name: "..",
Ino: 0, // FIXME
}, 0
}
fi := ds.nodes[ds.i-2]
fi := ds.nodes[ds.i]
de = fuse.DirEntry{
// Mode is the file's mode. Only the high bits (e.g. S_IFDIR)
// are considered.
@@ -458,31 +443,3 @@ func (n *Node) Listxattr(ctx context.Context, dest []byte) (uint32, syscall.Errn
}
var _ fusefs.NodeListxattrer = (*Node)(nil)
var _ fusefs.NodeReadlinker = (*Node)(nil)
// Readlink read symbolic link target.
func (n *Node) Readlink(ctx context.Context) (ret []byte, err syscall.Errno) {
defer log.Trace(n, "")("ret=%v, err=%v", &ret, &err)
path := n.node.Path()
s, serr := n.node.VFS().Readlink(path)
return []byte(s), translateError(serr)
}
var _ fusefs.NodeSymlinker = (*Node)(nil)
// Symlink create symbolic link.
func (n *Node) Symlink(ctx context.Context, target, name string, out *fuse.EntryOut) (node *fusefs.Inode, err syscall.Errno) {
defer log.Trace(n, "name=%v, target=%v", name, target)("node=%v, err=%v", &node, &err)
fullPath := path.Join(n.node.Path(), name)
vfsNode, serr := n.node.VFS().CreateSymlink(target, fullPath)
if serr != nil {
return nil, translateError(serr)
}
n.fsys.setEntryOut(vfsNode, out)
newNode := newNode(n.fsys, vfsNode)
newInode := n.NewInode(ctx, newNode, fusefs.StableAttr{Mode: out.Attr.Mode})
return newInode, 0
}

View File

@@ -373,9 +373,6 @@ func (m *MountPoint) Mount() (mountDaemon *os.Process, err error) {
m.ErrChan, m.UnmountFn, err = m.MountFn(m.VFS, m.MountPoint, &m.MountOpt)
if err != nil {
if len(os.Args) > 0 && strings.HasPrefix(os.Args[0], "/snap/") {
return nil, fmt.Errorf("mounting is not supported when running from snap")
}
return nil, fmt.Errorf("failed to mount FUSE fs: %w", err)
}
m.MountedOn = time.Now()

View File

@@ -3,9 +3,6 @@
package nfsmount
import (
"context"
"errors"
"os"
"os/exec"
"runtime"
"testing"
@@ -33,24 +30,7 @@ func TestMount(t *testing.T) {
}
sudo = true
}
for _, cacheType := range []string{"memory", "disk", "symlink"} {
t.Run(cacheType, func(t *testing.T) {
nfs.Opt.HandleCacheDir = t.TempDir()
require.NoError(t, nfs.Opt.HandleCache.Set(cacheType))
// Check we can create a handler
_, err := nfs.NewHandler(context.Background(), nil, &nfs.Opt)
if errors.Is(err, nfs.ErrorSymlinkCacheNotSupported) || errors.Is(err, nfs.ErrorSymlinkCacheNoPermission) {
t.Skip(err.Error() + ": run with: go test -c && sudo setcap cap_dac_read_search+ep ./nfsmount.test && ./nfsmount.test -test.v")
}
require.NoError(t, err)
// Configure rclone via environment var since the mount gets run in a subprocess
_ = os.Setenv("RCLONE_NFS_CACHE_DIR", nfs.Opt.HandleCacheDir)
_ = os.Setenv("RCLONE_NFS_CACHE_TYPE", cacheType)
t.Cleanup(func() {
_ = os.Unsetenv("RCLONE_NFS_CACHE_DIR")
_ = os.Unsetenv("RCLONE_NFS_CACHE_TYPE")
})
vfstest.RunTests(t, false, vfscommon.CacheModeWrites, false, mount)
})
}
nfs.Opt.HandleCacheDir = t.TempDir()
require.NoError(t, nfs.Opt.HandleCache.Set("disk"))
vfstest.RunTests(t, false, vfscommon.CacheModeWrites, false, mount)
}

View File

@@ -11,7 +11,6 @@ import (
"path"
"path/filepath"
"regexp"
"sort"
"strings"
"github.com/anacrolix/dms/dlna"
@@ -159,18 +158,6 @@ func (cds *contentDirectoryService) readContainer(o object, host string) (ret []
}
}
// Sort the directory entries by directories first then alphabetically by name
sort.Slice(dirEntries, func(i, j int) bool {
iNode, jNode := dirEntries[i], dirEntries[j]
iIsDir, jIsDir := iNode.IsDir(), jNode.IsDir()
if iIsDir && !jIsDir {
return true
} else if !iIsDir && jIsDir {
return false
}
return strings.ToLower(iNode.Name()) < strings.ToLower(jNode.Name())
})
dirEntries, mediaResources := mediaWithResources(dirEntries)
for _, de := range dirEntries {
child := object{

View File

@@ -2,15 +2,17 @@ package docker
import (
"fmt"
"math"
"strings"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/vfs/vfscommon"
"github.com/spf13/pflag"
)
// applyOptions configures volume from request options.
@@ -110,15 +112,11 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
for key, val := range vol.Options {
opt[key] = val
}
mntMap := configmap.Simple{}
vfsMap := configmap.Simple{}
for key := range opt {
var ok bool
var err error
normalKey := normalOptName(key)
underscoreKey := strings.ReplaceAll(normalKey, "-", "_")
switch normalKey {
switch normalOptName(key) {
case "persist":
vol.persist, err = opt.GetBool(key)
ok = true
@@ -131,24 +129,25 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
}
if !ok {
// try to use as a mount option in mntMap
if mountlib.OptionsInfo.Get(underscoreKey) != nil {
mntMap[underscoreKey] = vol.Options[key]
ok = true
// try to use as a mount option in mntOpt
ok, err = getMountOption(mntOpt, opt, key)
if ok && err != nil {
return fmt.Errorf("cannot parse mount option %q: %w", key, err)
}
}
if !ok {
// try as a vfs option in vfsMap
if vfscommon.OptionsInfo.Get(underscoreKey) != nil {
vfsMap[underscoreKey] = vol.Options[key]
ok = true
// try as a vfs option in vfsOpt
ok, err = getVFSOption(vfsOpt, opt, key)
if ok && err != nil {
return fmt.Errorf("cannot parse vfs option %q: %w", key, err)
}
}
if !ok {
// try as a backend option in fsOpt (backends use "_" instead of "-")
fsOptName := strings.TrimPrefix(underscoreKey, fsType+"_")
hasFsPrefix := underscoreKey != fsOptName
optWithPrefix := strings.ReplaceAll(normalOptName(key), "-", "_")
fsOptName := strings.TrimPrefix(optWithPrefix, fsType+"_")
hasFsPrefix := optWithPrefix != fsOptName
if !hasFsPrefix || fsInfo.Options.Get(fsOptName) == nil {
fs.Logf(nil, "Option %q is not supported by backend %q", key, fsType)
return fmt.Errorf("unsupported backend option %q", key)
@@ -160,18 +159,6 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
}
}
// Parse VFS options
err = configstruct.Set(vfsMap, vfsOpt)
if err != nil {
return fmt.Errorf("cannot parse vfs options: %w", err)
}
// Parse Mount options
err = configstruct.Set(mntMap, mntOpt)
if err != nil {
return fmt.Errorf("cannot parse mount options: %w", err)
}
// build remote string from fsName, fsType, fsOpt, fsPath
colon := ":"
comma := ","
@@ -191,6 +178,150 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
return vol.validate()
}
func getMountOption(mntOpt *mountlib.Options, opt rc.Params, key string) (ok bool, err error) {
ok = true
switch normalOptName(key) {
case "debug-fuse":
mntOpt.DebugFUSE, err = opt.GetBool(key)
case "attr-timeout":
mntOpt.AttrTimeout, err = opt.GetFsDuration(key)
case "option":
mntOpt.ExtraOptions, err = getStringArray(opt, key)
case "fuse-flag":
mntOpt.ExtraFlags, err = getStringArray(opt, key)
case "daemon":
mntOpt.Daemon, err = opt.GetBool(key)
case "daemon-timeout":
mntOpt.DaemonTimeout, err = opt.GetFsDuration(key)
case "default-permissions":
mntOpt.DefaultPermissions, err = opt.GetBool(key)
case "allow-non-empty":
mntOpt.AllowNonEmpty, err = opt.GetBool(key)
case "allow-root":
mntOpt.AllowRoot, err = opt.GetBool(key)
case "allow-other":
mntOpt.AllowOther, err = opt.GetBool(key)
case "async-read":
mntOpt.AsyncRead, err = opt.GetBool(key)
case "max-read-ahead":
err = getFVarP(&mntOpt.MaxReadAhead, opt, key)
case "write-back-cache":
mntOpt.WritebackCache, err = opt.GetBool(key)
case "volname":
mntOpt.VolumeName, err = opt.GetString(key)
case "noappledouble":
mntOpt.NoAppleDouble, err = opt.GetBool(key)
case "noapplexattr":
mntOpt.NoAppleXattr, err = opt.GetBool(key)
case "network-mode":
mntOpt.NetworkMode, err = opt.GetBool(key)
default:
ok = false
}
return
}
func getVFSOption(vfsOpt *vfscommon.Options, opt rc.Params, key string) (ok bool, err error) {
var intVal int64
ok = true
switch normalOptName(key) {
// options prefixed with "vfs-"
case "vfs-cache-mode":
err = getFVarP(&vfsOpt.CacheMode, opt, key)
case "vfs-cache-poll-interval":
vfsOpt.CachePollInterval, err = opt.GetFsDuration(key)
case "vfs-cache-max-age":
vfsOpt.CacheMaxAge, err = opt.GetFsDuration(key)
case "vfs-cache-max-size":
err = getFVarP(&vfsOpt.CacheMaxSize, opt, key)
case "vfs-read-chunk-size":
err = getFVarP(&vfsOpt.ChunkSize, opt, key)
case "vfs-read-chunk-size-limit":
err = getFVarP(&vfsOpt.ChunkSizeLimit, opt, key)
case "vfs-case-insensitive":
vfsOpt.CaseInsensitive, err = opt.GetBool(key)
case "vfs-write-wait":
vfsOpt.WriteWait, err = opt.GetFsDuration(key)
case "vfs-read-wait":
vfsOpt.ReadWait, err = opt.GetFsDuration(key)
case "vfs-write-back":
vfsOpt.WriteBack, err = opt.GetFsDuration(key)
case "vfs-read-ahead":
err = getFVarP(&vfsOpt.ReadAhead, opt, key)
case "vfs-used-is-size":
vfsOpt.UsedIsSize, err = opt.GetBool(key)
case "vfs-read-chunk-streams":
intVal, err = opt.GetInt64(key)
if err == nil {
if intVal >= 0 && intVal <= math.MaxInt {
vfsOpt.ChunkStreams = int(intVal)
} else {
err = fmt.Errorf("key %q (%v) overflows int", key, intVal)
}
}
// unprefixed vfs options
case "no-modtime":
vfsOpt.NoModTime, err = opt.GetBool(key)
case "no-checksum":
vfsOpt.NoChecksum, err = opt.GetBool(key)
case "dir-cache-time":
vfsOpt.DirCacheTime, err = opt.GetFsDuration(key)
case "poll-interval":
vfsOpt.PollInterval, err = opt.GetFsDuration(key)
case "read-only":
vfsOpt.ReadOnly, err = opt.GetBool(key)
case "dir-perms":
err = getFVarP(&vfsOpt.DirPerms, opt, key)
case "file-perms":
err = getFVarP(&vfsOpt.FilePerms, opt, key)
// unprefixed unix-only vfs options
case "umask":
err = getFVarP(&vfsOpt.Umask, opt, key)
case "uid":
intVal, err = opt.GetInt64(key)
if err == nil {
if intVal >= 0 && intVal <= math.MaxUint32 {
vfsOpt.UID = uint32(intVal)
} else {
err = fmt.Errorf("key %q (%v) overflows uint32", key, intVal)
}
}
case "gid":
intVal, err = opt.GetInt64(key)
if err == nil {
if intVal >= 0 && intVal <= math.MaxUint32 {
vfsOpt.UID = uint32(intVal)
} else {
err = fmt.Errorf("key %q (%v) overflows uint32", key, intVal)
}
}
// non-vfs options
default:
ok = false
}
return
}
func getFVarP(pvalue pflag.Value, opt rc.Params, key string) error {
str, err := opt.GetString(key)
if err != nil {
return err
}
return pvalue.Set(str)
}
func getStringArray(opt rc.Params, key string) ([]string, error) {
str, err := opt.GetString(key)
if err != nil {
return nil, err
}
return strings.Split(str, ","), nil
}
func normalOptName(key string) string {
return strings.ReplaceAll(strings.TrimPrefix(strings.ToLower(key), "--"), "_", "-")
}

View File

@@ -1,75 +0,0 @@
package docker
import (
"testing"
"time"
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
_ "github.com/rclone/rclone/backend/local"
)
func TestApplyOptions(t *testing.T) {
vol := &Volume{
Name: "testName",
MountPoint: "testPath",
drv: &Driver{
root: "testRoot",
},
mnt: &mountlib.MountPoint{
MountPoint: "testPath",
},
mountReqs: make(map[string]interface{}),
}
// Happy path
volOpt := VolOpts{
"remote": "/tmp/docker",
"persist": "FALSE",
"mount_type": "potato",
// backend options
"--local-case-sensitive": "true",
"local_no_check_updated": "1",
// mount options
"debug-fuse": "true",
"attr_timeout": "100s",
"--async-read": "TRUE",
// vfs options
"no-modtime": "1",
"no_checksum": "true",
"--no-seek": "true",
}
err := vol.applyOptions(volOpt)
require.NoError(t, err)
// normal options
assert.Equal(t, ":local,case_sensitive='true',no_check_updated='1':/tmp/docker", vol.fsString)
assert.Equal(t, false, vol.persist)
assert.Equal(t, "potato", vol.mountType)
// mount options
assert.Equal(t, true, vol.mnt.MountOpt.DebugFUSE)
assert.Equal(t, fs.Duration(100*time.Second), vol.mnt.MountOpt.AttrTimeout)
assert.Equal(t, true, vol.mnt.MountOpt.AsyncRead)
// vfs options
assert.Equal(t, true, vol.mnt.VFSOpt.NoModTime)
assert.Equal(t, true, vol.mnt.VFSOpt.NoChecksum)
assert.Equal(t, true, vol.mnt.VFSOpt.NoSeek)
// Check errors
err = vol.applyOptions(VolOpts{
"debug-fuse": "POTATO",
})
require.ErrorContains(t, err, "cannot parse mount options")
err = vol.applyOptions(VolOpts{
"no-modtime": "POTATO",
})
require.ErrorContains(t, err, "cannot parse vfs options")
err = vol.applyOptions(VolOpts{
"remote": "/tmp/docker",
"local_not_found": "POTATO",
})
require.ErrorContains(t, err, "unsupported backend option")
}

View File

@@ -24,12 +24,6 @@ import (
nfshelper "github.com/willscott/go-nfs/helpers"
)
// Errors on cache initialisation
var (
ErrorSymlinkCacheNotSupported = errors.New("symlink cache not supported on " + runtime.GOOS)
ErrorSymlinkCacheNoPermission = errors.New("symlink cache must be run as root or with CAP_DAC_READ_SEARCH")
)
// Cache controls the file handle cache implementation
type Cache interface {
// ToHandle takes a file and represents it with an opaque handle to reference it.
@@ -49,35 +43,25 @@ type Cache interface {
// Set the cache of the handler to the type required by the user
func (h *Handler) getCache() (c Cache, err error) {
fs.Debugf("nfs", "Starting %v handle cache", h.opt.HandleCache)
switch h.opt.HandleCache {
case cacheMemory:
return nfshelper.NewCachingHandler(h, h.opt.HandleLimit), nil
case cacheDisk:
return newDiskHandler(h)
case cacheSymlink:
dh, err := newDiskHandler(h)
if err != nil {
return nil, err
if runtime.GOOS != "linux" {
return nil, errors.New("can only use symlink cache on Linux")
}
err = dh.makeSymlinkCache()
if err != nil {
return nil, err
}
return dh, nil
return nil, errors.New("FIXME not implemented yet")
}
return nil, errors.New("unknown handle cache type")
}
// diskHandler implements an on disk NFS file handle cache
type diskHandler struct {
mu sync.RWMutex
cacheDir string
billyFS billy.Filesystem
write func(fh []byte, cachePath string, fullPath string) ([]byte, error)
read func(fh []byte, cachePath string) ([]byte, error)
remove func(fh []byte, cachePath string) error
handleType int32 //nolint:unused // used by the symlink cache
mu sync.RWMutex
cacheDir string
billyFS billy.Filesystem
}
// Create a new disk handler
@@ -99,9 +83,6 @@ func newDiskHandler(h *Handler) (dh *diskHandler, err error) {
dh = &diskHandler{
cacheDir: cacheDir,
billyFS: h.billyFS,
write: dh.diskCacheWrite,
read: dh.diskCacheRead,
remove: dh.diskCacheRemove,
}
fs.Infof("nfs", "Storing handle cache in %q", dh.cacheDir)
return dh, nil
@@ -139,7 +120,7 @@ func (dh *diskHandler) ToHandle(f billy.Filesystem, splitPath []string) (fh []by
fs.Errorf("nfs", "Couldn't create cache file handle directory: %v", err)
return fh
}
fh, err = dh.write(fh, cachePath, fullPath)
err = os.WriteFile(cachePath, []byte(fullPath), 0600)
if err != nil {
fs.Errorf("nfs", "Couldn't create cache file handle: %v", err)
return fh
@@ -147,11 +128,6 @@ func (dh *diskHandler) ToHandle(f billy.Filesystem, splitPath []string) (fh []by
return fh
}
// Write the fullPath into cachePath returning the possibly updated fh
func (dh *diskHandler) diskCacheWrite(fh []byte, cachePath string, fullPath string) ([]byte, error) {
return fh, os.WriteFile(cachePath, []byte(fullPath), 0600)
}
var errStaleHandle = &nfs.NFSStatusError{NFSStatus: nfs.NFSStatusStale}
// FromHandle converts from an opaque handle to the file it represents
@@ -159,7 +135,7 @@ func (dh *diskHandler) FromHandle(fh []byte) (f billy.Filesystem, splitPath []st
dh.mu.RLock()
defer dh.mu.RUnlock()
cachePath := dh.handleToPath(fh)
fullPathBytes, err := dh.read(fh, cachePath)
fullPathBytes, err := os.ReadFile(cachePath)
if err != nil {
fs.Errorf("nfs", "Stale handle %q: %v", cachePath, err)
return nil, nil, errStaleHandle
@@ -168,28 +144,18 @@ func (dh *diskHandler) FromHandle(fh []byte) (f billy.Filesystem, splitPath []st
return dh.billyFS, splitPath, nil
}
// Read the contents of (fh, cachePath)
func (dh *diskHandler) diskCacheRead(fh []byte, cachePath string) ([]byte, error) {
return os.ReadFile(cachePath)
}
// Invalidate the handle passed - used on rename and delete
func (dh *diskHandler) InvalidateHandle(f billy.Filesystem, fh []byte) error {
dh.mu.Lock()
defer dh.mu.Unlock()
cachePath := dh.handleToPath(fh)
err := dh.remove(fh, cachePath)
err := os.Remove(cachePath)
if err != nil {
fs.Errorf("nfs", "Failed to remove handle %q: %v", cachePath, err)
}
return nil
}
// Remove the (fh, cachePath) file
func (dh *diskHandler) diskCacheRemove(fh []byte, cachePath string) error {
return os.Remove(cachePath)
}
// HandleLimit exports how many file handles can be safely stored by this cache.
func (dh *diskHandler) HandleLimit() int {
return math.MaxInt

View File

@@ -13,9 +13,6 @@ import (
"github.com/stretchr/testify/require"
)
// NB to test the symlink cache, running with elevated permissions is needed
const testSymlinkCache = "go test -c && sudo setcap cap_dac_read_search+ep ./nfs.test && ./nfs.test -test.v -test.run TestCache/symlink"
// Check basic CRUD operations
func testCacheCRUD(t *testing.T, h *Handler, c Cache, fileName string) {
// Check reading a non existent handle returns an error
@@ -104,12 +101,11 @@ func TestCache(t *testing.T) {
ci := fs.GetConfig(context.Background())
oldLogLevel := ci.LogLevel
ci.LogLevel = fs.LogLevelEmergency
//ci.LogLevel = fs.LogLevelDebug
defer func() {
ci.LogLevel = oldLogLevel
}()
billyFS := &FS{nil} // place holder billyFS
for _, cacheType := range []handleCache{cacheMemory, cacheDisk, cacheSymlink} {
for _, cacheType := range []handleCache{cacheMemory, cacheDisk} {
cacheType := cacheType
t.Run(cacheType.String(), func(t *testing.T) {
h := &Handler{
@@ -119,27 +115,8 @@ func TestCache(t *testing.T) {
h.opt.HandleCache = cacheType
h.opt.HandleCacheDir = t.TempDir()
c, err := h.getCache()
if err == ErrorSymlinkCacheNotSupported {
t.Skip(err.Error())
}
if err == ErrorSymlinkCacheNoPermission {
t.Skip("Need more permissions to run symlink cache tests: " + testSymlinkCache)
}
require.NoError(t, err)
t.Run("Empty", func(t *testing.T) {
// Write a handle
splitPath := []string{""}
fh := c.ToHandle(h.billyFS, splitPath)
assert.True(t, len(fh) > 0)
// Read the handle back
newFs, newSplitPath, err := c.FromHandle(fh)
require.NoError(t, err)
assert.Equal(t, h.billyFS, newFs)
assert.Equal(t, splitPath, newSplitPath)
testCacheCRUD(t, h, c, "file")
})
t.Run("CRUD", func(t *testing.T) {
testCacheCRUD(t, h, c, "file")
})

View File

@@ -3,6 +3,7 @@
package nfs
import (
"math"
"os"
"path"
"strings"
@@ -36,7 +37,7 @@ func setSys(fi os.FileInfo) {
Nlink: 1,
UID: vfs.Opt.UID,
GID: vfs.Opt.GID,
Fileid: node.Inode(), // without this mounting doesn't work on Linux
Fileid: math.MaxUint64, // without this mounting doesn't work on Linux
}
node.SetSys(&stat)
}
@@ -141,16 +142,16 @@ func (f *FS) Lstat(filename string) (fi os.FileInfo, err error) {
return fi, nil
}
// Symlink creates a link pointing to target
// Symlink is not supported over NFS
func (f *FS) Symlink(target, link string) (err error) {
defer log.Trace(target, "link=%q", link)("err=%v", &err)
return f.vfs.Symlink(target, link)
return os.ErrInvalid
}
// Readlink reads the contents of link
// Readlink is not supported
func (f *FS) Readlink(link string) (result string, err error) {
defer log.Trace(link, "")("result=%q, err=%v", &result, &err)
return f.vfs.Readlink(link)
return "", os.ErrInvalid
}
// Chmod changes the file modes
@@ -194,7 +195,7 @@ func (f *FS) Chown(name string, uid, gid int) (err error) {
return file.Chown(uid, gid)
}
// Chtimes changes the access time and modified time
// Chtimes changes the acces time and modified time
func (f *FS) Chtimes(name string, atime time.Time, mtime time.Time) (err error) {
defer log.Trace(name, "atime=%v, mtime=%v", atime, mtime)("err=%v", &err)
return f.vfs.Chtimes(name, atime, mtime)

View File

@@ -145,9 +145,7 @@ that it uses an on disk cache, but the cache entries are held as
symlinks. Rclone will use the handle of the underlying file as the NFS
handle which improves performance. This sort of cache can't be backed
up and restored as the underlying handles will change. This is Linux
only. It requires running rclone as root or with |CAP_DAC_READ_SEARCH|.
You can run rclone with this extra permission by doing this to the
rclone binary |sudo setcap cap_dac_read_search+ep /path/to/rclone|.
only.
|--nfs-cache-handle-limit| controls the maximum number of cached NFS
handles stored by the caching handler. This should not be set too low

View File

@@ -1,177 +0,0 @@
//go:build unix && linux
/*
This implements an efficient disk cache for the NFS file handles for
Linux only.
1. The destination paths are stored as symlink destinations. These
can be stored in the directory for maximum efficiency.
2. The on disk handle of the cache file is returned to NFS with
name_to_handle_at(). This means that if the cache is deleted and
restored, the file handle mapping will be lost.
3. These handles are looked up with open_by_handle_at() so no
searching through directory trees is needed.
Note that open_by_handle_at requires CAP_DAC_READ_SEARCH so rclone
will need to be run as root or with elevated permissions.
Test with
go test -c && sudo setcap cap_dac_read_search+ep ./nfs.test && ./nfs.test -test.v -test.run TestCache/symlink
*/
package nfs
import (
"bytes"
"errors"
"fmt"
"os"
"path/filepath"
"syscall"
"github.com/rclone/rclone/fs"
"golang.org/x/sys/unix"
)
// emptyPath is written instead of "" as symlinks can't be empty
var (
emptyPath = "\x01"
emptyPathBytes = []byte(emptyPath)
)
// Turn the diskHandler into a symlink cache
//
// This also tests the cache works as it may not have enough
// permissions or be on the correct Linux version.
func (dh *diskHandler) makeSymlinkCache() error {
path := filepath.Join(dh.cacheDir, "test")
fullPath := "testpath"
fh := []byte{1, 2, 3, 4, 5}
// Create a symlink
newFh, err := dh.symlinkCacheWrite(fh, path, fullPath)
fs.Debugf(nil, "newFh = %q", newFh)
if err != nil {
return fmt.Errorf("symlink cache write test failed: %w", err)
}
defer func() {
_ = os.Remove(path)
}()
// Read it back
newFullPath, err := dh.symlinkCacheRead(newFh, path)
fs.Debugf(nil, "newFullPath = %q", newFullPath)
if err != nil {
if errors.Is(err, syscall.EPERM) {
return ErrorSymlinkCacheNoPermission
}
return fmt.Errorf("symlink cache read test failed: %w", err)
}
// Check result all OK
if string(newFullPath) != fullPath {
return fmt.Errorf("symlink cache read test failed: expecting %q read %q", string(newFullPath), fullPath)
}
// If OK install symlink cache
dh.read = dh.symlinkCacheRead
dh.write = dh.symlinkCacheWrite
dh.remove = dh.symlinkCacheRemove
return nil
}
// Write the fullPath into cachePath returning the possibly updated fh
//
// This writes the fullPath into the file with the cachePath given and
// returns the handle for that file so we can look it up later.
func (dh *diskHandler) symlinkCacheWrite(fh []byte, cachePath string, fullPath string) (newFh []byte, err error) {
//defer log.Trace(nil, "fh=%x, cachePath=%q, fullPath=%q", fh, cachePath)("newFh=%x, err=%v", &newFh, &err)
// Can't write an empty symlink so write a substitution
if fullPath == "" {
fullPath = emptyPath
}
// Write the symlink
err = os.Symlink(fullPath, cachePath)
if err != nil && !errors.Is(err, syscall.EEXIST) {
return nil, fmt.Errorf("symlink cache create symlink: %w", err)
}
// Read the newly created symlinks handle
handle, _, err := unix.NameToHandleAt(unix.AT_FDCWD, cachePath, 0)
if err != nil {
return nil, fmt.Errorf("symlink cache name to handle at: %w", err)
}
// Store the handle type if it hasn't changed
// This should run once only when called by makeSymlinkCache
if dh.handleType != handle.Type() {
dh.handleType = handle.Type()
}
return handle.Bytes(), nil
}
// Read the contents of (fh, cachePath)
//
// This reads the symlink with the corresponding file handle and
// returns the contents. It ignores the cachePath which will be
// pointing in the wrong place.
//
// Note that the caller needs CAP_DAC_READ_SEARCH to use this.
func (dh *diskHandler) symlinkCacheRead(fh []byte, cachePath string) (fullPath []byte, err error) {
//defer log.Trace(nil, "fh=%x, cachePath=%q", fh, cachePath)("fullPath=%q, err=%v", &fullPath, &err)
// Find the file with the handle passed in
handle := unix.NewFileHandle(dh.handleType, fh)
fd, err := unix.OpenByHandleAt(unix.AT_FDCWD, handle, unix.O_RDONLY|unix.O_PATH|unix.O_NOFOLLOW) // needs O_PATH for symlinks
if err != nil {
return nil, fmt.Errorf("symlink cache open by handle at: %w", err)
}
// Close it on exit
defer func() {
newErr := unix.Close(fd)
if err != nil {
err = newErr
}
}()
// Read the symlink which is the path required
buf := make([]byte, 1024) // Max path length
n, err := unix.Readlinkat(fd, "", buf) // It will (silently) truncate the contents, in case the buffer is too small to hold all of the contents.
if err != nil {
return nil, fmt.Errorf("symlink cache read: %w", err)
}
fullPath = buf[:n:n]
// Undo empty symlink substitution
if bytes.Equal(fullPath, emptyPathBytes) {
fullPath = buf[:0:0]
}
return fullPath, nil
}
// Remove the (fh, cachePath) file
func (dh *diskHandler) symlinkCacheRemove(fh []byte, cachePath string) error {
// First read the path
fullPath, err := dh.symlinkCacheRead(fh, cachePath)
if err != nil {
return err
}
// fh for the actual cache file
fh = hashPath(string(fullPath))
// cachePath for the actual cache file
cachePath = dh.handleToPath(fh)
return os.Remove(cachePath)
}
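A minimal standalone sketch (not taken from rclone) of the name_to_handle_at / open_by_handle_at round-trip that the symlink cache above relies on. It assumes Linux, the golang.org/x/sys/unix package, and CAP_DAC_READ_SEARCH (or root) for OpenByHandleAt; the link path and stored target string are illustrative only.

```go
// Sketch: store a path as a symlink target, capture the symlink's kernel
// file handle, then resolve the handle back to the stored path without a
// directory walk. Paths and the stored target are illustrative.
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

func main() {
	dir := os.TempDir()
	link := filepath.Join(dir, "handle-demo")
	_ = os.Remove(link)

	// 1. The cached path is stored as the symlink's target.
	if err := os.Symlink("bucket/dir/file.txt", link); err != nil {
		panic(err)
	}

	// 2. Ask the kernel for a durable handle to the symlink itself
	// (flags 0 means the symlink is not followed).
	handle, _, err := unix.NameToHandleAt(unix.AT_FDCWD, link, 0)
	if err != nil {
		panic(err)
	}

	// 3. Later, reopen purely from the handle bytes. The mount fd can be
	// any descriptor on the same filesystem, so open the containing dir.
	dirFD, err := unix.Open(dir, unix.O_RDONLY|unix.O_DIRECTORY, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(dirFD)

	h := unix.NewFileHandle(handle.Type(), handle.Bytes())
	// O_PATH|O_NOFOLLOW keeps us on the symlink rather than its destination.
	fd, err := unix.OpenByHandleAt(dirFD, h, unix.O_RDONLY|unix.O_PATH|unix.O_NOFOLLOW)
	if err != nil {
		panic(err) // typically EPERM without CAP_DAC_READ_SEARCH
	}
	defer unix.Close(fd)

	// 4. Read the symlink target back, i.e. recover the cached path.
	buf := make([]byte, 1024)
	n, err := unix.Readlinkat(fd, "", buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("recovered path: %q\n", buf[:n])
}
```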

View File

@@ -1,8 +0,0 @@
//go:build unix && !linux
package nfs
// Turn the diskHandler into a symlink cache
func (dh *diskHandler) makeSymlinkCache() error {
return ErrorSymlinkCacheNotSupported
}

View File

@@ -4,10 +4,8 @@ package proxy
import (
"bytes"
"context"
"crypto/md5"
"crypto/sha256"
"crypto/subtle"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
@@ -219,13 +217,8 @@ func (p *Proxy) call(user, auth string, isPublicKey bool) (value interface{}, er
return nil, fmt.Errorf("proxy: couldn't find backend for %q: %w", fsName, err)
}
// Add a config hash to ensure configs with different values have different names.
// 5 characters length is 5*6 = 30 bits of base64
md5sumBinary := md5.Sum([]byte(config.String()))
configHash := base64.RawURLEncoding.EncodeToString(md5sumBinary[:])[:5]
// base name of config on user name. This may appear in logs
name := "proxy-" + user + "-" + configHash
name := "proxy-" + user
fsString := name + ":" + root
// Look for fs in the VFS cache
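A minimal sketch of the config-hash naming scheme described in the removed lines above: hash the backend config and keep five base64 characters (5*6 = 30 bits) so the same user with different settings gets a different Fs name. The user name, config string and printed hash values are illustrative, not real proxy output.

```go
// Sketch: derive a short, stable suffix from a backend config string.
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func proxyFsName(user, configString string) string {
	sum := md5.Sum([]byte(configString))
	// 5 base64 characters = 30 bits of the MD5 digest.
	hash := base64.RawURLEncoding.EncodeToString(sum[:])[:5]
	return "proxy-" + user + "-" + hash
}

func main() {
	fmt.Println(proxyFsName("alice", "type=sftp,host=example.com"))
	fmt.Println(proxyFsName("alice", "type=sftp,host=other.example.com"))
	// Same user, different config => different names,
	// e.g. proxy-alice-3k9Qx vs proxy-alice-Zq1Wv (values illustrative).
}
```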

View File

@@ -90,8 +90,7 @@ func TestRun(t *testing.T) {
require.NotNil(t, entry.vfs)
f := entry.vfs.Fs()
require.NotNil(t, f)
assert.True(t, strings.HasPrefix(f.Name(), "proxy-"+testUser+"-"))
assert.Equal(t, len("proxy-"+testUser+"-")+5, len(f.Name()))
assert.Equal(t, "proxy-"+testUser, f.Name())
assert.True(t, strings.HasPrefix(f.String(), "Local file system"))
// check it is in the cache
@@ -109,7 +108,7 @@ func TestRun(t *testing.T) {
vfs, vfsKey, err := p.Call(testUser, testPass, false)
require.NoError(t, err)
require.NotNil(t, vfs)
assert.True(t, strings.HasPrefix(vfs.Fs().Name(), "proxy-"+testUser+"-"))
assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
assert.Equal(t, testUser, vfsKey)
// check it is in the cache
@@ -130,7 +129,7 @@ func TestRun(t *testing.T) {
vfs, vfsKey, err = p.Call(testUser, testPass, false)
require.NoError(t, err)
require.NotNil(t, vfs)
assert.True(t, strings.HasPrefix(vfs.Fs().Name(), "proxy-"+testUser+"-"))
assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
assert.Equal(t, testUser, vfsKey)
// check cache is at the same level
@@ -174,7 +173,7 @@ func TestRun(t *testing.T) {
require.NotNil(t, entry.vfs)
f := entry.vfs.Fs()
require.NotNil(t, f)
assert.True(t, strings.HasPrefix(f.Name(), "proxy-"+testUser+"-"))
assert.Equal(t, "proxy-"+testUser, f.Name())
assert.True(t, strings.HasPrefix(f.String(), "Local file system"))
// check it is in the cache
@@ -196,7 +195,7 @@ func TestRun(t *testing.T) {
)
require.NoError(t, err)
require.NotNil(t, vfs)
assert.True(t, strings.HasPrefix(vfs.Fs().Name(), "proxy-"+testUser+"-"))
assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
assert.Equal(t, testUser, vfsKey)
// check it is in the cache
@@ -217,7 +216,7 @@ func TestRun(t *testing.T) {
vfs, vfsKey, err = p.Call(testUser, publicKeyString, true)
require.NoError(t, err)
require.NotNil(t, vfs)
assert.True(t, strings.HasPrefix(vfs.Fs().Name(), "proxy-"+testUser+"-"))
assert.Equal(t, "proxy-"+testUser, vfs.Fs().Name())
assert.Equal(t, testUser, vfsKey)
// check cache is at the same level

View File

@@ -9,7 +9,6 @@ import (
"path"
"strings"
"sync"
"time"
"github.com/ncw/swift/v2"
"github.com/rclone/gofakes3"
@@ -19,6 +18,7 @@ import (
var (
emptyPrefix = &gofakes3.Prefix{}
timeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
)
// s3Backend implements the gofacess3.Backend interface to make an S3
@@ -52,7 +52,7 @@ func (b *s3Backend) ListBuckets(ctx context.Context) ([]gofakes3.BucketInfo, err
for _, entry := range dirEntries {
if entry.IsDir() {
response = append(response, gofakes3.BucketInfo{
Name: entry.Name(),
Name: gofakes3.URLEncode(entry.Name()),
CreationDate: gofakes3.NewContentTime(entry.ModTime()),
})
}
@@ -98,13 +98,6 @@ func (b *s3Backend) ListBucket(ctx context.Context, bucket string, prefix *gofak
return b.pager(response, page)
}
// formatHeaderTime makes a timestamp which is the same as that used by AWS.
//
// This is like RFC1123 always in UTC, but has GMT instead of UTC
func formatHeaderTime(t time.Time) string {
return t.UTC().Format("Mon, 02 Jan 2006 15:04:05") + " GMT"
}
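A minimal sketch (not rclone code) showing why a custom formatter is used instead of time.RFC1123: RFC1123 keeps the value in its own zone and prints that zone's abbreviation, while S3-style headers always want the time converted to UTC and labelled "GMT". The timezone and date are illustrative.

```go
// Sketch: AWS-style header time vs Go's RFC1123 formatting.
package main

import (
	"fmt"
	"time"
)

func formatHeaderTime(t time.Time) string {
	return t.UTC().Format("Mon, 02 Jan 2006 15:04:05") + " GMT"
}

func main() {
	loc, err := time.LoadLocation("America/New_York")
	if err != nil {
		loc = time.FixedZone("EST", -5*60*60) // fall back if tzdata is unavailable
	}
	t := time.Date(2024, 11, 14, 7, 30, 0, 0, loc)

	fmt.Println(t.Format(time.RFC1123)) // Thu, 14 Nov 2024 07:30:00 EST
	fmt.Println(formatHeaderTime(t))    // Thu, 14 Nov 2024 12:30:00 GMT
}
```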
// HeadObject returns the fileinfo for the given object name.
//
// Note that the metadata is not supported yet.
@@ -138,7 +131,7 @@ func (b *s3Backend) HeadObject(ctx context.Context, bucketName, objectName strin
hash := getFileHashByte(fobj)
meta := map[string]string{
"Last-Modified": formatHeaderTime(node.ModTime()),
"Last-Modified": node.ModTime().Format(timeFormat),
"Content-Type": fs.MimeType(context.Background(), fobj),
}
@@ -158,7 +151,7 @@ func (b *s3Backend) HeadObject(ctx context.Context, bucketName, objectName strin
}, nil
}
// GetObject fetches the object from the filesystem.
// GetObject fetchs the object from the filesystem.
func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string, rangeRequest *gofakes3.ObjectRangeRequest) (obj *gofakes3.Object, err error) {
_vfs, err := b.s.getVFS(ctx)
if err != nil {
@@ -215,7 +208,7 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string
}
meta := map[string]string{
"Last-Modified": formatHeaderTime(node.ModTime()),
"Last-Modified": node.ModTime().Format(timeFormat),
"Content-Type": fs.MimeType(context.Background(), fobj),
}
@@ -227,7 +220,7 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string
}
return &gofakes3.Object{
Name: objectName,
Name: gofakes3.URLEncode(objectName),
Hash: hash,
Metadata: meta,
Size: size,
@@ -400,7 +393,7 @@ func (b *s3Backend) deleteObject(ctx context.Context, bucketName, objectName str
}
fp := path.Join(bucketName, objectName)
// S3 does not report an error when attempting to delete a key that does not exist, so
// S3 does not report an error when attemping to delete a key that does not exist, so
// we need to skip IsNotExist errors.
if err := _vfs.Remove(fp); err != nil && !os.IsNotExist(err) {
return err

View File

@@ -19,7 +19,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
for _, entry := range dirEntries {
object := entry.Name()
// workaround for control-chars detect
// workround for control-chars detect
objectPath := path.Join(fdPath, object)
if !strings.HasPrefix(object, name) {
@@ -28,7 +28,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
if entry.IsDir() {
if addPrefix {
response.AddPrefix(objectPath)
response.AddPrefix(gofakes3.URLEncode(objectPath))
continue
}
err := b.entryListR(_vfs, bucket, path.Join(fdPath, object), "", false, response)
@@ -37,7 +37,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
}
} else {
item := &gofakes3.Content{
Key: objectPath,
Key: gofakes3.URLEncode(objectPath),
LastModified: gofakes3.NewContentTime(entry.ModTime()),
ETag: getFileHash(entry),
Size: entry.Size(),

View File

@@ -69,7 +69,7 @@ secret_access_key = SECRET_ACCESS_KEY
use_multipart_uploads = false
```
Note that setting `use_multipart_uploads = false` is to work around
Note that setting `disable_multipart_uploads = true` is to work around
[a bug](#bugs) which will be fixed in due course.
### Bugs

View File

@@ -65,7 +65,7 @@ func (s *server) getVFS(what string, sshConn *ssh.ServerConn) (VFS *vfs.VFS) {
if s.proxy == nil {
return s.vfs
}
if sshConn.Permissions == nil || sshConn.Permissions.Extensions == nil {
if sshConn.Permissions == nil && sshConn.Permissions.Extensions == nil {
fs.Infof(what, "SSH Permissions Extensions not found")
return nil
}
@@ -143,13 +143,8 @@ func (s *server) serve() (err error) {
authKeysFile := env.ShellExpand(s.opt.AuthorizedKeys)
authorizedKeysMap, err = loadAuthorizedKeys(authKeysFile)
// If user set the flag away from the default then report an error
if s.opt.AuthorizedKeys != Opt.AuthorizedKeys {
if err != nil {
return err
}
if len(authorizedKeysMap) == 0 {
return fmt.Errorf("failed to parse authorized keys")
}
if err != nil && s.opt.AuthorizedKeys != Opt.AuthorizedKeys {
return err
}
fs.Logf(nil, "Loaded %d authorized keys from %q", len(authorizedKeysMap), authKeysFile)
}
@@ -354,10 +349,11 @@ func loadAuthorizedKeys(authorizedKeysPath string) (authorizedKeysMap map[string
authorizedKeysMap = make(map[string]struct{})
for len(authorizedKeysBytes) > 0 {
pubKey, _, _, rest, err := ssh.ParseAuthorizedKey(authorizedKeysBytes)
if err == nil {
authorizedKeysMap[string(pubKey.Marshal())] = struct{}{}
authorizedKeysBytes = bytes.TrimSpace(rest)
if err != nil {
return nil, fmt.Errorf("failed to parse authorized keys: %w", err)
}
authorizedKeysMap[string(pubKey.Marshal())] = struct{}{}
authorizedKeysBytes = bytes.TrimSpace(rest)
}
return authorizedKeysMap, nil
}
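A minimal standalone sketch of the stricter parsing loop shown above, assuming golang.org/x/crypto/ssh; the authorized_keys path is illustrative. It builds a set keyed by the wire-format public key and fails on the first malformed entry rather than silently skipping it.

```go
// Sketch: load an authorized_keys file into a lookup set.
package main

import (
	"bytes"
	"fmt"
	"os"

	"golang.org/x/crypto/ssh"
)

func loadAuthorizedKeys(path string) (map[string]struct{}, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	keys := make(map[string]struct{})
	for len(data) > 0 {
		pubKey, _, _, rest, err := ssh.ParseAuthorizedKey(data)
		if err != nil {
			return nil, fmt.Errorf("failed to parse authorized keys: %w", err)
		}
		keys[string(pubKey.Marshal())] = struct{}{}
		data = bytes.TrimSpace(rest)
	}
	return keys, nil
}

func main() {
	keys, err := loadAuthorizedKeys("/home/user/.ssh/authorized_keys") // path illustrative
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Printf("loaded %d authorized keys\n", len(keys))
}
```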

View File

@@ -4,6 +4,7 @@ package size
import (
"context"
"encoding/json"
"fmt"
"os"
"strconv"
@@ -71,13 +72,13 @@ of the size command.
count := strconv.FormatInt(results.Count, 10)
countSuffix := fs.CountSuffix(results.Count).String()
if count == countSuffix {
operations.SyncPrintf("Total objects: %s\n", count)
fmt.Printf("Total objects: %s\n", count)
} else {
operations.SyncPrintf("Total objects: %s (%s)\n", countSuffix, count)
fmt.Printf("Total objects: %s (%s)\n", countSuffix, count)
}
operations.SyncPrintf("Total size: %s (%d Byte)\n", fs.SizeSuffix(results.Bytes).ByteUnit(), results.Bytes)
fmt.Printf("Total size: %s (%d Byte)\n", fs.SizeSuffix(results.Bytes).ByteUnit(), results.Bytes)
if results.Sizeless > 0 {
operations.SyncPrintf("Total objects with unknown size: %s (%d)\n", fs.CountSuffix(results.Sizeless), results.Sizeless)
fmt.Printf("Total objects with unknown size: %s (%d)\n", fs.CountSuffix(results.Sizeless), results.Sizeless)
}
return nil
})

View File

@@ -30,7 +30,6 @@ var (
maxFileSize = fs.SizeSuffix(100)
minFileNameLength = 4
maxFileNameLength = 12
flat = false
seed = int64(1)
zero = false
sparse = false
@@ -56,7 +55,6 @@ func init() {
flags.FVarP(makefilesFlags, &maxFileSize, "max-file-size", "", "Maximum size of files to create", "")
flags.IntVarP(makefilesFlags, &minFileNameLength, "min-name-length", "", minFileNameLength, "Minimum size of file names", "")
flags.IntVarP(makefilesFlags, &maxFileNameLength, "max-name-length", "", maxFileNameLength, "Maximum size of file names", "")
flags.BoolVarP(makefilesFlags, &flat, "flat", "", false, "If set create all files in the root directory", "")
test.Command.AddCommand(makefileCmd)
makefileFlags := makefileCmd.Flags()
@@ -83,9 +81,6 @@ var makefilesCmd = &cobra.Command{
commonInit()
outputDirectory := args[0]
directoriesToCreate = numberOfFiles / averageFilesPerDirectory
if flat {
directoriesToCreate = 0
}
averageSize := (minFileSize + maxFileSize) / 2
start := time.Now()
fs.Logf(nil, "Creating %d files of average size %v in %d directories in %q.", numberOfFiles, averageSize, directoriesToCreate, outputDirectory)

Some files were not shown because too many files have changed in this diff.