mirror of https://github.com/rclone/rclone.git synced 2026-01-04 09:33:36 +00:00

Compare commits


2 Commits

Author | SHA1 | Message | Date
Nick Craig-Wood | a80287effd | crypt: fix tests after introduction of no data encryption | 2021-11-15 18:03:13 +00:00
Nick Craig-Wood | 4216d55a05 | test: make sure we run wrapping backend tests in "make quicktest" | 2021-11-15 18:03:13 +00:00
164 changed files with 1243 additions and 4958 deletions

View File

@@ -40,7 +40,7 @@ jobs:
deploy: true
- job_name: mac_amd64
os: macos-11
os: macOS-latest
go: '1.17.x'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
@@ -49,10 +49,10 @@ jobs:
deploy: true
- job_name: mac_arm64
os: macos-11
os: macOS-latest
go: '1.17.x'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -macos-sdk macosx11.1 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows_amd64
@@ -79,7 +79,7 @@ jobs:
- job_name: other_os
os: ubuntu-latest
go: '1.17.x'
build_flags: '-exclude "^(windows/(386|amd64)|darwin/|linux/)"'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
@@ -134,7 +134,7 @@ jobs:
run: |
brew update
brew install --cask macfuse
if: matrix.os == 'macos-11'
if: matrix.os == 'macOS-latest'
- name: Install Libraries on Windows
shell: powershell

View File

@@ -15,7 +15,7 @@ Current active maintainers of rclone are:
| Ivan Andreev | @ivandeex | chunker & mailru backends |
| Max Sum | @Max-Sum | union backend |
| Fred | @creativeprojects | seafile backend |
| Caleb Case | @calebcase | storj backend |
| Caleb Case | @calebcase | tardigrade backend |
**This is a work in progress Draft**

View File

@@ -1,5 +1,4 @@
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
@@ -21,7 +20,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
## Storage providers
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
* Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
@@ -30,7 +28,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
* DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
* Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)
* Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
* Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
* Enterprise File Fabric [:page_facing_up:](https://rclone.org/filefabric/)
@@ -68,8 +65,8 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
* SFTP [:page_facing_up:](https://rclone.org/sftp/)
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* Storj [:page_facing_up:](https://rclone.org/storj/)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)

View File

@@ -28,7 +28,6 @@ import (
_ "github.com/rclone/rclone/backend/mailru"
_ "github.com/rclone/rclone/backend/mega"
_ "github.com/rclone/rclone/backend/memory"
_ "github.com/rclone/rclone/backend/netstorage"
_ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/pcloud"
@@ -40,9 +39,9 @@ import (
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sia"
_ "github.com/rclone/rclone/backend/storj"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/tardigrade"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/uptobox"
_ "github.com/rclone/rclone/backend/webdav"

View File

@@ -50,6 +50,8 @@ const (
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
storageDefaultBaseURL = "blob.core.windows.net"
defaultChunkSize = 4 * fs.Mebi
maxChunkSize = 100 * fs.Mebi
uploadConcurrency = 4
defaultAccessTier = azblob.AccessTierNone
maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing)
// Default storage account, key and blob endpoint for emulator support,
@@ -132,33 +134,12 @@ msi_client_id, or msi_mi_res_id parameters.`,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Upload chunk size.
Help: `Upload chunk size (<= 100 MiB).
Note that this is stored in memory and there may be up to
"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
in memory.`,
"--transfers" chunks stored at once in memory.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
If you are uploading small numbers of large files over high-speed
links and these uploads do not fully utilize your bandwidth, then
increasing this may help to speed up the transfers.
In tests, upload speed increases almost linearly with upload
concurrency. For example to fill a gigabit pipe it may be necessary to
raise this to 64. Note that this will use more memory.
Note that chunks are stored in memory and there may be up to
"--transfers" * "--azureblob-upload-concurrency" chunks stored at once
in memory.`,
Default: 16,
Advanced: true,
}, {
Name: "list_chunk",
Help: `Size of blob list.
@@ -276,7 +257,6 @@ type Options struct {
Endpoint string `config:"endpoint"`
SASURL string `config:"sas_url"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
UploadConcurrency int `config:"upload_concurrency"`
ListChunkSize uint `config:"list_chunk"`
AccessTier string `config:"access_tier"`
ArchiveTierDelete bool `config:"archive_tier_delete"`
@@ -436,6 +416,9 @@ func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
if cs > maxChunkSize {
return fmt.Errorf("%s is greater than %s", cs, maxChunkSize)
}
return nil
}
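To make the new bound concrete, here is a minimal, self-contained sketch of the check together with the memory estimate from the chunk_size help text. The minChunkSize value is an assumption for illustration; the real backend uses fs.SizeSuffix constants.

package main

import "fmt"

const (
	mebi         = 1024 * 1024
	minChunkSize = 1 * mebi   // assumed lower bound for illustration
	maxChunkSize = 100 * mebi // the cap introduced in this diff
)

// checkUploadChunkSize mirrors the bounds check in the hunk above.
func checkUploadChunkSize(cs int64) error {
	if cs < minChunkSize {
		return fmt.Errorf("%d is less than %d", cs, minChunkSize)
	}
	if cs > maxChunkSize {
		return fmt.Errorf("%d is greater than %d", cs, maxChunkSize)
	}
	return nil
}

func main() {
	// Peak upload memory is roughly transfers * concurrency * chunk size,
	// which is why the chunk_size help text warns about buffering.
	transfers, concurrency, chunkSize := 4, 4, int64(4*mebi)
	fmt.Println(checkUploadChunkSize(chunkSize)) // <nil>
	fmt.Printf("worst-case buffers: %d MiB\n", int64(transfers*concurrency)*chunkSize/mebi)
}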
@@ -1461,10 +1444,6 @@ func (o *Object) clearMetaData() {
// o.size
// o.md5
func (o *Object) readMetaData() (err error) {
container, _ := o.split()
if !o.fs.containerOK(container) {
return fs.ErrorObjectNotFound
}
if !o.modTime.IsZero() {
return nil
}
@@ -1657,10 +1636,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return errCantUpdateArchiveTierBlobs
}
}
container, containerPath := o.split()
if container == "" || containerPath == "" {
return fmt.Errorf("can't upload to root - need a container")
}
container, _ := o.split()
err = o.fs.makeContainer(ctx, container)
if err != nil {
return err
@@ -1691,10 +1667,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
BufferSize: int(o.fs.opt.ChunkSize),
MaxBuffers: o.fs.opt.UploadConcurrency,
MaxBuffers: uploadConcurrency,
Metadata: o.meta,
BlobHTTPHeaders: httpHeaders,
TransferManager: o.fs.newPoolWrapper(o.fs.opt.UploadConcurrency),
TransferManager: o.fs.newPoolWrapper(uploadConcurrency),
}
// Don't retry, return a retry error instead

View File

@@ -17,10 +17,12 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{},
RemoteName: "TestAzureBlob:",
NilObject: (*Object)(nil),
TiersToTest: []string{"Hot", "Cool"},
ChunkedUpload: fstests.ChunkedUploadConfig{
MaxChunkSize: maxChunkSize,
},
})
}

View File

@@ -160,15 +160,7 @@ free egress for data downloaded through the Cloudflare network.
Rclone works with private buckets by sending an "Authorization" header.
If the custom endpoint rewrites the requests for authentication,
e.g., in Cloudflare Workers, this header needs to be handled properly.
Leave blank if you want to use the endpoint provided by Backblaze.
The URL provided here SHOULD have the protocol and SHOULD NOT have
a trailing slash or specify the /file/bucket subpath as rclone will
request files with "{download_url}/file/{bucket_name}/{path}".
Example:
> https://mysubdomain.mydomain.tld
(No trailing "/", "file" or "bucket")`,
Leave blank if you want to use the endpoint provided by Backblaze.`,
Advanced: true,
}, {
Name: "download_auth_duration",
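For illustration, a tiny sketch of how rclone's request URL is assembled from the pieces the longer help text names; downloadFileURL is a hypothetical helper, not the backend's API:

package main

import (
	"fmt"
	"strings"
)

// downloadFileURL builds "{download_url}/file/{bucket_name}/{path}" as
// described above. The base URL should carry the protocol and no
// trailing slash, which the TrimSuffix guards against.
func downloadFileURL(downloadURL, bucket, filePath string) string {
	base := strings.TrimSuffix(downloadURL, "/")
	return fmt.Sprintf("%s/file/%s/%s", base, bucket, filePath)
}

func main() {
	fmt.Println(downloadFileURL("https://mysubdomain.mydomain.tld", "mybucket", "dir/file.txt"))
	// https://mysubdomain.mydomain.tld/file/mybucket/dir/file.txt
}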

View File

@@ -44,6 +44,7 @@ func TestIntegration(t *testing.T) {
"UserInfo",
"Disconnect",
},
QuickTestOK: true,
}
if *fstest.RemoteName == "" {
name := "TestChunker"

View File

@@ -401,10 +401,6 @@ func isCompressible(r io.Reader) (bool, error) {
if err != nil {
return false, err
}
err = w.Close()
if err != nil {
return false, err
}
ratio := float64(n) / float64(b.Len())
return ratio > minCompressionRatio, nil
}
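For reference, a self-contained take on this heuristic using the standard library's gzip (the backend's actual writer and the minCompressionRatio value may differ; 1.1 is an assumption here). Note that Close must flush before the compressed length is read — the role of the w.Close() lines in this hunk:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

const minCompressionRatio = 1.1 // assumed threshold

// isCompressible compresses the stream and compares input size to
// compressed size; a ratio above the threshold means the object is
// worth storing compressed.
func isCompressible(r io.Reader) (bool, error) {
	var b bytes.Buffer
	w, err := gzip.NewWriterLevel(&b, gzip.BestSpeed)
	if err != nil {
		return false, err
	}
	n, err := io.Copy(w, r)
	if err != nil {
		return false, err
	}
	// Close flushes pending compressed bytes so b.Len() is accurate.
	if err := w.Close(); err != nil {
		return false, err
	}
	return float64(n)/float64(b.Len()) > minCompressionRatio, nil
}

func main() {
	ok, _ := isCompressible(strings.NewReader(strings.Repeat("aaaa", 1024)))
	fmt.Println(ok) // true: repetitive data compresses well
}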
@@ -630,11 +626,9 @@ func (f *Fs) putMetadata(ctx context.Context, meta *ObjectMetadata, src fs.Objec
// Put the data
mo, err = put(ctx, metaReader, f.wrapInfo(src, makeMetadataName(src.Remote()), int64(len(data))), options...)
if err != nil {
if mo != nil {
removeErr := mo.Remove(ctx)
if removeErr != nil {
fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
}
removeErr := mo.Remove(ctx)
if removeErr != nil {
fs.Errorf(mo, "Failed to remove partially transferred object: %v", err)
}
return nil, err
}

View File

@@ -16,6 +16,9 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
if *fstest.RemoteName == "" {
t.Skip("Skipping as -remote not set")
}
opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*Object)(nil),
@@ -61,5 +64,6 @@ func TestRemoteGzip(t *testing.T) {
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "compression_mode", Value: "gzip"},
},
QuickTestOK: true,
})
}

View File

@@ -443,7 +443,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
}
fs.Debugf(src, "%v = %s OK", ht, srcHash)
}
@@ -1049,6 +1049,10 @@ func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
}
// if this is wrapping a local object then we work out the hash
if srcObj.Fs().Features().IsLocal {
if o.f.opt.NoDataEncryption {
// If no encryption, just return the hash of the underlying object
return srcObj.Hash(ctx, hash)
}
// Read the data and encrypt it to calculate the hash
fs.Debugf(o, "Computing %v hash of encrypted source", hash)
return o.f.computeHashWithNonce(ctx, o.nonce, srcObj, hash)
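A standalone sketch of that branch, with a placeholder standing in for crypt's real encrypter: when data encryption is off the uploaded bytes are the plaintext, so the wrapped object's hash is simply the source hash; otherwise it must be computed over the encrypted stream:

package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"strings"
)

// hashOfUpload returns the MD5 the remote will see. encrypt is a
// stand-in for the real cipher; identity below only illustrates the
// no_data_encryption path.
func hashOfUpload(plaintext io.Reader, noDataEncryption bool, encrypt func(io.Reader) io.Reader) (string, error) {
	src := plaintext
	if !noDataEncryption {
		src = encrypt(plaintext)
	}
	h := md5.New()
	if _, err := io.Copy(h, src); err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", h.Sum(nil)), nil
}

func main() {
	identity := func(r io.Reader) io.Reader { return r } // placeholder cipher
	sum, _ := hashOfUpload(strings.NewReader("hello"), true, identity)
	fmt.Println(sum) // MD5 of the plaintext when encryption is off
}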

View File

@@ -77,7 +77,11 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
enc, err := f.cipher.newEncrypter(inBuf, nil)
require.NoError(t, err)
nonce := enc.nonce // read the nonce at the start
_, err = io.Copy(&outBuf, enc)
if f.opt.NoDataEncryption {
_, err = outBuf.WriteString(contents)
} else {
_, err = io.Copy(&outBuf, enc)
}
require.NoError(t, err)
var oi fs.ObjectInfo = obj
@@ -96,7 +100,12 @@ func testObjectInfo(t *testing.T, f *Fs, wrap bool) {
assert.NotEqual(t, path, src.Remote())
// Test ObjectInfo.Hash
wantHash := md5.Sum(outBuf.Bytes())
var wantHash [md5.Size]byte
if f.opt.NoDataEncryption {
wantHash = md5.Sum([]byte(contents))
} else {
wantHash = md5.Sum(outBuf.Bytes())
}
gotHash, err := src.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, fmt.Sprintf("%x", wantHash), gotHash)

View File

@@ -46,6 +46,7 @@ func TestStandardBase32(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -67,6 +68,7 @@ func TestStandardBase64(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -88,6 +90,7 @@ func TestStandardBase32768(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -109,6 +112,7 @@ func TestOff(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -131,6 +135,7 @@ func TestObfuscate(t *testing.T) {
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -154,5 +159,6 @@ func TestNoDataObfuscate(t *testing.T) {
SkipBadWindowsCharacters: true,
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}

View File

@@ -84,7 +84,7 @@ var (
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
RedirectURL: oauthutil.TitleBarRedirectURL,
}
_mimeTypeToExtensionDuplicates = map[string]string{
"application/x-vnd.oasis.opendocument.presentation": ".odp",
@@ -299,17 +299,6 @@ a non root folder as its starting point.
Default: true,
Help: "Send files to the trash instead of deleting permanently.\n\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
Advanced: true,
}, {
Name: "copy_shortcut_content",
Default: false,
Help: `Server side copy contents of shortcuts instead of the shortcut.
When doing server side copies, normally rclone will copy shortcuts as
shortcuts.
If this flag is used then rclone will copy the contents of shortcuts
rather than shortcuts themselves when doing server side copies.`,
Advanced: true,
}, {
Name: "skip_gdocs",
Default: false,
@@ -553,14 +542,6 @@ Google don't document so it may break in the future.
Normally rclone dereferences shortcut files making them appear as if
they are the original file (see [the shortcuts section](#shortcuts)).
If this flag is set then rclone will ignore shortcut files completely.
`,
Advanced: true,
Default: false,
}, {
Name: "skip_dangling_shortcuts",
Help: `If set skip dangling shortcut files.
If this is set then rclone will not show any dangling shortcuts in listings.
`,
Advanced: true,
Default: false,
@@ -597,7 +578,6 @@ type Options struct {
TeamDriveID string `config:"team_drive"`
AuthOwnerOnly bool `config:"auth_owner_only"`
UseTrash bool `config:"use_trash"`
CopyShortcutContent bool `config:"copy_shortcut_content"`
SkipGdocs bool `config:"skip_gdocs"`
SkipChecksumGphotos bool `config:"skip_checksum_gphotos"`
SharedWithMe bool `config:"shared_with_me"`
@@ -624,7 +604,6 @@ type Options struct {
StopOnUploadLimit bool `config:"stop_on_upload_limit"`
StopOnDownloadLimit bool `config:"stop_on_download_limit"`
SkipShortcuts bool `config:"skip_shortcuts"`
SkipDanglingShortcuts bool `config:"skip_dangling_shortcuts"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -927,11 +906,6 @@ OUTER:
if err != nil {
return false, fmt.Errorf("list: %w", err)
}
// leave the dangling shortcut out of the listings
// we've already logged about the dangling shortcut in resolveShortcut
if f.opt.SkipDanglingShortcuts && item.MimeType == shortcutMimeTypeDangling {
continue
}
}
// Check the case of items is correct since
// the `=` operator is case insensitive.
@@ -1597,15 +1571,6 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
}
}
// If using a link type export and a more specific export
// hasn't been found all docs should be exported
for _, _extension := range f.exportExtensions {
_mimeType := mime.TypeByExtension(_extension)
if isLinkMimeType(_mimeType) {
return _extension, _mimeType, true
}
}
// else return empty
return "", "", isDocument
}
@@ -1616,14 +1581,6 @@ func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", "", false)
func (f *Fs) findExportFormat(ctx context.Context, item *drive.File) (extension, filename, mimeType string, isDocument bool) {
// If item has MD5 sum it is a file stored on drive
if item.Md5Checksum != "" {
return
}
// Folders can't be documents
if item.MimeType == driveFolderType {
return
}
extension, mimeType, isDocument = f.findExportFormatByMimeType(ctx, item.MimeType)
if extension != "" {
filename = item.Name + extension
@@ -2417,16 +2374,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
createInfo.Description = ""
}
// get the ID of the thing to copy
// copy the contents if CopyShortcutContent
// else copy the shortcut only
// get the ID of the thing to copy - this is the shortcut if available
id := shortcutID(srcObj.id)
if f.opt.CopyShortcutContent {
id = actualID(srcObj.id)
}
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Copy(id, createInfo).
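The flag in question boils down to choosing which ID the server-side copy targets. A simplified sketch, with a stand-in for the backend's shortcutID/actualID helpers:

package main

import "fmt"

// copyTargetID picks the shortcut itself by default, or the file the
// shortcut points at when copy_shortcut_content is set. targetOf is a
// simplified stand-in for resolving a shortcut to its target ID.
func copyTargetID(id string, isShortcut, copyShortcutContent bool, targetOf func(string) string) string {
	if isShortcut && copyShortcutContent {
		return targetOf(id) // copy the contents, not the shortcut
	}
	return id // copy the shortcut object itself
}

func main() {
	targetOf := func(string) string { return "realFileID" }
	fmt.Println(copyTargetID("shortcutID", true, false, targetOf)) // shortcutID
	fmt.Println(copyTargetID("shortcutID", true, true, targetOf))  // realFileID
}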

View File

@@ -422,7 +422,11 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
require.NoError(t, err)
o := obj.(*Object)
dir := t.TempDir()
dir, err := ioutil.TempDir("", "rclone-drive-copyid-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(dir)
}()
checkFile := func(name string) {
filePath := filepath.Join(dir, name)
@@ -487,11 +491,19 @@ func (f *Fs) InternalTestAgeQuery(t *testing.T) {
subFs, isDriveFs := subFsResult.(*Fs)
require.True(t, isDriveFs)
tempDir1 := t.TempDir()
tempDir1, err := ioutil.TempDir("", "rclone-drive-agequery1-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempDir1)
}()
tempFs1, err := fs.NewFs(defCtx, tempDir1)
require.NoError(t, err)
tempDir2 := t.TempDir()
tempDir2, err := ioutil.TempDir("", "rclone-drive-agequery2-test")
require.NoError(t, err)
defer func() {
_ = os.RemoveAll(tempDir2)
}()
tempFs2, err := fs.NewFs(defCtx, tempDir2)
require.NoError(t, err)

View File

@@ -42,15 +42,18 @@ func init() {
}, {
Help: "If you want to download a shared folder, add this parameter.",
Name: "shared_folder",
Required: false,
Advanced: true,
}, {
Help: "If you want to download a shared file that is password protected, add this parameter.",
Name: "file_password",
Required: false,
Advanced: true,
IsPassword: true,
}, {
Help: "If you want to list the files in a shared folder that is password protected, add this parameter.",
Name: "folder_password",
Required: false,
Advanced: true,
IsPassword: true,
}, {
@@ -514,32 +517,6 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return dstObj, nil
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/user/info.cgi",
ContentType: "application/json",
}
var accountInfo AccountInfo
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
resp, err = f.rest.CallJSON(ctx, &opts, nil, &accountInfo)
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("failed to read user info: %w", err)
}
// FIXME max upload size would be useful to use in Update
usage = &fs.Usage{
Used: fs.NewUsageValue(accountInfo.ColdStorage), // bytes in use
Total: fs.NewUsageValue(accountInfo.AvailableColdStorage), // bytes total
Free: fs.NewUsageValue(accountInfo.AvailableColdStorage - accountInfo.ColdStorage), // bytes free
}
return usage, nil
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
o, err := f.NewObject(ctx, remote)

View File

@@ -182,34 +182,3 @@ type FoldersList struct {
Status string `json:"Status"`
SubFolders []Folder `json:"sub_folders"`
}
// AccountInfo is the structure how 1Fichier returns user info
type AccountInfo struct {
StatsDate string `json:"stats_date"`
MailRM string `json:"mail_rm"`
DefaultQuota int64 `json:"default_quota"`
UploadForbidden string `json:"upload_forbidden"`
PageLimit int `json:"page_limit"`
ColdStorage int64 `json:"cold_storage"`
Status string `json:"status"`
UseCDN string `json:"use_cdn"`
AvailableColdStorage int64 `json:"available_cold_storage"`
DefaultPort string `json:"default_port"`
DefaultDomain int `json:"default_domain"`
Email string `json:"email"`
DownloadMenu string `json:"download_menu"`
FTPDID int `json:"ftp_did"`
DefaultPortFiles string `json:"default_port_files"`
FTPReport string `json:"ftp_report"`
OverQuota int64 `json:"overquota"`
AvailableStorage int64 `json:"available_storage"`
CDN string `json:"cdn"`
Offer string `json:"offer"`
SubscriptionEnd string `json:"subscription_end"`
TFA string `json:"2fa"`
AllowedColdStorage int64 `json:"allowed_cold_storage"`
HotStorage int64 `json:"hot_storage"`
DefaultColdStorageQuota int64 `json:"default_cold_storage_quota"`
FTPMode string `json:"ftp_mode"`
RUReport string `json:"ru_report"`
}

View File

@@ -15,7 +15,7 @@ import (
"sync"
"time"
"github.com/rclone/ftp"
"github.com/jlaffaye/ftp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
@@ -52,13 +52,11 @@ func init() {
Help: "FTP host to connect to.\n\nE.g. \"ftp.example.com\".",
Required: true,
}, {
Name: "user",
Help: "FTP username.",
Default: currentUser,
Name: "user",
Help: "FTP username, leave blank for current username, " + currentUser + ".",
}, {
Name: "port",
Help: "FTP port number.",
Default: 21,
Name: "port",
Help: "FTP port, leave blank to use default (21).",
}, {
Name: "pass",
Help: "FTP password.",

View File

@@ -65,7 +65,7 @@ var (
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
RedirectURL: oauthutil.TitleBarRedirectURL,
}
)
@@ -182,30 +182,15 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "asia-northeast1",
Help: "Tokyo",
}, {
Value: "asia-northeast2",
Help: "Osaka",
}, {
Value: "asia-northeast3",
Help: "Seoul",
}, {
Value: "asia-south1",
Help: "Mumbai",
}, {
Value: "asia-south2",
Help: "Delhi",
}, {
Value: "asia-southeast1",
Help: "Singapore",
}, {
Value: "asia-southeast2",
Help: "Jakarta",
}, {
Value: "australia-southeast1",
Help: "Sydney",
}, {
Value: "australia-southeast2",
Help: "Melbourne",
}, {
Value: "europe-north1",
Help: "Finland",
@@ -221,12 +206,6 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "europe-west4",
Help: "Netherlands",
}, {
Value: "europe-west6",
Help: "Zürich",
}, {
Value: "europe-central2",
Help: "Warsaw",
}, {
Value: "us-central1",
Help: "Iowa",
@@ -242,33 +221,6 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
}, {
Value: "us-west2",
Help: "California",
}, {
Value: "us-west3",
Help: "Salt Lake City",
}, {
Value: "us-west4",
Help: "Las Vegas",
}, {
Value: "northamerica-northeast1",
Help: "Montréal",
}, {
Value: "northamerica-northeast2",
Help: "Toronto",
}, {
Value: "southamerica-east1",
Help: "São Paulo",
}, {
Value: "southamerica-west1",
Help: "Santiago",
}, {
Value: "asia1",
Help: "Dual region: asia-northeast1 and asia-northeast2.",
}, {
Value: "eur4",
Help: "Dual region: europe-north1 and europe-west4.",
}, {
Value: "nam4",
Help: "Dual region: us-central1 and us-east1.",
}},
}, {
Name: "storage_class",

View File

@@ -69,7 +69,7 @@ var (
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectURL,
RedirectURL: oauthutil.TitleBarRedirectURL,
}
)

View File

@@ -202,11 +202,7 @@ func (f *Fs) wrapEntries(baseEntries fs.DirEntries) (hashEntries fs.DirEntries,
for _, entry := range baseEntries {
switch x := entry.(type) {
case fs.Object:
obj, err := f.wrapObject(x, nil)
if err != nil {
return nil, err
}
hashEntries = append(hashEntries, obj)
hashEntries = append(hashEntries, f.wrapObject(x, nil))
default:
hashEntries = append(hashEntries, entry) // trash in - trash out
}
@@ -255,7 +251,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
if do := f.Fs.Features().PutStream; do != nil {
_ = f.pruneHash(src.Remote())
oResult, err := do(ctx, in, src, options...)
return f.wrapObject(oResult, err)
return f.wrapObject(oResult, err), err
}
return nil, errors.New("PutStream not supported")
}
@@ -265,7 +261,7 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
if do := f.Fs.Features().PutUnchecked; do != nil {
_ = f.pruneHash(src.Remote())
oResult, err := do(ctx, in, src, options...)
return f.wrapObject(oResult, err)
return f.wrapObject(oResult, err), err
}
return nil, errors.New("PutUnchecked not supported")
}
@@ -352,7 +348,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantCopy
}
oResult, err := do(ctx, o.Object, remote)
return f.wrapObject(oResult, err)
return f.wrapObject(oResult, err), err
}
// Move src to this remote using server-side move operations.
@@ -375,7 +371,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
dir: false,
fs: f,
})
return f.wrapObject(oResult, nil)
return f.wrapObject(oResult, nil), nil
}
// DirMove moves src, srcRemote to this remote at dstRemote using server-side move operations.
@@ -414,7 +410,7 @@ func (f *Fs) Shutdown(ctx context.Context) (err error) {
// NewObject finds the Object at remote.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
o, err := f.Fs.NewObject(ctx, remote)
return f.wrapObject(o, err)
return f.wrapObject(o, err), err
}
//
@@ -428,14 +424,11 @@ type Object struct {
}
// Wrap base object into hasher object
func (f *Fs) wrapObject(o fs.Object, err error) (*Object, error) {
if err != nil {
return nil, err
func (f *Fs) wrapObject(o fs.Object, err error) *Object {
if err != nil || o == nil {
return nil
}
if o == nil {
return nil, fs.ErrorObjectNotFound
}
return &Object{Object: o, f: f}, nil
return &Object{Object: o, f: f}
}
// Fs returns read only access to the Fs that this object is part of
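The two wrapObject signatures in this hunk differ in whether the inner error propagates or collapses into a nil object. A minimal sketch of the (value, error) style for comparison; the types are illustrative only:

package main

import (
	"errors"
	"fmt"
)

type base struct{ name string }

type wrapped struct{ *base }

// wrapObject propagates the inner error and never returns a
// half-valid object, so callers can rely on err alone.
func wrapObject(o *base, err error) (*wrapped, error) {
	if err != nil {
		return nil, err
	}
	if o == nil {
		return nil, errors.New("object not found")
	}
	return &wrapped{o}, nil
}

func main() {
	w, err := wrapObject(&base{name: "a"}, nil)
	fmt.Println(w.name, err) // a <nil>
	_, err = wrapObject(nil, nil)
	fmt.Println(err) // object not found
}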

View File

@@ -25,6 +25,7 @@ func TestIntegration(t *testing.T) {
"OpenWriterAt",
},
UnimplementableObjectMethods: []string{},
QuickTestOK: true,
}
if *fstest.RemoteName == "" {
tempDir := filepath.Join(os.TempDir(), "rclone-hasher-test")

View File

@@ -210,8 +210,8 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
_ = f.pruneHash(src.Remote())
oResult, err := f.Fs.Put(ctx, wrapIn, src, options...)
o, err = f.wrapObject(oResult, err)
if err != nil {
o = f.wrapObject(oResult, err)
if o == nil {
return nil, err
}

View File

@@ -22,8 +22,9 @@ func init() {
Help: "Hadoop name node and port.\n\nE.g. \"namenode:8020\" to connect to host namenode at port 8020.",
Required: true,
}, {
Name: "username",
Help: "Hadoop user name.",
Name: "username",
Help: "Hadoop user name.",
Required: false,
Examples: []fs.OptionExample{{
Value: "root",
Help: "Connect to hdfs as root.",
@@ -35,6 +36,7 @@ func init() {
Enables KERBEROS authentication. Specifies the Service Principal Name
(SERVICE/FQDN) for the namenode. E.g. \"hdfs/namenode.hadoop.docker\"
for namenode running as service 'hdfs' with FQDN 'namenode.hadoop.docker'.`,
Required: false,
Advanced: true,
}, {
Name: "data_transfer_protection",
@@ -44,6 +46,7 @@ Specifies whether or not authentication, data signature integrity
checks, and wire encryption are required when communicating with the
datanodes. Possible values are 'authentication', 'integrity' and
'privacy'. Used only with KERBEROS enabled.`,
Required: false,
Examples: []fs.OptionExample{{
Value: "privacy",
Help: "Ensure authentication, integrity and encryption enabled.",

View File

@@ -52,7 +52,8 @@ The input format is comma separated list of key,value pairs. Standard
For example, to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.`,
You can set multiple headers, e.g. '"Cookie","name=value","Authorization","xxx"'.
`,
Default: fs.CommaSepList{},
Advanced: true,
}, {
@@ -73,9 +74,8 @@ directories.`,
Advanced: true,
}, {
Name: "no_head",
Help: `Don't use HEAD requests.
Help: `Don't use HEAD requests to find file sizes in dir listing.
HEAD requests are mainly used to find file sizes in dir listing.
If your site is being very slow to load then you can try this option.
Normally rclone does a HEAD request for each potential file in a
directory listing to:
@@ -84,9 +84,12 @@ directory listing to:
- check it really exists
- check to see if it is a directory
If you set this option, rclone will not do the HEAD request. This will mean
that directory listings are much quicker, but rclone won't have the times or
sizes of any files, and some files that don't exist may be in the listing.`,
If you set this option, rclone will not do the HEAD request. This will mean
- directory listings are much quicker
- rclone won't have the times or sizes of any files
- some files that don't exist may be in the listing
`,
Default: false,
Advanced: true,
}},
@@ -130,87 +133,11 @@ func statusError(res *http.Response, err error) error {
}
if res.StatusCode < 200 || res.StatusCode > 299 {
_ = res.Body.Close()
return fmt.Errorf("HTTP Error: %s", res.Status)
return fmt.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
}
return nil
}
// getFsEndpoint decides if url is to be considered a file or directory,
// and returns a proper endpoint url to use for the fs.
func getFsEndpoint(ctx context.Context, client *http.Client, url string, opt *Options) (string, bool) {
// If url ends with '/' it is already a proper url always assumed to be a directory.
if url[len(url)-1] == '/' {
return url, false
}
// If url does not end with '/' we send a HEAD request to decide
// if it is directory or file, and if directory appends the missing
// '/', or if file returns the directory url to parent instead.
createFileResult := func() (string, bool) {
fs.Debugf(nil, "If path is a directory you must add a trailing '/'")
parent, _ := path.Split(url)
return parent, true
}
createDirResult := func() (string, bool) {
fs.Debugf(nil, "To avoid the initial HEAD request add a trailing '/' to the path")
return url + "/", false
}
// If HEAD requests are not allowed we just have to assume it is a file.
if opt.NoHead {
fs.Debugf(nil, "Assuming path is a file as --http-no-head is set")
return createFileResult()
}
// Use a client which doesn't follow redirects so the server
// doesn't redirect http://host/dir to http://host/dir/
noRedir := *client
noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
if err != nil {
fs.Debugf(nil, "Assuming path is a file as HEAD request could not be created: %v", err)
return createFileResult()
}
addHeaders(req, opt)
res, err := noRedir.Do(req)
if err != nil {
fs.Debugf(nil, "Assuming path is a file as HEAD request could not be sent: %v", err)
return createFileResult()
}
if res.StatusCode == http.StatusNotFound {
fs.Debugf(nil, "Assuming path is a directory as HEAD response indicates it does not exist as a file (%s)", res.Status)
return createDirResult()
}
if res.StatusCode == http.StatusMovedPermanently ||
res.StatusCode == http.StatusFound ||
res.StatusCode == http.StatusSeeOther ||
res.StatusCode == http.StatusTemporaryRedirect ||
res.StatusCode == http.StatusPermanentRedirect {
redir := res.Header.Get("Location")
if redir != "" {
if redir[len(redir)-1] == '/' {
fs.Debugf(nil, "Assuming path is a directory as HEAD response is redirect (%s) to a path that ends with '/': %s", res.Status, redir)
return createDirResult()
}
fs.Debugf(nil, "Assuming path is a file as HEAD response is redirect (%s) to a path that does not end with '/': %s", res.Status, redir)
return createFileResult()
}
fs.Debugf(nil, "Assuming path is a file as HEAD response is redirect (%s) but no location header", res.Status)
return createFileResult()
}
if res.StatusCode < 200 || res.StatusCode > 299 {
// Example is 403 (http.StatusForbidden) for servers not allowing HEAD requests.
fs.Debugf(nil, "Assuming path is a file as HEAD response is an error (%s)", res.Status)
return createFileResult()
}
fs.Debugf(nil, "Assuming path is a file as HEAD response is success (%s)", res.Status)
return createFileResult()
}
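The whole heuristic above hinges on a client that refuses to follow redirects so the Location header itself can be inspected. A runnable sketch of just that trick, against a made-up test server that redirects /dir to /dir/:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

func main() {
	// Server that redirects directory-like paths, as many web servers do.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.Redirect(w, r, r.URL.Path+"/", http.StatusMovedPermanently)
	}))
	defer ts.Close()

	// A client which doesn't follow redirects, so the server's own
	// answer (not the final page) is what we see.
	client := &http.Client{
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		},
	}
	res, err := client.Head(ts.URL + "/dir")
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	loc := res.Header.Get("Location")
	// A redirect to a path ending in "/" marks the target as a directory.
	fmt.Println(res.StatusCode, loc, strings.HasSuffix(loc, "/")) // 301 /dir/ true
}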
// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -241,9 +168,37 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
client := fshttp.NewClient(ctx)
endpoint, isFile := getFsEndpoint(ctx, client, u.String(), opt)
fs.Debugf(nil, "Root: %s", endpoint)
u, err = url.Parse(endpoint)
var isFile = false
if !strings.HasSuffix(u.String(), "/") {
// Make a client which doesn't follow redirects so the server
// doesn't redirect http://host/dir to http://host/dir/
noRedir := *client
noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
// check to see if points to a file
req, err := http.NewRequestWithContext(ctx, "HEAD", u.String(), nil)
if err == nil {
addHeaders(req, opt)
res, err := noRedir.Do(req)
err = statusError(res, err)
if err == nil {
isFile = true
}
}
}
newRoot := u.String()
if isFile {
// Point to the parent if this is a file
newRoot, _ = path.Split(u.String())
} else {
if !strings.HasSuffix(newRoot, "/") {
newRoot += "/"
}
}
u, err = url.Parse(newRoot)
if err != nil {
return nil, err
}
@@ -261,16 +216,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.features = (&fs.Features{
CanHaveEmptyDirectories: true,
}).Fill(ctx, f)
if isFile {
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
if !strings.HasSuffix(f.endpointURL, "/") {
return nil, errors.New("internal error: url doesn't end with /")
}
return f, nil
}
@@ -346,7 +297,7 @@ func parseName(base *url.URL, name string) (string, error) {
}
// check it doesn't have URL parameters
uStr := u.String()
if strings.Contains(uStr, "?") {
if strings.Index(uStr, "?") >= 0 {
return "", errFoundQuestionMark
}
// check that this is going back to the same host and scheme
@@ -458,7 +409,7 @@ func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error
return nil, fmt.Errorf("readDir: %w", err)
}
default:
return nil, fmt.Errorf("can't parse content type %q", contentType)
return nil, fmt.Errorf("Can't parse content type %q", contentType)
}
return names, nil
}

View File

@@ -8,10 +8,8 @@ import (
"net/http/httptest"
"net/url"
"os"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
"testing"
"time"
@@ -26,11 +24,10 @@ import (
)
var (
remoteName = "TestHTTP"
testPath = "test"
filesPath = filepath.Join(testPath, "files")
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
lineEndSize = 1
remoteName = "TestHTTP"
testPath = "test"
filesPath = filepath.Join(testPath, "files")
headers = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
)
// prepareServer the test server and return a function to tidy it up afterwards
@@ -38,22 +35,6 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
// file server for test/files
fileServer := http.FileServer(http.Dir(filesPath))
// verify the file path is correct, and also check which line endings
// are used to get sizes right ("\n" except on Windows, but even there
// we may have "\n" or "\r\n" depending on git crlf setting)
fileList, err := ioutil.ReadDir(filesPath)
require.NoError(t, err)
require.Greater(t, len(fileList), 0)
for _, file := range fileList {
if !file.IsDir() {
data, _ := ioutil.ReadFile(filepath.Join(filesPath, file.Name()))
if strings.HasSuffix(string(data), "\r\n") {
lineEndSize = 2
}
break
}
}
// test the headers are there then pass on to fileServer
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
@@ -110,7 +91,7 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
e = entries[1]
assert.Equal(t, "one%.txt", e.Remote())
assert.Equal(t, int64(5+lineEndSize), e.Size())
assert.Equal(t, int64(6), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
@@ -127,7 +108,7 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
_, ok = e.(fs.Directory)
assert.True(t, ok)
} else {
assert.Equal(t, int64(40+lineEndSize), e.Size())
assert.Equal(t, int64(41), e.Size())
_, ok = e.(*Object)
assert.True(t, ok)
}
@@ -160,7 +141,7 @@ func TestListSubDir(t *testing.T) {
e := entries[0]
assert.Equal(t, "three/underthree.txt", e.Remote())
assert.Equal(t, int64(8+lineEndSize), e.Size())
assert.Equal(t, int64(9), e.Size())
_, ok := e.(*Object)
assert.True(t, ok)
}
@@ -173,7 +154,7 @@ func TestNewObject(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, "four/under four.txt", o.Remote())
assert.Equal(t, int64(8+lineEndSize), o.Size())
assert.Equal(t, int64(9), o.Size())
_, ok := o.(*Object)
assert.True(t, ok)
@@ -206,11 +187,7 @@ func TestOpen(t *testing.T) {
data, err := ioutil.ReadAll(fd)
require.NoError(t, err)
require.NoError(t, fd.Close())
if lineEndSize == 2 {
assert.Equal(t, "beetroot\r\n", string(data))
} else {
assert.Equal(t, "beetroot\n", string(data))
}
assert.Equal(t, "beetroot\n", string(data))
// Test with range request
fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
@@ -259,7 +236,7 @@ func TestIsAFileSubDir(t *testing.T) {
e := entries[0]
assert.Equal(t, "underthree.txt", e.Remote())
assert.Equal(t, int64(8+lineEndSize), e.Size())
assert.Equal(t, int64(9), e.Size())
_, ok := e.(*Object)
assert.True(t, ok)
}
@@ -376,106 +353,3 @@ func TestParseCaddy(t *testing.T) {
"v1.36-22-g06ea13a-ssh-agentβ/",
})
}
func TestFsNoSlashRoots(t *testing.T) {
// Test Fs with roots that does not end with '/', the logic that
// decides if url is to be considered a file or directory, based
// on result from a HEAD request.
// Handler for faking HEAD responses with different status codes
headCount := 0
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method == "HEAD" {
headCount++
responseCode, err := strconv.Atoi(path.Base(r.URL.String()))
require.NoError(t, err)
if strings.HasPrefix(r.URL.String(), "/redirect/") {
var redir string
if strings.HasPrefix(r.URL.String(), "/redirect/file/") {
redir = "/redirected"
} else if strings.HasPrefix(r.URL.String(), "/redirect/dir/") {
redir = "/redirected/"
} else {
require.Fail(t, "Redirect test requests must start with '/redirect/file/' or '/redirect/dir/'")
}
http.Redirect(w, r, redir, responseCode)
} else {
http.Error(w, http.StatusText(responseCode), responseCode)
}
}
})
// Make the test server
ts := httptest.NewServer(handler)
defer ts.Close()
// Configure the remote
configfile.Install()
m := configmap.Simple{
"type": "http",
"url": ts.URL,
}
// Test
for i, test := range []struct {
root string
isFile bool
}{
// 2xx success
{"parent/200", true},
{"parent/204", true},
// 3xx redirection Redirect status 301, 302, 303, 307, 308
{"redirect/file/301", true}, // Request is redirected to "/redirected"
{"redirect/dir/301", false}, // Request is redirected to "/redirected/"
{"redirect/file/302", true}, // Request is redirected to "/redirected"
{"redirect/dir/302", false}, // Request is redirected to "/redirected/"
{"redirect/file/303", true}, // Request is redirected to "/redirected"
{"redirect/dir/303", false}, // Request is redirected to "/redirected/"
{"redirect/file/304", true}, // Not really a redirect, handled like 4xx errors (below)
{"redirect/file/305", true}, // Not really a redirect, handled like 4xx errors (below)
{"redirect/file/306", true}, // Not really a redirect, handled like 4xx errors (below)
{"redirect/file/307", true}, // Request is redirected to "/redirected"
{"redirect/dir/307", false}, // Request is redirected to "/redirected/"
{"redirect/file/308", true}, // Request is redirected to "/redirected"
{"redirect/dir/308", false}, // Request is redirected to "/redirected/"
// 4xx client errors
{"parent/403", true}, // Forbidden status (head request blocked)
{"parent/404", false}, // Not found status
} {
for _, noHead := range []bool{false, true} {
var isFile bool
if noHead {
m.Set("no_head", "true")
isFile = true
} else {
m.Set("no_head", "false")
isFile = test.isFile
}
headCount = 0
f, err := NewFs(context.Background(), remoteName, test.root, m)
if noHead {
assert.Equal(t, 0, headCount)
} else {
assert.Equal(t, 1, headCount)
}
if isFile {
assert.ErrorIs(t, err, fs.ErrorIsFile)
} else {
assert.NoError(t, err)
}
var endpoint string
if isFile {
parent, _ := path.Split(test.root)
endpoint = "/" + parent
} else {
endpoint = "/" + test.root + "/"
}
what := fmt.Sprintf("i=%d, root=%q, isFile=%v, noHead=%v", i, test.root, isFile, noHead)
assert.Equal(t, ts.URL+endpoint, f.String(), what)
}
}
}

View File

@@ -7,7 +7,6 @@ import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"encoding/xml"
"errors"
"fmt"
"io"
@@ -932,120 +931,48 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
return entries, nil
}
type listStreamTime time.Time
// listFileDirFn is called from listFileDir to handle an object.
type listFileDirFn func(fs.DirEntry) error
func (c *listStreamTime) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
var v string
if err := d.DecodeElement(&v, &start); err != nil {
return err
}
t, err := time.Parse(time.RFC3339, v)
if err != nil {
return err
}
*c = listStreamTime(t)
return nil
}
func (c listStreamTime) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf("\"%s\"", time.Time(c).Format(time.RFC3339))), nil
}
func parseListRStream(ctx context.Context, r io.Reader, trimPrefix string, filesystem *Fs, callback func(fs.DirEntry) error) error {
type stats struct {
Folders int `xml:"folders"`
Files int `xml:"files"`
}
var expected, actual stats
type xmlFile struct {
Path string `xml:"path"`
Name string `xml:"filename"`
Checksum string `xml:"md5"`
Size int64 `xml:"size"`
Modified listStreamTime `xml:"modified"`
Created listStreamTime `xml:"created"`
}
type xmlFolder struct {
Path string `xml:"path"`
}
addFolder := func(path string) error {
return callback(fs.NewDir(filesystem.opt.Enc.ToStandardPath(path), time.Time{}))
}
addFile := func(f *xmlFile) error {
return callback(&Object{
hasMetaData: true,
fs: filesystem,
remote: filesystem.opt.Enc.ToStandardPath(path.Join(f.Path, f.Name)),
size: f.Size,
md5: f.Checksum,
modTime: time.Time(f.Modified),
})
}
trimPathPrefix := func(p string) string {
p = strings.TrimPrefix(p, trimPrefix)
p = strings.TrimPrefix(p, "/")
return p
}
uniqueFolders := map[string]bool{}
decoder := xml.NewDecoder(r)
for {
t, err := decoder.Token()
if err != nil {
if err != io.EOF {
return err
}
break
// List the objects and directories into entries, from a
// special kind of JottaFolder representing a FileDirList
func (f *Fs) listFileDir(ctx context.Context, remoteStartPath string, startFolder *api.JottaFolder, fn listFileDirFn) error {
pathPrefix := "/" + f.filePathRaw("") // Non-escaped prefix of API paths to be cut off, to be left with the remote path including the remoteStartPath
pathPrefixLength := len(pathPrefix)
startPath := path.Join(pathPrefix, remoteStartPath) // Non-escaped API path up to and including remoteStartPath, to decide if it should be created as a new dir object
startPathLength := len(startPath)
for i := range startFolder.Folders {
folder := &startFolder.Folders[i]
if !f.validFolder(folder) {
return nil
}
switch se := t.(type) {
case xml.StartElement:
switch se.Name.Local {
case "file":
var f xmlFile
if err := decoder.DecodeElement(&f, &se); err != nil {
return err
}
f.Path = trimPathPrefix(f.Path)
actual.Files++
if !uniqueFolders[f.Path] {
uniqueFolders[f.Path] = true
actual.Folders++
if err := addFolder(f.Path); err != nil {
return err
}
}
if err := addFile(&f); err != nil {
return err
}
case "folder":
var f xmlFolder
if err := decoder.DecodeElement(&f, &se); err != nil {
return err
}
f.Path = trimPathPrefix(f.Path)
uniqueFolders[f.Path] = true
actual.Folders++
if err := addFolder(f.Path); err != nil {
return err
}
case "stats":
if err := decoder.DecodeElement(&expected, &se); err != nil {
folderPath := f.opt.Enc.ToStandardPath(path.Join(folder.Path, folder.Name))
folderPathLength := len(folderPath)
var remoteDir string
if folderPathLength > pathPrefixLength {
remoteDir = folderPath[pathPrefixLength+1:]
if folderPathLength > startPathLength {
d := fs.NewDir(remoteDir, time.Time(folder.ModifiedAt))
err := fn(d)
if err != nil {
return err
}
}
}
for i := range folder.Files {
file := &folder.Files[i]
if f.validFile(file) {
remoteFile := path.Join(remoteDir, f.opt.Enc.ToStandardName(file.Name))
o, err := f.newObjectWithInfo(ctx, remoteFile, file)
if err != nil {
return err
}
err = fn(o)
if err != nil {
return err
}
}
}
}
if expected.Folders != actual.Folders ||
expected.Files != actual.Files {
return fmt.Errorf("Invalid result from listStream: expected[%#v] != actual[%#v]", expected, actual)
}
return nil
}
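parseListRStream above leans on encoding/xml's token streaming, decoding one element at a time instead of holding the whole listing in memory. A stripped-down sketch of that pattern, reusing the xmlFile tags from the hunk:

package main

import (
	"encoding/xml"
	"fmt"
	"io"
	"strings"
)

type xmlFile struct {
	Name string `xml:"filename"`
	Size int64  `xml:"size"`
}

func main() {
	r := strings.NewReader(
		`<list>` +
			`<file><filename>a.txt</filename><size>3</size></file>` +
			`<file><filename>b.txt</filename><size>7</size></file>` +
			`</list>`)
	dec := xml.NewDecoder(r)
	for {
		tok, err := dec.Token()
		if err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		// Decode each <file> element as it streams past.
		if se, ok := tok.(xml.StartElement); ok && se.Name.Local == "file" {
			var f xmlFile
			if err := dec.DecodeElement(&f, &se); err != nil {
				panic(err)
			}
			fmt.Println(f.Name, f.Size)
		}
	}
}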
@@ -1061,27 +988,12 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
Path: f.filePath(dir),
Parameters: url.Values{},
}
opts.Parameters.Set("mode", "liststream")
list := walk.NewListRHelper(callback)
opts.Parameters.Set("mode", "list")
var resp *http.Response
var result api.JottaFolder // Could be JottaFileDirList, but JottaFolder is close enough
err = f.pacer.Call(func() (bool, error) {
resp, err = f.srv.Call(ctx, &opts)
if err != nil {
return shouldRetry(ctx, resp, err)
}
// liststream paths are /mountpoint/root/path
// so the returned paths should have /mountpoint/root/ trimmed
// as the caller is expecting path.
trimPrefix := path.Join("/", f.opt.Mountpoint, f.root)
err = parseListRStream(ctx, resp.Body, trimPrefix, f, func(d fs.DirEntry) error {
if d.Remote() == dir {
return nil
}
return list.Add(d)
})
_ = resp.Body.Close()
resp, err = f.srv.CallXML(ctx, &opts, nil, &result)
return shouldRetry(ctx, resp, err)
})
if err != nil {
@@ -1093,6 +1005,10 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
}
return fmt.Errorf("couldn't list files: %w", err)
}
list := walk.NewListRHelper(callback)
err = f.listFileDir(ctx, dir, &result, func(entry fs.DirEntry) error {
return list.Add(entry)
})
if err != nil {
return err
}

View File

@@ -28,57 +28,33 @@ import (
func init() {
fs.Register(&fs.RegInfo{
Name: "koofr",
Description: "Koofr, Digi Storage and other Koofr-compatible storage providers",
Description: "Koofr",
NewFs: NewFs,
Options: []fs.Option{{
Name: fs.ConfigProvider,
Help: "Choose your storage provider.",
// NOTE if you add a new provider here, then add it in the
// setProviderDefaults() function and update options accordingly
Examples: []fs.OptionExample{{
Value: "koofr",
Help: "Koofr, https://app.koofr.net/",
}, {
Value: "digistorage",
Help: "Digi Storage, https://storage.rcs-rds.ro/",
}, {
Value: "other",
Help: "Any other Koofr API compatible storage service",
}},
}, {
Name: "endpoint",
Help: "The Koofr API endpoint to use.",
Provider: "other",
Default: "https://app.koofr.net",
Required: true,
Advanced: true,
}, {
Name: "mountid",
Help: "Mount ID of the mount to use.\n\nIf omitted, the primary mount is used.",
Required: false,
Default: "",
Advanced: true,
}, {
Name: "setmtime",
Help: "Does the backend support setting modification time.\n\nSet this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
Default: true,
Required: true,
Advanced: true,
}, {
Name: "user",
Help: "Your user name.",
Help: "Your Koofr user name.",
Required: true,
}, {
Name: "password",
Help: "Your password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
Provider: "koofr",
IsPassword: true,
Required: true,
}, {
Name: "password",
Help: "Your password for rclone (generate one at https://storage.rcs-rds.ro/app/admin/preferences/password).",
Provider: "digistorage",
IsPassword: true,
Required: true,
}, {
Name: "password",
Help: "Your password for rclone (generate one at your service's settings page).",
Provider: "other",
Help: "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password).",
IsPassword: true,
Required: true,
}, {
@@ -95,7 +71,6 @@ func init() {
// Options represent the configuration of the Koofr backend
type Options struct {
Provider string `config:"provider"`
Endpoint string `config:"endpoint"`
MountID string `config:"mountid"`
User string `config:"user"`
@@ -280,38 +255,13 @@ func (f *Fs) fullPath(part string) string {
return f.opt.Enc.FromStandardPath(path.Join("/", f.root, part))
}
func setProviderDefaults(opt *Options) {
// handle old, provider-less configs
if opt.Provider == "" {
if opt.Endpoint == "" || strings.HasPrefix(opt.Endpoint, "https://app.koofr.net") {
opt.Provider = "koofr"
} else if strings.HasPrefix(opt.Endpoint, "https://storage.rcs-rds.ro") {
opt.Provider = "digistorage"
} else {
opt.Provider = "other"
}
}
// now assign an endpoint
if opt.Provider == "koofr" {
opt.Endpoint = "https://app.koofr.net"
} else if opt.Provider == "digistorage" {
opt.Endpoint = "https://storage.rcs-rds.ro"
}
}
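In short, old provider-less configs are classified by endpoint prefix. A quick standalone sketch of that inference (prefixes copied from the hunk above):

package main

import (
	"fmt"
	"strings"
)

// inferProvider mirrors the fallback in setProviderDefaults for
// configs written before the provider option existed.
func inferProvider(endpoint string) string {
	switch {
	case endpoint == "" || strings.HasPrefix(endpoint, "https://app.koofr.net"):
		return "koofr"
	case strings.HasPrefix(endpoint, "https://storage.rcs-rds.ro"):
		return "digistorage"
	default:
		return "other"
	}
}

func main() {
	fmt.Println(inferProvider(""))                           // koofr
	fmt.Println(inferProvider("https://storage.rcs-rds.ro")) // digistorage
	fmt.Println(inferProvider("https://example.com/api"))    // other
}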
// NewFs constructs a new filesystem given a root path and rclone configuration options
// NewFs constructs a new filesystem given a root path and configuration options
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
opt := new(Options)
err = configstruct.Set(m, opt)
if err != nil {
return nil, err
}
setProviderDefaults(opt)
return NewFsFromOptions(ctx, name, root, opt)
}
// NewFsFromOptions constructs a new filesystem given a root path and internal configuration options
func NewFsFromOptions(ctx context.Context, name, root string, opt *Options) (ff fs.Fs, err error) {
pass, err := obscure.Reveal(opt.Password)
if err != nil {
return nil, err

View File

@@ -1133,9 +1133,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
// Wipe hashes before update
o.clearHashCache()
var symlinkData bytes.Buffer
// If the object is a regular file, create it.
// If it is a translated link, just read in the contents, and
@@ -1298,13 +1295,6 @@ func (o *Object) setMetadata(info os.FileInfo) {
}
}
// clearHashCache wipes any cached hashes for the object
func (o *Object) clearHashCache() {
o.fs.objectMetaMu.Lock()
o.hashes = nil
o.fs.objectMetaMu.Unlock()
}
// Stat an Object into info
func (o *Object) lstat() error {
info, err := o.fs.lstat(o.path)
@@ -1316,7 +1306,6 @@ func (o *Object) lstat() error {
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
o.clearHashCache()
return remove(o.path)
}
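The pattern at stake here — wipe cached hashes under a lock whenever the content may change — in a self-contained sketch with types simplified from the real Object:

package main

import (
	"fmt"
	"sync"
)

type object struct {
	mu     sync.Mutex
	hashes map[string]string // cached hex digests by hash name
}

// clearHashCache drops every cached hash so a stale digest can never
// be served after the file content changes.
func (o *object) clearHashCache() {
	o.mu.Lock()
	o.hashes = nil
	o.mu.Unlock()
}

func (o *object) update(newContent []byte) {
	o.clearHashCache() // content is about to change
	_ = newContent     // write elided in this sketch
}

func main() {
	o := &object{hashes: map[string]string{"md5": "9a0364b9e99bb480dd25e1f0284c8555"}}
	o.update([]byte("CONTENT"))
	fmt.Println(o.hashes == nil) // true: next Hash() must recompute
}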

View File

@@ -1,7 +1,6 @@
package local
import (
"bytes"
"context"
"io/ioutil"
"os"
@@ -13,7 +12,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers"
@@ -168,64 +166,3 @@ func TestSymlinkError(t *testing.T) {
_, err := NewFs(context.Background(), "local", "/", m)
assert.Equal(t, errLinksAndCopyLinks, err)
}
// Test hashes on updating an object
func TestHashOnUpdate(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt"
when := time.Now()
r.WriteFile(filePath, "content", when)
f := r.Flocal.(*Fs)
// Get the object
o, err := f.NewObject(ctx, filePath)
require.NoError(t, err)
// Test the hash is as we expect
md5, err := o.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
// Reupload it with different contents but same size and timestamp
var b = bytes.NewBufferString("CONTENT")
src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
err = o.Update(ctx, b, src)
require.NoError(t, err)
// Check the hash is as expected
md5, err = o.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, "45685e95985e20822fb2538a522a5ccf", md5)
}
// Test hashes on deleting an object
func TestHashOnDelete(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
const filePath = "file.txt"
when := time.Now()
r.WriteFile(filePath, "content", when)
f := r.Flocal.(*Fs)
// Get the object
o, err := f.NewObject(ctx, filePath)
require.NoError(t, err)
// Test the hash is as we expect
md5, err := o.Hash(ctx, hash.MD5)
require.NoError(t, err)
assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
// Delete the object
require.NoError(t, o.Remove(ctx))
// Test the hash cache is empty
require.Nil(t, o.(*Object).hashes)
// Test the hash returns an error
_, err = o.Hash(ctx, hash.MD5)
require.Error(t, err)
}

View File

@@ -11,7 +11,8 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "",
NilObject: (*local.Object)(nil),
RemoteName: "",
NilObject: (*local.Object)(nil),
QuickTestOK: true,
})
}

View File

@@ -58,7 +58,7 @@ type UserInfoResponse struct {
AutoProlong bool `json:"auto_prolong"`
Basequota int64 `json:"basequota"`
Enabled bool `json:"enabled"`
Expires int64 `json:"expires"`
Expires int `json:"expires"`
Prolong bool `json:"prolong"`
Promocodes struct {
} `json:"promocodes"`
@@ -80,7 +80,7 @@ type UserInfoResponse struct {
FileSizeLimit int64 `json:"file_size_limit"`
Space struct {
BytesTotal int64 `json:"bytes_total"`
BytesUsed int64 `json:"bytes_used"`
BytesUsed int `json:"bytes_used"`
Overquota bool `json:"overquota"`
} `json:"space"`
} `json:"cloud"`
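The int ↔ int64 flips in this hunk matter on 32-bit builds: Go's int is only guaranteed to be 32 bits wide, and quota byte counts overflow that. A standalone sketch with the same JSON tags as the Space struct:

package main

import (
	"encoding/json"
	"fmt"
)

type space struct {
	BytesTotal int64 `json:"bytes_total"`
	BytesUsed  int64 `json:"bytes_used"`
}

func main() {
	// 8 TiB total — far beyond the 2^31-1 ceiling of a 32-bit int.
	data := []byte(`{"bytes_total": 8796093022208, "bytes_used": 123456789}`)
	var s space
	if err := json.Unmarshal(data, &s); err != nil {
		panic(err)
	}
	fmt.Println(s.BytesTotal, s.BytesUsed)
}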

View File

@@ -1572,7 +1572,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
}
total := info.Body.Cloud.Space.BytesTotal
used := info.Body.Cloud.Space.BytesUsed
used := int64(info.Body.Cloud.Space.BytesUsed)
usage := &fs.Usage{
Total: fs.NewUsageValue(total),

View File

@@ -10,7 +10,8 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: ":memory:",
NilObject: (*Object)(nil),
RemoteName: ":memory:",
NilObject: (*Object)(nil),
QuickTestOK: true,
})
}

File diff suppressed because it is too large

View File

@@ -1,16 +0,0 @@
package netstorage_test
import (
"testing"
"github.com/rclone/rclone/backend/netstorage"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestnStorage:",
NilObject: (*netstorage.Object)(nil),
})
}

View File

@@ -65,12 +65,9 @@ var (
authPath = "/common/oauth2/v2.0/authorize"
tokenPath = "/common/oauth2/v2.0/token"
scopesWithSitePermission = []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access", "Sites.Read.All"}
scopesWithoutSitePermission = []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access"}
// Description of how to auth for this app for a business account
oauthConfig = &oauth2.Config{
Scopes: scopesWithSitePermission,
Scopes: []string{"Files.Read", "Files.ReadWrite", "Files.Read.All", "Files.ReadWrite.All", "offline_access", "Sites.Read.All"},
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
@@ -140,26 +137,6 @@ Note that the chunks will be buffered into memory.`,
Help: "The type of the drive (" + driveTypePersonal + " | " + driveTypeBusiness + " | " + driveTypeSharepoint + ").",
Default: "",
Advanced: true,
}, {
Name: "root_folder_id",
Help: `ID of the root folder.
This isn't normally needed, but in special circumstances you might
know the folder ID that you wish to access but not be able to get
there through a path traversal.
`,
Advanced: true,
}, {
Name: "disable_site_permission",
Help: `Disable the request for Sites.Read.All permission.
If set to true, you will no longer be able to search for a SharePoint site when
configuring drive ID, because rclone will not request Sites.Read.All permission.
Set it to true if your organization didn't assign Sites.Read.All permission to the
application, and your organization disallows users to consent app permission
request on their own.`,
Default: false,
Advanced: true,
}, {
Name: "expose_onenote_files",
Help: `Set to make OneNote files show up in directory listings.
@@ -397,12 +374,6 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
region, graphURL := getRegionURL(m)
if config.State == "" {
disableSitePermission, _ := m.Get("disable_site_permission")
if disableSitePermission == "true" {
oauthConfig.Scopes = scopesWithoutSitePermission
} else {
oauthConfig.Scopes = scopesWithSitePermission
}
oauthConfig.Endpoint = oauth2.Endpoint{
AuthURL: authEndpoint[region] + authPath,
TokenURL: authEndpoint[region] + tokenPath,
@@ -556,8 +527,6 @@ type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DriveID string `config:"drive_id"`
DriveType string `config:"drive_type"`
RootFolderID string `config:"root_folder_id"`
DisableSitePermission bool `config:"disable_site_permission"`
ExposeOneNoteFiles bool `config:"expose_onenote_files"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ListChunk int64 `config:"list_chunk"`
@@ -649,12 +618,6 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
retry := false
if resp != nil {
switch resp.StatusCode {
case 400:
if apiErr, ok := err.(*api.Error); ok {
if apiErr.ErrorInfo.InnerError.Code == "pathIsTooLong" {
return false, fserrors.NoRetryError(err)
}
}
case 401:
if len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
retry = true
@@ -826,11 +789,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
rootURL := graphAPIEndpoint[opt.Region] + "/v1.0" + "/drives/" + opt.DriveID
if opt.DisableSitePermission {
oauthConfig.Scopes = scopesWithoutSitePermission
} else {
oauthConfig.Scopes = scopesWithSitePermission
}
oauthConfig.Endpoint = oauth2.Endpoint{
AuthURL: authEndpoint[opt.Region] + authPath,
TokenURL: authEndpoint[opt.Region] + tokenPath,
@@ -868,19 +826,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
})
// Get rootID
var rootID = opt.RootFolderID
if rootID == "" {
rootInfo, _, err := f.readMetaDataForPath(ctx, "")
if err != nil {
return nil, fmt.Errorf("failed to get root: %w", err)
}
rootID = rootInfo.GetID()
rootInfo, _, err := f.readMetaDataForPath(ctx, "")
if err != nil {
return nil, fmt.Errorf("failed to get root: %w", err)
}
if rootID == "" {
if rootInfo.GetID() == "" {
return nil, errors.New("failed to get root: ID was empty")
}
f.dirCache = dircache.New(root, rootID, f)
f.dirCache = dircache.New(root, rootInfo.GetID(), f)
// Find the current root
err = f.dirCache.FindRoot(ctx, false)
@@ -888,7 +842,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
tempF := *f
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
tempF.dirCache = dircache.New(newRoot, rootInfo.ID, &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(ctx, false)
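
The control flow changed in this hunk reduces to: prefer an explicitly configured folder ID, otherwise query the drive root. A minimal sketch of that fallback, where resolveRootID and fetchRoot are illustrative names (fetchRoot stands in for readMetaDataForPath), not rclone API:

```go
package main

import (
	"errors"
	"fmt"
)

// resolveRootID prefers a configured root_folder_id and only queries the
// backend for the drive root when none is set.
func resolveRootID(configured string, fetchRoot func() (string, error)) (string, error) {
	if configured != "" {
		return configured, nil
	}
	id, err := fetchRoot()
	if err != nil {
		return "", fmt.Errorf("failed to get root: %w", err)
	}
	if id == "" {
		return "", errors.New("failed to get root: ID was empty")
	}
	return id, nil
}

func main() {
	id, err := resolveRootID("", func() (string, error) { return "root-id-123", nil })
	fmt.Println(id, err)
}
```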

View File

@@ -136,8 +136,7 @@ func (q *quickXorHash) Write(p []byte) (n int, err error) {
func (q *quickXorHash) checkSum() (h [Size]byte) {
// Output the data as little endian bytes
ph := 0
for i := 0; i < len(q.data)-1; i++ {
d := q.data[i]
for _, d := range q.data[:len(q.data)-1] {
_ = h[ph+7] // bounds check
h[ph+0] = byte(d >> (8 * 0))
h[ph+1] = byte(d >> (8 * 1))
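
The indexed loop and the range form on either side of this hunk emit the same little-endian bytes; a minimal equivalence check against the standard library:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	d := uint64(0x1122334455667788)
	// Hand-rolled little-endian store, as in checkSum above.
	var h [8]byte
	for i := uint(0); i < 8; i++ {
		h[i] = byte(d >> (8 * i))
	}
	// Reference result from encoding/binary.
	var ref [8]byte
	binary.LittleEndian.PutUint64(ref[:], d)
	fmt.Println(h == ref) // true
}
```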

View File

@@ -2,6 +2,8 @@
// object storage system.
package pcloud
// FIXME implement ListR? /listfolder can do recursive lists
// FIXME cleanup returns login required?
// FIXME mime type? Fix overview if implement.
@@ -25,7 +27,6 @@ import (
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
@@ -245,7 +246,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.It
return nil, err
}
found, err := f.listAll(ctx, directoryID, false, true, false, func(item *api.Item) bool {
found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
if item.Name == leaf {
info = item
return true
@@ -379,7 +380,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
found, err = f.listAll(ctx, pathID, true, false, false, func(item *api.Item) bool {
found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
if item.Name == leaf {
pathIDOut = item.ID
return true
@@ -445,16 +446,14 @@ type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, recursive bool, fn listAllFn) (found bool, err error) {
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/listfolder",
Parameters: url.Values{},
}
if recursive {
opts.Parameters.Set("recursive", "1")
}
opts.Parameters.Set("folderid", dirIDtoNumber(dirID))
// FIXME can do recursive
var result api.ItemResult
var resp *http.Response
@@ -466,69 +465,24 @@ func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, fi
if err != nil {
return found, fmt.Errorf("couldn't list files: %w", err)
}
var recursiveContents func(is []api.Item, path string)
recursiveContents = func(is []api.Item, path string) {
for i := range is {
item := &is[i]
if item.IsFolder {
if filesOnly {
continue
}
} else {
if directoriesOnly {
continue
}
for i := range result.Metadata.Contents {
item := &result.Metadata.Contents[i]
if item.IsFolder {
if filesOnly {
continue
}
item.Name = path + f.opt.Enc.ToStandardName(item.Name)
if fn(item) {
found = true
break
}
if recursive {
recursiveContents(item.Contents, item.Name+"/")
}
}
}
recursiveContents(result.Metadata.Contents, "")
return
}
// listHelper iterates over all items from the directory
// and calls the callback for each element.
func (f *Fs) listHelper(ctx context.Context, dir string, recursive bool, callback func(entries fs.DirEntry) error) (err error) {
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, recursive, func(info *api.Item) bool {
remote := path.Join(dir, info.Name)
if info.IsFolder {
// cache the directory ID for later lookups
f.dirCache.Put(remote, info.ID)
d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
// FIXME more info from dir?
iErr = callback(d)
} else {
o, err := f.newObjectWithInfo(ctx, remote, info)
if err != nil {
iErr = err
return true
if directoriesOnly {
continue
}
iErr = callback(o)
}
if iErr != nil {
return true
item.Name = f.opt.Enc.ToStandardName(item.Name)
if fn(item) {
found = true
break
}
return false
})
if err != nil {
return err
}
if iErr != nil {
return iErr
}
return nil
return
}
// List the objects and directories in dir into entries. The
@@ -541,24 +495,36 @@ func (f *Fs) listHelper(ctx context.Context, dir string, recursive bool, callbac
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.listHelper(ctx, dir, false, func(o fs.DirEntry) error {
entries = append(entries, o)
return nil
})
return entries, err
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
list := walk.NewListRHelper(callback)
err = f.listHelper(ctx, dir, true, func(o fs.DirEntry) error {
return list.Add(o)
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
}
var iErr error
_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
remote := path.Join(dir, info.Name)
if info.IsFolder {
// cache the directory ID for later lookups
f.dirCache.Put(remote, info.ID)
d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
// FIXME more info from dir?
entries = append(entries, d)
} else {
o, err := f.newObjectWithInfo(ctx, remote, info)
if err != nil {
iErr = err
return true
}
entries = append(entries, o)
}
return false
})
if err != nil {
return err
return nil, err
}
return list.Flush()
if iErr != nil {
return nil, iErr
}
return entries, nil
}
// Creates from the parameters passed in a half finished Object which
@@ -690,7 +656,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
opts.Parameters.Set("fileid", fileIDtoNumber(srcObj.id))
opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(leaf))
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
opts.Parameters.Set("mtime", fmt.Sprintf("%d", uint64(srcObj.modTime.Unix())))
opts.Parameters.Set("mtime", fmt.Sprintf("%d", srcObj.modTime.Unix()))
var resp *http.Response
var result api.ItemResult
err = f.pacer.Call(func() (bool, error) {
@@ -1171,7 +1137,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
opts.Parameters.Set("filename", leaf)
opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
opts.Parameters.Set("nopartial", "1")
opts.Parameters.Set("mtime", fmt.Sprintf("%d", uint64(modTime.Unix())))
opts.Parameters.Set("mtime", fmt.Sprintf("%d", modTime.Unix()))
// Special treatment for a 0 length upload. This doesn't work
// with PUT even with Content-Length set (by setting
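
The recursiveContents closure above is an early-exit depth-first walk; here is a toy, self-contained version of the same shape, where Item is a stand-in type and not pcloud's api.Item:

```go
package main

import "fmt"

// Item is a toy stand-in for the backend's directory entry type.
type Item struct {
	Name     string
	IsFolder bool
	Contents []Item
}

// walk calls fn on every item, prefixing names with the parent path, and
// stops early as soon as fn returns true (mirroring listAll's found flag).
func walk(items []Item, path string, fn func(*Item) bool) (found bool) {
	for i := range items {
		item := &items[i]
		item.Name = path + item.Name
		if fn(item) {
			return true
		}
		if item.IsFolder && walk(item.Contents, item.Name+"/", fn) {
			return true
		}
	}
	return false
}

func main() {
	tree := []Item{{Name: "a", IsFolder: true, Contents: []Item{{Name: "b.txt"}}}}
	walk(tree, "", func(it *Item) bool {
		fmt.Println(it.Name) // a, then a/b.txt
		return false         // keep going
	})
}
```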

View File

@@ -84,9 +84,6 @@ func init() {
}, {
Value: "IBMCOS",
Help: "IBM COS S3",
}, {
Value: "Lyve",
Help: "Seagate Lyve Cloud",
}, {
Value: "Minio",
Help: "Minio Object Storage",
@@ -105,9 +102,6 @@ func init() {
}, {
Value: "StackPath",
Help: "StackPath Object Storage",
}, {
Value: "Storj",
Help: "Storj (S3 Compatible Gateway)",
}, {
Value: "TencentCOS",
Help: "Tencent Cloud Object Storage (COS)",
@@ -294,7 +288,7 @@ func init() {
}, {
Name: "region",
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS,Alibaba,RackCorp,Scaleway,Storj,TencentCOS",
Provider: "!AWS,Alibaba,RackCorp,Scaleway,TencentCOS",
Examples: []fs.OptionExample{{
Value: "",
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
@@ -603,20 +597,6 @@ func init() {
Value: "s3.eu-central-1.stackpathstorage.com",
Help: "EU Endpoint",
}},
}, {
Name: "endpoint",
Help: "Endpoint of the Shared Gateway.",
Provider: "Storj",
Examples: []fs.OptionExample{{
Value: "gateway.eu1.storjshare.io",
Help: "EU1 Shared Gateway",
}, {
Value: "gateway.us1.storjshare.io",
Help: "US1 Shared Gateway",
}, {
Value: "gateway.ap1.storjshare.io",
Help: "Asia-Pacific Shared Gateway",
}},
}, {
// cos endpoints: https://intl.cloud.tencent.com/document/product/436/6224
Name: "endpoint",
@@ -746,7 +726,7 @@ func init() {
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,Scaleway,StackPath,Storj,RackCorp",
Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,Scaleway,StackPath,RackCorp",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint",
@@ -767,10 +747,6 @@ func init() {
Value: "localhost:8333",
Help: "SeaweedFS S3 localhost",
Provider: "SeaweedFS",
}, {
Value: "s3.us-east-1.lyvecloud.seagate.com",
Help: "Seagate Lyve Cloud US East 1",
Provider: "Lyve",
}, {
Value: "s3.wasabisys.com",
Help: "Wasabi US East endpoint",
@@ -785,11 +761,7 @@ func init() {
Provider: "Wasabi",
}, {
Value: "s3.ap-northeast-1.wasabisys.com",
Help: "Wasabi AP Northeast 1 (Tokyo) endpoint",
Provider: "Wasabi",
}, {
Value: "s3.ap-northeast-2.wasabisys.com",
Help: "Wasabi AP Northeast 2 (Osaka) endpoint",
Help: "Wasabi AP Northeast endpoint",
Provider: "Wasabi",
}},
}, {
@@ -1038,7 +1010,7 @@ func init() {
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,IBMCOS,Alibaba,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
Provider: "!AWS,IBMCOS,Alibaba,RackCorp,Scaleway,StackPath,TencentCOS",
}, {
Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -1049,7 +1021,6 @@ For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview
Note that this ACL is applied when server-side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.`,
Provider: "!Storj",
Examples: []fs.OptionExample{{
Value: "default",
Help: "Owner gets Full_CONTROL.\nNo one else has access rights (default).",
@@ -1209,9 +1180,6 @@ If you leave it blank, this is calculated automatically from the sse_customer_ke
}, {
Value: "INTELLIGENT_TIERING",
Help: "Intelligent-Tiering storage class",
}, {
Value: "GLACIER_IR",
Help: "Glacier Instant Retrieval storage class",
}},
}, {
// Mapping from here: https://www.alibabacloud.com/help/doc-detail/64919.htm
@@ -1555,14 +1523,6 @@ See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rcl
This is usually set to a CloudFront CDN URL as AWS S3 offers
cheaper egress for data downloaded through the CloudFront network.`,
Advanced: true,
}, {
Name: "use_multipart_etag",
Help: `Whether to use ETag in multipart uploads for verification
This should be true, false or left unset to use the default for the provider.
`,
Default: fs.Tristate{},
Advanced: true,
},
}})
}
@@ -1627,7 +1587,6 @@ type Options struct {
MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
DisableHTTP2 bool `config:"disable_http2"`
DownloadURL string `config:"download_url"`
UseMultipartEtag fs.Tristate `config:"use_multipart_etag"`
}
// Fs represents a remote s3 server
@@ -1931,13 +1890,12 @@ func setQuirks(opt *Options) {
listObjectsV2 = true
virtualHostStyle = true
urlEncodeListings = true
useMultipartEtag = true
)
switch opt.Provider {
case "AWS":
// No quirks
case "Alibaba":
useMultipartEtag = false // Alibaba seems to calculate multipart Etags differently from AWS
// No quirks
case "Ceph":
listObjectsV2 = false
virtualHostStyle = false
@@ -1950,18 +1908,13 @@ func setQuirks(opt *Options) {
listObjectsV2 = false // untested
virtualHostStyle = false
urlEncodeListings = false
useMultipartEtag = false // untested
case "Lyve":
useMultipartEtag = false // Lyve seems to calculate multipart Etags differently from AWS
case "Minio":
virtualHostStyle = false
case "Netease":
listObjectsV2 = false // untested
urlEncodeListings = false
useMultipartEtag = false // untested
case "RackCorp":
// No quirks
useMultipartEtag = false // untested
case "Scaleway":
// Scaleway can only have 1000 parts in an upload
if opt.MaxUploadParts > 1000 {
@@ -1972,32 +1925,23 @@ func setQuirks(opt *Options) {
listObjectsV2 = false // untested
virtualHostStyle = false
urlEncodeListings = false
useMultipartEtag = false // untested
case "StackPath":
listObjectsV2 = false // untested
virtualHostStyle = false
urlEncodeListings = false
case "Storj":
// Force chunk size to >= 64 MiB
if opt.ChunkSize < 64*fs.Mebi {
opt.ChunkSize = 64 * fs.Mebi
}
case "TencentCOS":
listObjectsV2 = false // untested
useMultipartEtag = false // untested
listObjectsV2 = false // untested
case "Wasabi":
// No quirks
case "Other":
listObjectsV2 = false
virtualHostStyle = false
urlEncodeListings = false
useMultipartEtag = false
default:
fs.Logf("s3", "s3 provider %q not known - please set correctly", opt.Provider)
listObjectsV2 = false
virtualHostStyle = false
urlEncodeListings = false
useMultipartEtag = false
}
// Path Style vs Virtual Host style
@@ -2019,12 +1963,6 @@ func setQuirks(opt *Options) {
opt.ListVersion = 1
}
}
// Set the correct use multipart Etag for error checking if not manually set
if !opt.UseMultipartEtag.Valid {
opt.UseMultipartEtag.Valid = true
opt.UseMultipartEtag.Value = useMultipartEtag
}
}
// setRoot changes the root of the Fs
@@ -2125,11 +2063,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
if opt.Provider == "Storj" {
f.features.Copy = nil
f.features.SetTier = false
f.features.GetTier = false
}
// f.listMultipartUploads()
return f, nil
}
@@ -3276,6 +3209,9 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
if err != nil {
return err
}
if resp.LastModified == nil {
fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
}
o.setMetaData(resp.ETag, resp.ContentLength, resp.LastModified, resp.Metadata, resp.ContentType, resp.StorageClass)
return nil
}
@@ -3305,7 +3241,6 @@ func (o *Object) setMetaData(etag *string, contentLength *int64, lastModified *t
o.storageClass = aws.StringValue(storageClass)
if lastModified == nil {
o.lastModified = time.Now()
fs.Logf(o, "Failed to read last modified")
} else {
o.lastModified = *lastModified
}
@@ -3386,7 +3321,11 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options
return nil, err
}
contentLength := &resp.ContentLength
size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
if err != nil {
fs.Debugf(o, "Failed to parse content length from string %s, %v", resp.Header.Get("Content-Length"), err)
}
contentLength := &size
if resp.Header.Get("Content-Range") != "" {
var contentRange = resp.Header.Get("Content-Range")
slash := strings.IndexRune(contentRange, '/')
@@ -3477,7 +3416,9 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
if err != nil {
return nil, err
}
if resp.LastModified == nil {
fs.Logf(o, "Failed to read last modified: %v", err)
}
// read size from ContentLength or ContentRange
size := resp.ContentLength
if resp.ContentRange != nil {
@@ -3500,7 +3441,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
var warnStreamUpload sync.Once
func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (etag string, err error) {
func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (err error) {
f := o.fs
// make concurrency machinery
@@ -3547,7 +3488,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
return f.shouldRetry(ctx, err)
})
if err != nil {
return etag, fmt.Errorf("multipart upload failed to initialise: %w", err)
return fmt.Errorf("multipart upload failed to initialise: %w", err)
}
uid := cout.UploadId
@@ -3576,21 +3517,8 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
partsMu sync.Mutex // to protect parts
parts []*s3.CompletedPart
off int64
md5sMu sync.Mutex
md5s []byte
)
addMd5 := func(md5binary *[md5.Size]byte, partNum int64) {
md5sMu.Lock()
defer md5sMu.Unlock()
start := partNum * md5.Size
end := start + md5.Size
if extend := end - int64(len(md5s)); extend > 0 {
md5s = append(md5s, make([]byte, extend)...)
}
copy(md5s[start:end], (*md5binary)[:])
}
for partNum := int64(1); !finished; partNum++ {
// Get a block of memory from the pool and token which limits concurrency.
tokens.Get()
@@ -3620,7 +3548,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
finished = true
} else if err != nil {
free()
return etag, fmt.Errorf("multipart upload failed to read source: %w", err)
return fmt.Errorf("multipart upload failed to read source: %w", err)
}
buf = buf[:n]
@@ -3633,7 +3561,6 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
// create checksum of buffer for integrity checking
md5sumBinary := md5.Sum(buf)
addMd5(&md5sumBinary, partNum-1)
md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])
err = f.pacer.Call(func() (bool, error) {
@@ -3675,7 +3602,7 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
}
err = g.Wait()
if err != nil {
return etag, err
return err
}
// sort the completed parts by part number
@@ -3696,11 +3623,9 @@ func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, si
return f.shouldRetry(ctx, err)
})
if err != nil {
return etag, fmt.Errorf("multipart upload failed to finalise: %w", err)
return fmt.Errorf("multipart upload failed to finalise: %w", err)
}
hashOfHashes := md5.Sum(md5s)
etag = fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(parts))
return etag, nil
return nil
}
// Update the Object from in with modTime and size
@@ -3726,20 +3651,19 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
// - for multipart provided checksums aren't disabled
// - so we can add the md5sum in the metadata as metaMD5Hash
var md5sumBase64 string
var md5sumHex string
var md5sum string
if !multipart || !o.fs.opt.DisableChecksum {
md5sumHex, err = src.Hash(ctx, hash.MD5)
if err == nil && matchMd5.MatchString(md5sumHex) {
hashBytes, err := hex.DecodeString(md5sumHex)
hash, err := src.Hash(ctx, hash.MD5)
if err == nil && matchMd5.MatchString(hash) {
hashBytes, err := hex.DecodeString(hash)
if err == nil {
md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
md5sum = base64.StdEncoding.EncodeToString(hashBytes)
if (multipart || o.fs.etagIsNotMD5) && !o.fs.opt.DisableChecksum {
// Set the md5sum as metadata on the object if
// - a multipart upload
// - the Etag is not an MD5, eg when using SSE/SSE-C
// provided checksums aren't disabled
metadata[metaMD5Hash] = &md5sumBase64
metadata[metaMD5Hash] = &md5sum
}
}
}
@@ -3754,8 +3678,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
ContentType: &mimeType,
Metadata: metadata,
}
if md5sumBase64 != "" {
req.ContentMD5 = &md5sumBase64
if md5sum != "" {
req.ContentMD5 = &md5sum
}
if o.fs.opt.RequesterPays {
req.RequestPayer = aws.String(s3.RequestPayerRequester)
@@ -3809,9 +3733,8 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
var resp *http.Response // response from PUT
var wantETag string // Multipart upload Etag to check
if multipart {
wantETag, err = o.uploadMultipart(ctx, &req, size, in)
err = o.uploadMultipart(ctx, &req, size, in)
if err != nil {
return err
}
@@ -3873,7 +3796,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// so make up the object as best we can assuming it got
// uploaded properly. If size < 0 then we need to do the HEAD.
if o.fs.opt.NoHead && size >= 0 {
o.md5 = md5sumHex
o.md5 = md5sum
o.bytes = size
o.lastModified = time.Now()
o.meta = req.Metadata
@@ -3891,18 +3814,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Read the metadata from the newly created object
o.meta = nil // wipe old metadata
head, err := o.headObject(ctx)
if err != nil {
return err
}
o.setMetaData(head.ETag, head.ContentLength, head.LastModified, head.Metadata, head.ContentType, head.StorageClass)
if o.fs.opt.UseMultipartEtag.Value && !o.fs.etagIsNotMD5 && wantETag != "" && head.ETag != nil && *head.ETag != "" {
gotETag := strings.Trim(strings.ToLower(*head.ETag), `"`)
if wantETag != gotETag {
return fmt.Errorf("multipart upload corrupted: Etag differ: expecting %s but got %s", wantETag, gotETag)
}
fs.Debugf(o, "Multipart upload Etag: %s OK", wantETag)
}
err = o.readMetaData(ctx)
return err
}
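
The wantETag logic wired through uploadMultipart and Update in this file follows the usual AWS convention for multipart ETags: the MD5 of the concatenated binary part MD5s, suffixed with the part count. A standalone sketch of the calculation:

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// etagForParts reproduces the multipart ETag shape used above: MD5 over
// the concatenated binary MD5s of each part, then "-<number of parts>".
func etagForParts(parts [][]byte) string {
	var md5s []byte
	for _, p := range parts {
		sum := md5.Sum(p)
		md5s = append(md5s, sum[:]...)
	}
	hashOfHashes := md5.Sum(md5s)
	return fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(parts))
}

func main() {
	fmt.Println(etagForParts([][]byte{[]byte("part one"), []byte("part two")}))
}
```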

View File

@@ -42,8 +42,7 @@ const (
hashCommandNotSupported = "none"
minSleep = 100 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
keepAliveInterval = time.Minute // send keepalives every this long while running commands
decayConstant = 2 // bigger for slower decay, exponential
)
var (
@@ -60,13 +59,11 @@ func init() {
Help: "SSH host to connect to.\n\nE.g. \"example.com\".",
Required: true,
}, {
Name: "user",
Help: "SSH username.",
Default: currentUser,
Name: "user",
Help: "SSH username, leave blank for current username, " + currentUser + ".",
}, {
Name: "port",
Help: "SSH port number.",
Default: 22,
Name: "port",
Help: "SSH port, leave blank to use default (22).",
}, {
Name: "pass",
Help: "SSH password, leave blank to use ssh-agent.",
@@ -342,32 +339,6 @@ func (c *conn) wait() {
c.err <- c.sshClient.Conn.Wait()
}
// Send a keepalive over the ssh connection
func (c *conn) sendKeepAlive() {
_, _, err := c.sshClient.SendRequest("keepalive@openssh.com", true, nil)
if err != nil {
fs.Debugf(nil, "Failed to send keep alive: %v", err)
}
}
// Send keepalives every interval over the ssh connection until done is closed
func (c *conn) sendKeepAlives(interval time.Duration) (done chan struct{}) {
done = make(chan struct{})
go func() {
t := time.NewTicker(interval)
defer t.Stop()
for {
select {
case <-t.C:
c.sendKeepAlive()
case <-done:
return
}
}
}()
return done
}
// Closes the connection
func (c *conn) close() error {
sftpErr := c.sftpClient.Close()
@@ -1127,9 +1098,6 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
}
defer f.putSftpConnection(&c, err)
// Send keepalives while the connection is open
defer close(c.sendKeepAlives(keepAliveInterval))
session, err := c.sshClient.NewSession()
if err != nil {
return nil, fmt.Errorf("run: get SFTP session: %w", err)
@@ -1142,12 +1110,10 @@ func (f *Fs) run(ctx context.Context, cmd string) ([]byte, error) {
session.Stdout = &stdout
session.Stderr = &stderr
fs.Debugf(f, "Running remote command: %s", cmd)
err = session.Run(cmd)
if err != nil {
return nil, fmt.Errorf("failed to run %q: %s: %w", cmd, bytes.TrimSpace(stderr.Bytes()), err)
return nil, fmt.Errorf("failed to run %q: %s: %w", cmd, stderr.Bytes(), err)
}
fs.Debugf(f, "Remote command result: %s", bytes.TrimSpace(stdout.Bytes()))
return stdout.Bytes(), nil
}
@@ -1264,6 +1230,8 @@ func (o *Object) Remote() string {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
o.fs.addSession() // Show session in use
defer o.fs.removeSession()
if o.fs.opt.DisableHashCheck {
return "", nil
}
@@ -1287,16 +1255,36 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
return "", hash.ErrUnsupported
}
c, err := o.fs.getSftpConnection(ctx)
if err != nil {
return "", fmt.Errorf("Hash get SFTP connection: %w", err)
}
session, err := c.sshClient.NewSession()
o.fs.putSftpConnection(&c, err)
if err != nil {
return "", fmt.Errorf("Hash put SFTP connection: %w", err)
}
var stdout, stderr bytes.Buffer
session.Stdout = &stdout
session.Stderr = &stderr
escapedPath := shellEscape(o.path())
if o.fs.opt.PathOverride != "" {
escapedPath = shellEscape(path.Join(o.fs.opt.PathOverride, o.remote))
}
b, err := o.fs.run(ctx, hashCmd+" "+escapedPath)
err = session.Run(hashCmd + " " + escapedPath)
fs.Debugf(nil, "sftp cmd = %s", escapedPath)
if err != nil {
return "", fmt.Errorf("failed to calculate %v hash: %w", r, err)
_ = session.Close()
fs.Debugf(o, "Failed to calculate %v hash: %v (%s)", r, err, bytes.TrimSpace(stderr.Bytes()))
return "", nil
}
_ = session.Close()
b := stdout.Bytes()
fs.Debugf(nil, "sftp output = %q", b)
str := parseHash(b)
fs.Debugf(nil, "sftp hash = %q", str)
if r == hash.MD5 {
o.md5sum = &str
} else if r == hash.SHA1 {

View File

@@ -754,34 +754,22 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
var total, objects int64
if f.rootContainer != "" {
var container swift.Container
err = f.pacer.Call(func() (bool, error) {
container, _, err = f.c.Container(ctx, f.rootContainer)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("container info failed: %w", err)
}
total = container.Bytes
objects = container.Count
} else {
var containers []swift.Container
err = f.pacer.Call(func() (bool, error) {
containers, err = f.c.ContainersAll(ctx, nil)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("container listing failed: %w", err)
}
for _, c := range containers {
total += c.Bytes
objects += c.Count
}
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var containers []swift.Container
var err error
err = f.pacer.Call(func() (bool, error) {
containers, err = f.c.ContainersAll(ctx, nil)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("container listing failed: %w", err)
}
usage = &fs.Usage{
var total, objects int64
for _, c := range containers {
total += c.Bytes
objects += c.Count
}
usage := &fs.Usage{
Used: fs.NewUsageValue(total), // bytes in use
Objects: fs.NewUsageValue(objects), // objects in use
}
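
The variant of About that lists all containers sums bytes and object counts across them; stripped to its core:

```go
package main

import "fmt"

type container struct {
	Bytes, Count int64
}

// aggregate sums usage over every container, as About does when no root
// container is set; with a root container it reads that one's stats alone.
func aggregate(cs []container) (total, objects int64) {
	for _, c := range cs {
		total += c.Bytes
		objects += c.Count
	}
	return
}

func main() {
	fmt.Println(aggregate([]container{{100, 2}, {50, 1}}))
}
```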

View File

@@ -1,8 +1,8 @@
//go:build !plan9
// +build !plan9
// Package storj provides an interface to Storj decentralized object storage.
package storj
// Package tardigrade provides an interface to Tardigrade decentralized object storage.
package tardigrade
import (
"context"
@@ -31,17 +31,16 @@ const (
)
var satMap = map[string]string{
"us-central-1.storj.io": "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
"europe-west-1.storj.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
"asia-east-1.storj.io": "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
"us-central-1.tardigrade.io": "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
"europe-west-1.tardigrade.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
"asia-east-1.tardigrade.io": "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "storj",
Description: "Storj Decentralized Cloud Storage",
Aliases: []string{"tardigrade"},
Name: "tardigrade",
Description: "Tardigrade Decentralized Cloud Storage",
NewFs: NewFs,
Config: func(ctx context.Context, name string, m configmap.Mapper, configIn fs.ConfigIn) (*fs.ConfigOut, error) {
provider, _ := m.Get(fs.ConfigProvider)
@@ -85,9 +84,10 @@ func init() {
},
Options: []fs.Option{
{
Name: fs.ConfigProvider,
Help: "Choose an authentication method.",
Default: existingProvider,
Name: fs.ConfigProvider,
Help: "Choose an authentication method.",
Required: true,
Default: existingProvider,
Examples: []fs.OptionExample{{
Value: "existing",
Help: "Use an existing access grant.",
@@ -99,21 +99,23 @@ func init() {
{
Name: "access_grant",
Help: "Access grant.",
Required: false,
Provider: "existing",
},
{
Name: "satellite_address",
Help: "Satellite address.\n\nCustom satellite address should match the format: `<nodeid>@<address>:<port>`.",
Required: false,
Provider: newProvider,
Default: "us-central-1.storj.io",
Default: "us-central-1.tardigrade.io",
Examples: []fs.OptionExample{{
Value: "us-central-1.storj.io",
Value: "us-central-1.tardigrade.io",
Help: "US Central 1",
}, {
Value: "europe-west-1.storj.io",
Value: "europe-west-1.tardigrade.io",
Help: "Europe West 1",
}, {
Value: "asia-east-1.storj.io",
Value: "asia-east-1.tardigrade.io",
Help: "Asia East 1",
},
},
@@ -121,11 +123,13 @@ func init() {
{
Name: "api_key",
Help: "API key.",
Required: false,
Provider: newProvider,
},
{
Name: "passphrase",
Help: "Encryption passphrase.\n\nTo access existing objects enter passphrase used for uploading.",
Required: false,
Provider: newProvider,
},
},
@@ -141,7 +145,7 @@ type Options struct {
Passphrase string `config:"passphrase"`
}
// Fs represents a remote to Storj
// Fs represents a remote to Tardigrade
type Fs struct {
name string // the name of the remote
root string // root of the filesystem
@@ -161,9 +165,9 @@ var (
_ fs.PutStreamer = &Fs{}
)
// NewFs creates a filesystem backed by Storj.
// NewFs creates a filesystem backed by Tardigrade.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs, err error) {
// Setup filesystem and connection to Storj
// Setup filesystem and connection to Tardigrade
root = norm.NFC.String(root)
root = strings.Trim(root, "/")
@@ -184,24 +188,24 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
if f.opts.Access != "" {
access, err = uplink.ParseAccess(f.opts.Access)
if err != nil {
return nil, fmt.Errorf("storj: access: %w", err)
return nil, fmt.Errorf("tardigrade: access: %w", err)
}
}
if access == nil && f.opts.SatelliteAddress != "" && f.opts.APIKey != "" && f.opts.Passphrase != "" {
access, err = uplink.RequestAccessWithPassphrase(ctx, f.opts.SatelliteAddress, f.opts.APIKey, f.opts.Passphrase)
if err != nil {
return nil, fmt.Errorf("storj: access: %w", err)
return nil, fmt.Errorf("tardigrade: access: %w", err)
}
serializedAccess, err := access.Serialize()
if err != nil {
return nil, fmt.Errorf("storj: access: %w", err)
return nil, fmt.Errorf("tardigrade: access: %w", err)
}
err = config.SetValueAndSave(f.name, "access_grant", serializedAccess)
if err != nil {
return nil, fmt.Errorf("storj: access: %w", err)
return nil, fmt.Errorf("tardigrade: access: %w", err)
}
}
@@ -233,7 +237,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
if bucketName != "" && bucketPath != "" {
_, err = project.StatBucket(ctx, bucketName)
if err != nil {
return f, fmt.Errorf("storj: bucket: %w", err)
return f, fmt.Errorf("tardigrade: bucket: %w", err)
}
object, err := project.StatObject(ctx, bucketName, bucketPath)
@@ -259,7 +263,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (_ fs.Fs,
return f, nil
}
// connect opens a connection to Storj.
// connect opens a connection to Tardigrade.
func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
fs.Debugf(f, "connecting...")
defer fs.Debugf(f, "connected: %+v", err)
@@ -270,7 +274,7 @@ func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) {
project, err = cfg.OpenProject(ctx, f.access)
if err != nil {
return nil, fmt.Errorf("storj: project: %w", err)
return nil, fmt.Errorf("tardigrade: project: %w", err)
}
return

View File

@@ -1,7 +1,7 @@
//go:build !plan9
// +build !plan9
package storj
package tardigrade
import (
"context"
@@ -18,7 +18,7 @@ import (
"storj.io/uplink"
)
// Object describes a Storj object
// Object describes a Tardigrade object
type Object struct {
fs *Fs
@@ -32,7 +32,7 @@ type Object struct {
// Check the interfaces are satisfied.
var _ fs.Object = &Object{}
// newObjectFromUplink creates a new object from a Storj uplink object.
// newObjectFromUplink creates a new object from a Tardigrade uplink object.
func newObjectFromUplink(f *Fs, relative string, object *uplink.Object) *Object {
// Attempt to use the modified time from the metadata. Otherwise
// fallback to the server time.

View File

@@ -1,20 +1,20 @@
//go:build !plan9
// +build !plan9
// Test Storj filesystem interface
package storj_test
// Test Tardigrade filesystem interface
package tardigrade_test
import (
"testing"
"github.com/rclone/rclone/backend/storj"
"github.com/rclone/rclone/backend/tardigrade"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestStorj:",
NilObject: (*storj.Object)(nil),
RemoteName: "TestTardigrade:",
NilObject: (*tardigrade.Object)(nil),
})
}

View File

@@ -1,4 +1,4 @@
//go:build plan9
// +build plan9
package storj
package tardigrade

View File

@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"io"
"io/ioutil"
"sync"
"time"
@@ -85,10 +84,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
err := o.Update(ctx, readers[i], src, options...)
if err != nil {
errs[i] = fmt.Errorf("%s: %w", o.UpstreamFs().Name(), err)
if len(entries) > 1 {
// Drain the input buffer to allow other uploads to continue
_, _ = io.Copy(ioutil.Discard, readers[i])
}
}
} else {
errs[i] = fs.ErrorNotAFile
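
The drain in this hunk exists so that when one upstream's upload fails, its reader keeps being emptied and sibling uploads fed from the same source are not blocked. The idiom in isolation (io.Discard is the modern spelling of ioutil.Discard since Go 1.16):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// drain consumes and discards the rest of r so that anything writing
// into it is never blocked waiting for a reader.
func drain(r io.Reader) error {
	_, err := io.Copy(io.Discard, r)
	return err
}

func main() {
	fmt.Println(drain(strings.NewReader("leftover bytes")))
}
```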

View File

@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"path"
"path/filepath"
"strings"
@@ -34,21 +33,25 @@ func init() {
Help: "List of space separated upstreams.\n\nCan be 'upstreama:test/dir upstreamb:', '\"upstreama:test/space:ro dir\" upstreamb:', etc.",
Required: true,
}, {
Name: "action_policy",
Help: "Policy to choose upstream on ACTION category.",
Default: "epall",
Name: "action_policy",
Help: "Policy to choose upstream on ACTION category.",
Required: true,
Default: "epall",
}, {
Name: "create_policy",
Help: "Policy to choose upstream on CREATE category.",
Default: "epmfs",
Name: "create_policy",
Help: "Policy to choose upstream on CREATE category.",
Required: true,
Default: "epmfs",
}, {
Name: "search_policy",
Help: "Policy to choose upstream on SEARCH category.",
Default: "ff",
Name: "search_policy",
Help: "Policy to choose upstream on SEARCH category.",
Required: true,
Default: "ff",
}, {
Name: "cache_time",
Help: "Cache time of usage and free space (in seconds).\n\nThis option is only useful when a path preserving policy is used.",
Default: 120,
Name: "cache_time",
Help: "Cache time of usage and free space (in seconds).\n\nThis option is only useful when a path preserving policy is used.",
Required: true,
Default: 120,
}},
}
fs.Register(fsi)
@@ -487,10 +490,6 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bo
}
if err != nil {
errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
if len(upstreams) > 1 {
// Drain the input buffer to allow other uploads to continue
_, _ = io.Copy(ioutil.Discard, readers[i])
}
return
}
objs[i] = u.WrapObject(o)

View File

@@ -4,6 +4,8 @@ import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"testing"
"time"
@@ -18,12 +20,19 @@ import (
)
// MakeTestDirs makes directories in /tmp for testing
func MakeTestDirs(t *testing.T, n int) (dirs []string) {
func MakeTestDirs(t *testing.T, n int) (dirs []string, clean func()) {
for i := 1; i <= n; i++ {
dir := t.TempDir()
dir, err := ioutil.TempDir("", fmt.Sprintf("rclone-union-test-%d", n))
require.NoError(t, err)
dirs = append(dirs, dir)
}
return dirs
clean = func() {
for _, dir := range dirs {
err := os.RemoveAll(dir)
assert.NoError(t, err)
}
}
return dirs, clean
}
func (f *Fs) TestInternalReadOnly(t *testing.T) {
@@ -86,7 +95,8 @@ func TestMoveCopy(t *testing.T) {
t.Skip("Skipping as -remote set")
}
ctx := context.Background()
dirs := MakeTestDirs(t, 1)
dirs, clean := MakeTestDirs(t, 1)
defer clean()
fsString := fmt.Sprintf(":union,upstreams='%s :memory:bucket':", dirs[0])
f, err := fs.NewFs(ctx, fsString)
require.NoError(t, err)
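
The two sides of this hunk trade t.TempDir (Go 1.15+, cleanup registered automatically) for a manual ioutil.TempDir plus a cleanup closure; both patterns side by side, as a sketch:

```go
package example

import (
	"io/ioutil"
	"os"
	"testing"
)

// Go 1.15+: cleanup is registered for us.
func TestWithTempDir(t *testing.T) {
	dir := t.TempDir() // removed automatically when the test finishes
	_ = dir
}

// Pre-1.15, as in the variant above: create by hand and clean up explicitly.
func TestWithManualTempDir(t *testing.T) {
	dir, err := ioutil.TempDir("", "rclone-union-test-")
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := os.RemoveAll(dir); err != nil {
			t.Error(err)
		}
	}()
	_ = dir
}
```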

View File

@@ -27,7 +27,8 @@ func TestStandard(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := union.MakeTestDirs(t, 3)
dirs, clean := union.MakeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
name := "TestUnion"
fstests.Run(t, &fstests.Opt{
@@ -41,6 +42,7 @@ func TestStandard(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -48,7 +50,8 @@ func TestRO(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := union.MakeTestDirs(t, 3)
dirs, clean := union.MakeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + ":ro " + dirs[2] + ":ro"
name := "TestUnionRO"
fstests.Run(t, &fstests.Opt{
@@ -62,6 +65,7 @@ func TestRO(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -69,7 +73,8 @@ func TestNC(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := union.MakeTestDirs(t, 3)
dirs, clean := union.MakeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + ":nc " + dirs[2] + ":nc"
name := "TestUnionNC"
fstests.Run(t, &fstests.Opt{
@@ -83,6 +88,7 @@ func TestNC(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -90,7 +96,8 @@ func TestPolicy1(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := union.MakeTestDirs(t, 3)
dirs, clean := union.MakeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
name := "TestUnionPolicy1"
fstests.Run(t, &fstests.Opt{
@@ -104,6 +111,7 @@ func TestPolicy1(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -111,7 +119,8 @@ func TestPolicy2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := union.MakeTestDirs(t, 3)
dirs, clean := union.MakeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
name := "TestUnionPolicy2"
fstests.Run(t, &fstests.Opt{
@@ -125,6 +134,7 @@ func TestPolicy2(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}
@@ -132,7 +142,8 @@ func TestPolicy3(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
dirs := union.MakeTestDirs(t, 3)
dirs, clean := union.MakeTestDirs(t, 3)
defer clean()
upstreams := dirs[0] + " " + dirs[1] + " " + dirs[2]
name := "TestUnionPolicy3"
fstests.Run(t, &fstests.Opt{
@@ -146,5 +157,6 @@ func TestPolicy3(t *testing.T) {
},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
QuickTestOK: true,
})
}

View File

@@ -6,6 +6,8 @@ import (
"fmt"
"io"
"math"
"path"
"path/filepath"
"strings"
"sync"
"sync/atomic"
@@ -89,7 +91,7 @@ func New(ctx context.Context, remote, root string, cacheTime time.Duration) (*Fs
return nil, err
}
f.RootFs = rFs
rootString := fspath.JoinRootPath(remote, root)
rootString := path.Join(remote, filepath.ToSlash(root))
myFs, err := cache.Get(ctx, rootString)
if err != nil && err != fs.ErrorIsFile {
return nil, err

View File

@@ -28,7 +28,6 @@ import (
"github.com/rclone/rclone/lib/readers"
"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
"golang.org/x/sync/errgroup"
)
//oAuth
@@ -67,11 +66,6 @@ func init() {
})
},
Options: append(oauthutil.SharedOptions, []fs.Option{{
Name: "hard_delete",
Help: "Delete files permanently rather than putting them into the trash.",
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
@@ -85,9 +79,8 @@ func init() {
// Options defines the configuration for this backend
type Options struct {
Token string `config:"token"`
HardDelete bool `config:"hard_delete"`
Enc encoder.MultiEncoder `config:"encoding"`
Token string `config:"token"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote yandex
@@ -637,7 +630,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
}
}
//delete directory
return f.delete(ctx, root, f.opt.HardDelete)
return f.delete(ctx, root, false)
}
// Rmdir deletes the container
@@ -1074,7 +1067,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
return resp.Body, err
}
func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, mimeType string, options ...fs.OpenOption) (err error) {
// prepare upload
var resp *http.Response
var ur api.AsyncInfo
@@ -1088,29 +1081,6 @@ func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, src f
opts.Parameters.Set("path", o.fs.opt.Enc.FromStandardPath(o.filePath()))
opts.Parameters.Set("overwrite", strconv.FormatBool(overwrite))
// Check to see if we can calculate a MD5 and SHA256 hash and
// if so start calculating them to do de-dupe the uploads.
var (
hashes = src.Fs().Hashes()
size = src.Size()
dedupe = size >= 0 && hashes.Contains(hash.MD5) && hashes.Contains(hash.SHA256)
g *errgroup.Group
gCtx context.Context
md5sum string
sha256sum string
)
if dedupe {
g, gCtx = errgroup.WithContext(ctx)
g.Go(func() (err error) {
md5sum, err = src.Hash(gCtx, hash.MD5)
return err
})
g.Go(func() (err error) {
sha256sum, err = src.Hash(gCtx, hash.SHA256)
return err
})
}
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &ur)
return shouldRetry(ctx, resp, err)
@@ -1122,27 +1092,11 @@ func (o *Object) upload(ctx context.Context, in io.Reader, overwrite bool, src f
// perform the actual upload
opts = rest.Opts{
RootURL: ur.HRef,
Method: "PUT",
ContentType: fs.MimeType(ctx, src),
Body: in,
ExtraHeaders: map[string]string{},
NoResponse: true,
}
if size >= 0 {
opts.ContentLength = &size
}
// Add the hashes to the PUT to dedupe the upload if possible
if dedupe {
err = g.Wait()
if err != nil {
fs.Debugf(o, "failed to calculate MD5 or SHA256: %v", err)
} else {
opts.ExtraHeaders["Expect"] = "100-continue"
opts.ExtraHeaders["Etag"] = md5sum
opts.ExtraHeaders["Sha256"] = sha256sum
}
RootURL: ur.HRef,
Method: "PUT",
ContentType: mimeType,
Body: in,
NoResponse: true,
}
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1170,7 +1124,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
}
//upload file
err = o.upload(ctx, in1, true, src, options...)
err = o.upload(ctx, in1, true, fs.MimeType(ctx, src), options...)
if err != nil {
return err
}
@@ -1187,7 +1141,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
return o.fs.delete(ctx, o.filePath(), o.fs.opt.HardDelete)
return o.fs.delete(ctx, o.filePath(), false)
}
// MimeType of an Object if known, "" otherwise
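
The dedupe block in upload computes MD5 and SHA-256 concurrently with an errgroup before attaching them as headers; the concurrency pattern on its own (hashBoth is an illustrative name — the original uses errgroup.WithContext and src.Hash):

```go
package main

import (
	"crypto/md5"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"

	"golang.org/x/sync/errgroup"
)

// hashBoth computes MD5 and SHA-256 of two independent readers in
// parallel, the same errgroup shape as the dedupe code above.
func hashBoth(md5In, shaIn io.Reader) (md5sum, sha256sum string, err error) {
	g := new(errgroup.Group)
	g.Go(func() error {
		h := md5.New()
		if _, err := io.Copy(h, md5In); err != nil {
			return err
		}
		md5sum = hex.EncodeToString(h.Sum(nil))
		return nil
	})
	g.Go(func() error {
		h := sha256.New()
		if _, err := io.Copy(h, shaIn); err != nil {
			return err
		}
		sha256sum = hex.EncodeToString(h.Sum(nil))
		return nil
	})
	err = g.Wait()
	return
}

func main() {
	m, s, err := hashBoth(strings.NewReader("hello"), strings.NewReader("hello"))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println(m, s)
}
```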

View File

@@ -52,7 +52,6 @@ var (
var osarches = []string{
"windows/386",
"windows/amd64",
"windows/arm64",
"darwin/amd64",
"darwin/arm64",
"linux/386",
@@ -86,13 +85,6 @@ var archFlags = map[string][]string{
"arm-v7": {"GOARM=7"},
}
// Map Go architectures to NFPM architectures
// Any missing are passed straight through
var goarchToNfpm = map[string]string{
"arm": "arm6",
"arm-v7": "arm7",
}
// runEnv - run a shell command with env
func runEnv(args, env []string) error {
if *debug {
@@ -175,15 +167,11 @@ func buildDebAndRpm(dir, version, goarch string) []string {
pkgVersion := version[1:]
pkgVersion = strings.Replace(pkgVersion, "β", "-beta", -1)
pkgVersion = strings.Replace(pkgVersion, "-", ".", -1)
nfpmArch, ok := goarchToNfpm[goarch]
if !ok {
nfpmArch = goarch
}
// Make nfpm.yaml from the template
substitute("../bin/nfpm.yaml", path.Join(dir, "nfpm.yaml"), map[string]string{
"Version": pkgVersion,
"Arch": nfpmArch,
"Arch": goarch,
})
// build them
@@ -389,7 +377,7 @@ func compileArch(version, goos, goarch, dir string) bool {
artifacts := []string{buildZip(dir)}
// build a .deb and .rpm if appropriate
if goos == "linux" {
artifacts = append(artifacts, buildDebAndRpm(dir, version, goarch)...)
artifacts = append(artifacts, buildDebAndRpm(dir, version, stripVersion(goarch))...)
}
if *copyAs != "" {
for _, artifact := range artifacts {

View File

@@ -52,7 +52,6 @@ docs = [
"mailru.md",
"mega.md",
"memory.md",
"netstorage.md",
"azureblob.md",
"onedrive.md",
"opendrive.md",
@@ -64,8 +63,8 @@ docs = [
"putio.md",
"seafile.md",
"sftp.md",
"storj.md",
"sugarsync.md",
"tardigrade.md",
"uptobox.md",
"union.md",
"webdav.md",

View File

@@ -41,7 +41,7 @@ You can discover what commands a backend implements by using
rclone backend help <backendname>
You can also discover information about the backend using (see
[operations/fsinfo](/rc/#operations-fsinfo) in the remote control docs
[operations/fsinfo](/rc/#operations/fsinfo) in the remote control docs
for more info).
rclone backend features remote:
@@ -55,7 +55,7 @@ Pass arguments to the backend by placing them on the end of the line
rclone backend cleanup remote:path file1 file2 file3
Note to run these commands on a running backend then see
[backend/command](/rc/#backend-command) in the rc docs.
[backend/command](/rc/#backend/command) in the rc docs.
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(2, 1e6, command, args)
@@ -149,7 +149,7 @@ See [the "rclone backend" command](/commands/rclone_backend/) for more
info on how to pass options and arguments.
These can be run on a running backend using the rc command
[backend/command](/rc/#backend-command).
[backend/command](/rc/#backend/command).
`, name)
for _, cmd := range cmds {

View File

@@ -168,7 +168,7 @@ func mount(VFS *vfs.VFS, mountPath string, opt *mountlib.Options) (<-chan error,
host.SetCapCaseInsensitive(f.Features().CaseInsensitive)
// Create options
options := mountOptions(VFS, opt.DeviceName, mountpoint, opt)
options := mountOptions(VFS, f.Name()+":"+f.Root(), mountpoint, opt)
fs.Debugf(f, "Mounting with options: %q", options)
// Serve the mount point in the background returning error to errChan

View File

@@ -10,17 +10,11 @@
package cmount
import (
"runtime"
"testing"
"github.com/rclone/rclone/fstest/testy"
"github.com/rclone/rclone/vfs/vfstest"
)
func TestMount(t *testing.T) {
// Disable tests under macOS and the CI since they are locking up
if runtime.GOOS == "darwin" {
testy.SkipUnreliable(t)
}
vfstest.RunTests(t, false, mount)
}

View File

@@ -165,7 +165,7 @@ func runRoot(cmd *cobra.Command, args []string) {
// setupRootCommand sets default usage, help, and error handling for
// the root command.
//
// Helpful example: https://github.com/moby/moby/blob/master/cli/cobra.go
// Helpful example: http://rtfcode.com/xref/moby-17.03.2-ce/cli/cobra.go
func setupRootCommand(rootCmd *cobra.Command) {
ci := fs.GetConfig(context.Background())
// Add global flags
@@ -329,29 +329,12 @@ func showBackend(name string) {
if opt.IsPassword {
fmt.Printf("**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).\n\n")
}
fmt.Printf("Properties:\n\n")
fmt.Printf("- Config: %s\n", opt.Name)
fmt.Printf("- Env Var: %s\n", opt.EnvVarName(backend.Prefix))
if opt.Provider != "" {
fmt.Printf("- Provider: %s\n", opt.Provider)
}
fmt.Printf("- Type: %s\n", opt.Type())
defaultValue := opt.GetValue()
// Default value and Required are related: Required means option must
// have a value, but if there is a default then a value does not have
to be explicitly set and then Required makes no difference.
if defaultValue != "" {
fmt.Printf("- Default: %s\n", quoteString(defaultValue))
} else {
fmt.Printf("- Required: %v\n", opt.Required)
}
// List examples / possible choices
fmt.Printf("- Default: %s\n", quoteString(opt.GetValue()))
if len(opt.Examples) > 0 {
if opt.Exclusive {
fmt.Printf("- Choices:\n")
} else {
fmt.Printf("- Examples:\n")
}
fmt.Printf("- Examples:\n")
for _, ex := range opt.Examples {
fmt.Printf(" - %s\n", quoteString(ex.Value))
for _, line := range strings.Split(ex.Help, "\n") {

View File

@@ -86,7 +86,7 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
f := VFS.Fs()
fs.Debugf(f, "Mounting on %q", mountpoint)
c, err := fuse.Mount(mountpoint, mountOptions(VFS, opt.DeviceName, opt)...)
c, err := fuse.Mount(mountpoint, mountOptions(VFS, f.Name()+":"+f.Root(), opt)...)
if err != nil {
return nil, nil, err
}

View File

@@ -25,10 +25,11 @@ func init() {
// mountOptions configures the options from the command line flags
//
// man mount.fuse for more info and note the -o flag for other options
func mountOptions(fsys *FS, f fs.Fs, opt *mountlib.Options) (mountOpts *fuse.MountOptions) {
func mountOptions(fsys *FS, f fs.Fs) (mountOpts *fuse.MountOptions) {
device := f.Name() + ":" + f.Root()
mountOpts = &fuse.MountOptions{
AllowOther: fsys.opt.AllowOther,
FsName: opt.DeviceName,
FsName: device,
Name: "rclone",
DisableXAttrs: true,
Debug: fsys.opt.DebugFUSE,
@@ -119,7 +120,7 @@ func mountOptions(fsys *FS, f fs.Fs, opt *mountlib.Options) (mountOpts *fuse.Mou
if runtime.GOOS == "darwin" {
opts = append(opts,
// VolumeName sets the volume name shown in Finder.
fmt.Sprintf("volname=%s", opt.VolumeName),
fmt.Sprintf("volname=%s", device),
// NoAppleXattr makes OSXFUSE disallow extended attributes with the
// prefix "com.apple.". This disables persistent Finder state and
@@ -166,7 +167,7 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
//mOpts.Debug = mountlib.DebugFUSE
//conn := fusefs.NewFileSystemConnector(nodeFs.Root(), mOpts)
mountOpts := mountOptions(fsys, f, opt)
mountOpts := mountOptions(fsys, f)
// FIXME fill out
opts := fusefs.Options{

View File

@@ -40,7 +40,6 @@ type Options struct {
ExtraOptions []string
ExtraFlags []string
AttrTimeout time.Duration // how long the kernel caches attribute for
DeviceName string
VolumeName string
NoAppleDouble bool
NoAppleXattr bool
@@ -126,7 +125,6 @@ func AddFlags(flagSet *pflag.FlagSet) {
flags.BoolVarP(flagSet, &Opt.AsyncRead, "async-read", "", Opt.AsyncRead, "Use asynchronous reads (not supported on Windows)")
flags.FVarP(flagSet, &Opt.MaxReadAhead, "max-read-ahead", "", "The number of bytes that can be prefetched for sequential reads (not supported on Windows)")
flags.BoolVarP(flagSet, &Opt.WritebackCache, "write-back-cache", "", Opt.WritebackCache, "Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows)")
flags.StringVarP(flagSet, &Opt.DeviceName, "devname", "", Opt.DeviceName, "Set the device name - default is remote:path")
// Windows and OSX
flags.StringVarP(flagSet, &Opt.VolumeName, "volname", "", Opt.VolumeName, "Set the volume name (supported on Windows and OSX only)")
// OSX only
@@ -237,7 +235,6 @@ func (m *MountPoint) Mount() (daemon *os.Process, err error) {
return nil, err
}
m.SetVolumeName(m.MountOpt.VolumeName)
m.SetDeviceName(m.MountOpt.DeviceName)
// Start background task if --daemon is specified
if m.MountOpt.Daemon {

View File

@@ -16,16 +16,11 @@ import (
"github.com/rclone/rclone/cmd/mountlib"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/rc"
"github.com/rclone/rclone/fstest/testy"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestRc(t *testing.T) {
// Disable tests under macOS and the CI since they are locking up
if runtime.GOOS == "darwin" {
testy.SkipUnreliable(t)
}
ctx := context.Background()
configfile.Install()
mount := rc.Calls.Get("mount/mount")
@@ -35,14 +30,19 @@ func TestRc(t *testing.T) {
getMountTypes := rc.Calls.Get("mount/types")
assert.NotNil(t, getMountTypes)
localDir := t.TempDir()
err := ioutil.WriteFile(filepath.Join(localDir, "file.txt"), []byte("hello"), 0666)
localDir, err := ioutil.TempDir("", "rclone-mountlib-localDir")
require.NoError(t, err)
defer func() { _ = os.RemoveAll(localDir) }()
err = ioutil.WriteFile(filepath.Join(localDir, "file.txt"), []byte("hello"), 0666)
require.NoError(t, err)
mountPoint := t.TempDir()
mountPoint, err := ioutil.TempDir("", "rclone-mountlib-mountPoint")
require.NoError(t, err)
if runtime.GOOS == "windows" {
// Windows requires the mount point not to exist
require.NoError(t, os.RemoveAll(mountPoint))
} else {
defer func() { _ = os.RemoveAll(mountPoint) }()
}
out, err := getMountTypes.Fn(ctx, nil)

View File

@@ -87,7 +87,7 @@ func (m *MountPoint) CheckAllowings() error {
// SetVolumeName with sensible default
func (m *MountPoint) SetVolumeName(vol string) {
if vol == "" {
vol = fs.ConfigString(m.Fs)
vol = m.Fs.Name() + ":" + m.Fs.Root()
}
m.MountOpt.SetVolumeName(vol)
}
@@ -102,11 +102,3 @@ func (o *Options) SetVolumeName(vol string) {
}
o.VolumeName = vol
}
// SetDeviceName with sensible default
func (m *MountPoint) SetDeviceName(dev string) {
if dev == "" {
dev = fs.ConfigString(m.Fs)
}
m.MountOpt.DeviceName = dev
}

Binary file not shown (image: 1.4 KiB before, 20 KiB after).

Binary file not shown (image: 724 B before, 5.6 KiB after).


@@ -23,7 +23,6 @@ import (
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/testy"
"github.com/rclone/rclone/lib/file"
"github.com/stretchr/testify/assert"
@@ -304,10 +303,6 @@ func (a *APIClient) request(path string, in, out interface{}, wantErr bool) {
}
func testMountAPI(t *testing.T, sockAddr string) {
// Disable tests under macOS and linux in the CI since they are locking up
if runtime.GOOS == "darwin" || runtime.GOOS == "linux" {
testy.SkipUnreliable(t)
}
if _, mountFn := mountlib.ResolveMountMethod(""); mountFn == nil {
t.Skip("Test requires working mount command")
}


@@ -16,10 +16,7 @@ import (
)
// Help describes the options for the serve package
var Help = `
#### Template
--template allows a user to specify a custom markup template for http
var Help = `--template allows a user to specify a custom markup template for http
and webdav serve functions. The server exports the following markup
to be used within the template to serve pages:


@@ -23,7 +23,7 @@ func AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string, Opt *httplib.Options)
flags.StringVarP(flagSet, &Opt.SslKey, prefix+"key", "", Opt.SslKey, "SSL PEM Private key")
flags.StringVarP(flagSet, &Opt.ClientCA, prefix+"client-ca", "", Opt.ClientCA, "Client certificate authority to verify clients with")
flags.StringVarP(flagSet, &Opt.HtPasswd, prefix+"htpasswd", "", Opt.HtPasswd, "htpasswd file - if not provided no authentication is done")
flags.StringVarP(flagSet, &Opt.Realm, prefix+"realm", "", Opt.Realm, "Realm for authentication")
flags.StringVarP(flagSet, &Opt.Realm, prefix+"realm", "", Opt.Realm, "realm for authentication")
flags.StringVarP(flagSet, &Opt.BasicUser, prefix+"user", "", Opt.BasicUser, "User name for authentication")
flags.StringVarP(flagSet, &Opt.BasicPass, prefix+"pass", "", Opt.BasicPass, "Password for authentication")
flags.StringVarP(flagSet, &Opt.BaseURL, prefix+"baseurl", "", Opt.BaseURL, "Prefix for URLs - leave blank for root")


@@ -16,7 +16,6 @@ TestFichier:
TestFTP:
TestGoogleCloudStorage:
TestHubic:
TestNetStorage:
TestOneDrive:
TestPcloud:
TestQingStor:


@@ -7,7 +7,9 @@ import (
"crypto/rand"
"encoding/hex"
"io"
"io/ioutil"
"net/http"
"os"
"strings"
"testing"
@@ -111,7 +113,14 @@ func TestResticHandler(t *testing.T) {
}
// setup rclone with a local backend in a temporary directory
tempdir := t.TempDir()
tempdir, err := ioutil.TempDir("", "rclone-restic-test-")
require.NoError(t, err)
// make sure the tempdir is properly removed
defer func() {
err := os.RemoveAll(tempdir)
require.NoError(t, err)
}()
// globally set append-only mode
prev := appendOnly


@@ -7,7 +7,9 @@ import (
"context"
"crypto/rand"
"io"
"io/ioutil"
"net/http"
"os"
"strings"
"testing"
@@ -33,7 +35,14 @@ func TestResticPrivateRepositories(t *testing.T) {
require.NoError(t, err)
// setup rclone with a local backend in a temporary directory
tempdir := t.TempDir()
tempdir, err := ioutil.TempDir("", "rclone-restic-test-")
require.NoError(t, err)
// make sure the tempdir is properly removed
defer func() {
err := os.RemoveAll(tempdir)
require.NoError(t, err)
}()
// globally set private-repos mode & test user
prev := privateRepos


@@ -8,7 +8,6 @@ exec rclone --check-normalization=true --check-control=true --check-length=true
TestDrive:testInfo \
TestDropbox:testInfo \
TestGoogleCloudStorage:rclone-testinfo \
TestnStorage:testInfo \
TestOneDrive:testInfo \
TestS3:rclone-testinfo \
TestSftp:testInfo \


@@ -5,13 +5,11 @@ import (
"context"
"errors"
"fmt"
"log"
"time"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/flags"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
@@ -65,32 +63,13 @@ then add the ` + "`--localtime`" + ` flag.
`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
f, remote := newFsDst(args)
f, fileName := cmd.NewFsFile(args[0])
cmd.Run(true, false, command, func() error {
return Touch(context.Background(), f, remote)
return Touch(context.Background(), f, fileName)
})
},
}
// newFsDst creates a new dst fs from the arguments.
//
// The returned fs will never point to a file. It will point to the
// parent directory of specified path, and is returned together with
// the basename of file or directory, except if argument is only a
// remote name. Similar to cmd.NewFsDstFile, but without raising fatal
// when name of file or directory is empty (e.g. "remote:" or "remote:path/").
func newFsDst(args []string) (f fs.Fs, remote string) {
root, remote, err := fspath.Split(args[0])
if err != nil {
log.Fatalf("Parsing %q failed: %v", args[0], err)
}
if root == "" {
root = "."
}
f = cmd.NewFsDir([]string{root})
return f, remote
}
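The `newFsDst` helper above leans on `fspath.Split`; a minimal sketch of that behaviour, assuming illustrative path values:

```go
package main

import (
	"fmt"
	"log"

	"github.com/rclone/rclone/fs/fspath"
)

func main() {
	// Split separates a remote path into parent and leaf, as used by
	// newFsDst above; values shown are illustrative.
	root, leaf, err := fspath.Split("remote:path/to/file.txt")
	if err != nil {
		log.Fatalf("Parsing failed: %v", err)
	}
	fmt.Printf("root=%q leaf=%q\n", root, leaf) // root="remote:path/to/" leaf="file.txt"
}
```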
// parseTimeArgument parses a timestamp string according to specific layouts
func parseTimeArgument(timeString string) (time.Time, error) {
layout := defaultLayout
@@ -128,51 +107,47 @@ func createEmptyObject(ctx context.Context, remote string, modTime time.Time, f
}
// Touch create new file or change file modification time.
func Touch(ctx context.Context, f fs.Fs, remote string) error {
func Touch(ctx context.Context, f fs.Fs, fileName string) error {
t, err := timeOfTouch()
if err != nil {
return err
}
fs.Debugf(nil, "Touch time %v", t)
file, err := f.NewObject(ctx, remote)
file, err := f.NewObject(ctx, fileName)
if err != nil {
if errors.Is(err, fs.ErrorObjectNotFound) {
// Touching non-existent path, possibly creating it as a new file
if remote == "" {
fs.Logf(f, "Not touching empty directory")
return nil
}
// Touch single non-existent file
if notCreateNewFile {
fs.Logf(f, "Not touching non-existent file due to --no-create")
return nil
}
if recursive {
// For consistency, --recursive never creates new files.
fs.Logf(f, "Not touching non-existent file due to --recursive")
return nil
}
if operations.SkipDestructive(ctx, f, "touch (create)") {
return nil
}
fs.Debugf(f, "Touching (creating) %q", remote)
if err = createEmptyObject(ctx, remote, t, f); err != nil {
fs.Debugf(f, "Touching (creating)")
if err = createEmptyObject(ctx, fileName, t, f); err != nil {
return fmt.Errorf("failed to touch (create): %w", err)
}
}
if errors.Is(err, fs.ErrorIsDir) {
// Touching existing directory
if recursive {
fs.Debugf(f, "Touching recursively files in directory %q", remote)
return operations.TouchDir(ctx, f, remote, t, true)
// Touch existing directory, recursive
fs.Debugf(nil, "Touching files in directory recursively")
return operations.TouchDir(ctx, f, t, true)
}
fs.Debugf(f, "Touching non-recursively files in directory %q", remote)
return operations.TouchDir(ctx, f, remote, t, false)
// Touch existing directory without recursing
fs.Debugf(nil, "Touching files in directory non-recursively")
return operations.TouchDir(ctx, f, t, false)
}
return err
}
// Touch single existing file
if !operations.SkipDestructive(ctx, remote, "touch") {
fs.Debugf(f, "Touching %q", remote)
if !operations.SkipDestructive(ctx, fileName, "touch") {
fs.Debugf(f, "Touching %q", fileName)
err = file.SetModTime(ctx, t)
if err != nil {
return fmt.Errorf("failed to touch: %w", err)


@@ -113,15 +113,6 @@ func TestTouchCreateMultipleDirAndFile(t *testing.T) {
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, []string{"a", "a/b"}, fs.ModTimeNotSupported)
}
func TestTouchEmptyName(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
err := Touch(context.Background(), r.Fremote, "")
require.NoError(t, err)
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{}, []string{}, fs.ModTimeNotSupported)
}
func TestTouchEmptyDir(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()


@@ -43,6 +43,7 @@ func init() {
flags.StringVarP(cmdFlags, &outFileName, "output", "o", "", "Output to file instead of stdout")
// Files
flags.BoolVarP(cmdFlags, &opts.ByteSize, "size", "s", false, "Print the size in bytes of each file.")
flags.BoolVarP(cmdFlags, &opts.UnitSize, "human", "", false, "Print the size in a more human readable way.")
flags.BoolVarP(cmdFlags, &opts.FileMode, "protections", "p", false, "Print the protections for each file.")
// flags.BoolVarP(cmdFlags, &opts.ShowUid, "uid", "", false, "Displays file owner or UID number.")
// flags.BoolVarP(cmdFlags, &opts.ShowGid, "gid", "", false, "Displays file group owner or GID number.")


@@ -102,7 +102,8 @@ var envInitial []string
// sets testConfig to testFolder/rclone.config.
func createTestEnvironment(t *testing.T) {
//Set temporary folder for config and test data
tempFolder := t.TempDir()
tempFolder, err := ioutil.TempDir("", "rclone_cmdtest_")
require.NoError(t, err)
testFolder = filepath.ToSlash(tempFolder)
// Set path to temporary config file


@@ -105,7 +105,6 @@ WebDAV or S3, that work out of the box.)
{{< provider_list >}}
{{< provider name="1Fichier" home="https://1fichier.com/" config="/fichier/" start="true">}}
{{< provider name="Akamai Netstorage" home="https://www.akamai.com/us/en/products/media-delivery/netstorage.jsp" config="/netstorage/" >}}
{{< provider name="Alibaba Cloud (Aliyun) Object Storage System (OSS)" home="https://www.alibabacloud.com/product/oss/" config="/s3/#alibaba-oss" >}}
{{< provider name="Amazon Drive" home="https://www.amazon.com/clouddrive" config="/amazonclouddrive/" note="#status">}}
{{< provider name="Amazon S3" home="https://aws.amazon.com/s3/" config="/s3/" >}}
@@ -115,7 +114,6 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Citrix ShareFile" home="http://sharefile.com/" config="/sharefile/" >}}
{{< provider name="C14" home="https://www.online.net/en/storage/c14-cold-storage" config="/sftp/#c14" >}}
{{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
{{< provider name="Digi Storage" home="https://storage.rcs-rds.ro/" config="/koofr/#digi-storage" >}}
{{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
{{< provider name="Dropbox" home="https://www.dropbox.com/" config="/dropbox/" >}}
{{< provider name="Enterprise File Fabric" home="https://storagemadeeasy.com/about/" config="/filefabric/" >}}
@@ -150,13 +148,12 @@ WebDAV or S3, that work out of the box.)
{{< provider name="rsync.net" home="https://rsync.net/products/rclone.html" config="/sftp/#rsync-net" >}}
{{< provider name="Scaleway" home="https://www.scaleway.com/object-storage/" config="/s3/#scaleway" >}}
{{< provider name="Seafile" home="https://www.seafile.com/" config="/seafile/" >}}
{{< provider name="Seagate Lyve Cloud" home="https://www.seagate.com/gb/en/services/cloud/storage/" config="/s3/#lyve" >}}
{{< provider name="SeaweedFS" home="https://github.com/chrislusf/seaweedfs/" config="/s3/#seaweedfs" >}}
{{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol" config="/sftp/" >}}
{{< provider name="Sia" home="https://sia.tech/" config="/sia/" >}}
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
{{< provider name="Storj" home="https://storj.io/" config="/storj/" >}}
{{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}}
{{< provider name="Tardigrade" home="https://tardigrade.io/" config="/tardigrade/" >}}
{{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
{{< provider name="Uptobox" home="https://uptobox.com" config="/uptobox/" >}}
{{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}


@@ -33,7 +33,7 @@ First run:
This will guide you through an interactive setup process:
```
No remotes found, make a new one?
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config


@@ -53,7 +53,7 @@ Here is an example of how to make a remote called `remote`. First run:
This will guide you through an interactive setup process:
```
No remotes found, make a new one?
No remotes found - make a new one
n) New remote
r) Rename remote
c) Copy remote


@@ -550,27 +550,3 @@ put them back in again.` >}}
* Fredric Arklid <fredric.arklid@consid.se>
* Andy Jackson <Andrew.Jackson@bl.uk>
* Sinan Tan <i@tinytangent.com>
* deinferno <14363193+deinferno@users.noreply.github.com>
* rsapkf <rsapkfff@pm.me>
* Will Holtz <wholtz@gmail.com>
* GGG KILLER <gggkiller2@gmail.com>
* Logeshwaran Murugesan <logeshwaran@testpress.in>
* Lu Wang <coolwanglu@gmail.com>
* Bumsu Hyeon <ksitht@gmail.com>
* Shmz Ozggrn <98463324+ShmzOzggrn@users.noreply.github.com>
* Kim <kim@jotta.no>
* Niels van de Weem <n.van.de.weem@smile.nl>
* Koopa <codingkoopa@gmail.com>
* Yunhai Luo <yunhai-luo@hotmail.com>
* Charlie Jiang <w@chariri.moe>
* Alain Nussbaumer <alain.nussbaumer@alleluia.ch>
* Vanessasaurus <814322+vsoch@users.noreply.github.com>
* Isaac Levy <isaac.r.levy@gmail.com>
* Gourav T <workflowautomation@protonmail.com>
* Paulo Martins <paulo.pontes.m@gmail.com>
* viveknathani <viveknathani2402@gmail.com>
* Eng Zer Jun <engzerjun@gmail.com>
* Abhiraj <abhiraj.official15@gmail.com>
* Márton Elek <elek@apache.org> <elek@users.noreply.github.com>
* Vincent Murphy <vdm@vdm.ie>
* ctrl-q <34975747+ctrl-q@users.noreply.github.com>


@@ -19,7 +19,7 @@ configuration. For a remote called `remote`. First run:
This will guide you through an interactive setup process:
```
No remotes found, make a new one?
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config
@@ -81,14 +81,6 @@ key. It is stored using RFC3339 Format time with nanosecond
precision. The metadata is supplied during directory listings so
there is no overhead to using it.
### Performance
When uploading large files, increasing the value of
`--azureblob-upload-concurrency` will increase performance at the cost
of using more memory. The default of 16 is set quite conservatively to
use less memory. It may be necessary to raise it to 64 or higher to
fully utilize a 1 GBit/s link with a single file transfer.
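As a hedged example, a transfer tuned per the paragraph above might look like this (remote and container names illustrative):

```
rclone copy /path/to/bigfile azureblob:container --azureblob-upload-concurrency 64
```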
### Restricted filename characters
In addition to the [default restricted characters set](/overview/#restricted-characters)


@@ -23,7 +23,7 @@ recommended method. See below for further details on generating and using
an Application Key.
```
No remotes found, make a new one?
No remotes found - make a new one
n) New remote
q) Quit config
n/q> n


@@ -22,7 +22,7 @@ Here is an example of how to make a remote called `remote`. First run:
This will guide you through an interactive setup process:
```
No remotes found, make a new one?
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config


@@ -34,7 +34,7 @@ Here is an example of how to make a remote called `test-cache`. First run:
This will guide you through an interactive setup process:
```
No remotes found, make a new one?
No remotes found - make a new one
n) New remote
r) Rename remote
c) Copy remote


@@ -25,7 +25,7 @@ Now configure `chunker` using `rclone config`. We will call this one `overlay`
to separate it from the `remote` itself.
```
No remotes found, make a new one?
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config


@@ -107,9 +107,8 @@ At the end of the non interactive process, rclone will return a result
with `State` as empty string.
If `--all` is passed then rclone will ask all the config questions,
not just the post config questions. Parameters that are supplied on
the command line or from environment variables are used as defaults
for questions as usual.
not just the post config questions. Any parameters are used as
defaults for questions as usual.
Note that `bin/config.py` in the rclone source implements this protocol
as a readable demonstration.
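As a hedged illustration of the non-interactive flow described above (the remote name and backend are made up; the JSON exchange ends with `State` as an empty string):

```
rclone config create mydrive drive --all --non-interactive
```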


@@ -74,8 +74,7 @@ Now the `dedupe` session
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
q) Quit
s/k/r/q> k
s/k/r> k
Enter the number of the file to keep> 1
one.txt: Deleted 1 extra copies
two.txt: Found 3 files with duplicate names
@@ -86,8 +85,7 @@ Now the `dedupe` session
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
q) Quit
s/k/r/q> r
s/k/r> r
two-1.txt: renamed from: two.txt
two-2.txt: renamed from: two.txt
two-3.txt: renamed from: two.txt


@@ -341,7 +341,7 @@ together, if `--auth-proxy` is set the authorized keys option will be
ignored.
There is an example program
[bin/test_proxy.py](https://github.com/rclone/rclone/blob/master/bin/test_proxy.py)
[bin/test_proxy.py](https://github.com/rclone/rclone/blob/master/test_proxy.py)
in the rclone source code.
The program's job is to take a `user` and `pass` on the input and turn


@@ -86,7 +86,7 @@ configure a dedicated path for encrypted content, and access it
exclusively through a crypt remote.
```
No remotes found, make a new one?
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config
@@ -369,7 +369,7 @@ Obfuscation cannot be relied upon for strong protection.
Cloud storage systems have limits on file name length and
total path length which rclone is more likely to breach using
"Standard" file name encryption. Where file names are less than 156
"Standard" file name encryption. Where file names are less thn 156
characters in length issues should not be encountered, irrespective of
cloud storage provider.


@@ -164,8 +164,8 @@ Volumes can be created with [docker volume create](https://docs.docker.com/engin
Here are a few examples:
```
docker volume create vol1 -d rclone -o remote=storj: -o vfs-cache-mode=full
docker volume create vol2 -d rclone -o remote=:storj,access_grant=xxx:heimdall
docker volume create vol3 -d rclone -o type=storj -o path=heimdall -o storj-access-grant=xxx -o poll-interval=0
docker volume create vol2 -d rclone -o remote=:tardigrade,access_grant=xxx:heimdall
docker volume create vol3 -d rclone -o type=tardigrade -o path=heimdall -o tardigrade-access-grant=xxx -o poll-interval=0
```
Note the `-d rclone` flag that tells docker to request volume from the


@@ -28,7 +28,6 @@ option:
See the following for detailed instructions for
* [1Fichier](/fichier/)
* [Akamai Netstorage](/netstorage/)
* [Alias](/alias/)
* [Amazon Drive](/amazonclouddrive/)
* [Amazon S3](/s3/)
@@ -39,7 +38,6 @@ See the following for detailed instructions for
* [Compress](/compress/)
* [Crypt](/crypt/) - to encrypt other remotes
* [DigitalOcean Spaces](/s3/#digitalocean-spaces)
* [Digi Storage](/koofr/#digi-storage)
* [Dropbox](/dropbox/)
* [Enterprise File Fabric](/filefabric/)
* [FTP](/ftp/)
@@ -66,8 +64,8 @@ See the following for detailed instructions for
* [Seafile](/seafile/)
* [SFTP](/sftp/)
* [Sia](/sia/)
* [Storj](/storj/)
* [SugarSync](/sugarsync/)
* [Tardigrade](/tardigrade/)
* [Union](/union/)
* [Uptobox](/uptobox/)
* [WebDAV](/webdav/)
@@ -552,7 +550,7 @@ Is equivalent to this:
Bandwidth limit apply to the data transfer for all backends. For most
backends the directory listing bandwidth is also included (exceptions
being the non HTTP backends, `ftp`, `sftp` and `storj`).
being the non HTTP backends, `ftp`, `sftp` and `tardigrade`).
Note that the units are **Byte/s**, not **bit/s**. Typically
connections are measured in bit/s - to convert divide by 8. For
@@ -810,12 +808,6 @@ This flag can be useful for debugging and in exceptional circumstances
(e.g. Google Drive limiting the total volume of Server Side Copies to
100 GiB/day).
### --disable-http2
This stops rclone from trying to use HTTP/2 if available. This can
sometimes speed up transfers due to a
[problem in the Go standard library](https://github.com/golang/go/issues/37373).
### --dscp VALUE ###
Specify a DSCP value or name to use in connections. This could help QoS
@@ -1653,7 +1645,8 @@ This can be very useful for `rclone mount` to control the behaviour of
applications using it.
This limit applies to all HTTP based backends and to the FTP and SFTP
backends. It does not apply to the local backend or the Storj backend.
backends. It does not apply to the local backend or the Tardigrade
backend.
See also `--tpslimit-burst`.
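For example, to cap a mount at 10 transactions per second (mount point illustrative):

```
rclone mount remote: /mnt/rclone --tpslimit 10
```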
@@ -1798,30 +1791,24 @@ The default is to run 4 file transfers in parallel.
This forces rclone to skip any files which exist on the destination
and have a modified time that is newer than the source file.
This can be useful in avoiding needless transfers when transferring to
a remote which doesn't support modification times directly (or when
using `--use-server-modtime` to avoid extra API calls) as it is more
accurate than a `--size-only` check and faster than using
`--checksum`. On such remotes (or when using `--use-server-modtime`)
the time checked will be the uploaded time.
If an existing destination file has a modification time older than the
source file's, it will be updated if the sizes are different. If the
sizes are the same, it will be updated if the checksum is different or
not available.
This can be useful when transferring to a remote which doesn't support
mod times directly (or when using `--use-server-modtime` to avoid extra
API calls) as it is more accurate than a `--size-only` check and faster
than using `--checksum`.
If an existing destination file has a modification time equal (within
the computed modify window) to the source file's, it will be updated
if the sizes are different. The checksum will not be checked in this
case unless the `--checksum` flag is provided.
the computed modify window precision) to the source file's, it will be
updated if the sizes are different. If `--checksum` is set then
rclone will update the destination if the checksums differ too.
In all other cases the file will not be updated.
If an existing destination file is older than the source file then
it will be updated if the size or checksum differs from the source file.
Consider using the `--modify-window` flag to compensate for time skews
between the source and the backend, for backends that do not support
mod times, and instead use uploaded times. However, if the backend
does not support checksums, note that sync'ing or copying within the
time skew window may still result in additional transfers for safety.
On remotes which don't support mod time directly (or when using
`--use-server-modtime`) the time checked will be the uploaded time.
This means that if uploading to one of these remotes, rclone will skip
any files which exist on the destination and have an uploaded time that
is newer than the modification time of the source file.
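A hedged example combining the flags discussed above (paths illustrative):

```
rclone sync /local/dir remote:dir --update --use-server-modtime
```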
### --use-mmap ###


@@ -18,7 +18,7 @@ See the [install](https://rclone.org/install/) documentation for more details.
| Intel/AMD - 32 Bit | {{< download windows 386 >}} | - | {{< download linux 386 >}} | {{< download linux 386 deb >}} | {{< download linux 386 rpm >}} | {{< download freebsd 386 >}} | {{< download netbsd 386 >}} | {{< download openbsd 386 >}} | {{< download plan9 386 >}} | - |
| ARMv6 - 32 Bit | - | - | {{< download linux arm >}} | {{< download linux arm deb >}} | {{< download linux arm rpm >}} | {{< download freebsd arm >}} | {{< download netbsd arm >}} | - | - | - |
| ARMv7 - 32 Bit | - | - | {{< download linux arm-v7 >}} | {{< download linux arm-v7 deb >}} | {{< download linux arm-v7 rpm >}} | {{< download freebsd arm-v7 >}} | {{< download netbsd arm-v7 >}} | - | - | - |
| ARM - 64 Bit | {{< download windows arm64 >}} | {{< download osx arm64 >}} | {{< download linux arm64 >}} | {{< download linux arm64 deb >}} | {{< download linux arm64 rpm >}} | - | - | - | - | - |
| ARM - 64 Bit | - | {{< download osx arm64 >}} | {{< download linux arm64 >}} | {{< download linux arm64 deb >}} | {{< download linux arm64 rpm >}} | - | - | - | - | - |
| MIPS - Big Endian | - | - | {{< download linux mips >}} | {{< download linux mips deb >}} | {{< download linux mips rpm >}} | - | - | - | - | - |
| MIPS - Little Endian | - | - | {{< download linux mipsle >}} | {{< download linux mipsle deb >}} | {{< download linux mipsle rpm >}} | - | - | - | - | - |


@@ -22,7 +22,7 @@ Here is an example of how to make a remote called `remote`. First run:
This will guide you through an interactive setup process:
```
No remotes found, make a new one?
No remotes found - make a new one
n) New remote
r) Rename remote
c) Copy remote
@@ -95,8 +95,8 @@ y/e/d> y
Note that rclone runs a webserver on your local machine to collect the
token as returned from Google if you use auto config mode. This only
runs from the moment it opens your browser to the moment you get back
the verification code. This is on `http://127.0.0.1:53682/` and it
may require you to unblock it temporarily if you are running a host
the verification code. This is on `http://127.0.0.1:53682/` and this
it may require you to unblock it temporarily if you are running a host
firewall, or use manual mode.
You can then use it like this,
@@ -410,7 +410,7 @@ For shortcuts pointing to files:
- When downloading the contents of the destination file is downloaded.
- When updating shortcut file with a non shortcut file, the shortcut is removed then a new file is uploaded in place of the shortcut.
- When server-side moving (renaming) the shortcut is renamed, not the destination file.
- When server-side copying the shortcut is copied, not the contents of the shortcut. (unless `--drive-copy-shortcut-content` is in use in which case the contents of the shortcut gets copied).
- When server-side copying the shortcut is copied, not the contents of the shortcut.
- When deleting the shortcut is deleted not the linked file.
- When setting the modification time, the modification time of the linked file will be set.
@@ -1339,8 +1339,7 @@ credentials", which opens the wizard), then "Create credentials"
to the next step; if not, click on "CONFIGURE CONSENT SCREEN" button
(near the top right corner of the right panel), then select "External"
and click on "CREATE"; on the next screen, enter an "Application name"
("rclone" is OK); enter "User Support Email" (your own email is OK);
enter "Developer Contact Email" (your own email is OK); then click on "Save" (all other data is optional).
("rclone" is OK) then click on "Save" (all other data is optional).
Click again on "Credentials" on the left panel to go back to the
"Credentials" screen.


@@ -25,7 +25,7 @@ Here is an example of how to make a remote called `remote`. First run:
This will guide you through an interactive setup process:
```
No remotes found, make a new one?
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config


@@ -23,7 +23,7 @@ Here is an example of how to make a remote called `remote`. First run:
This will guide you through an interactive setup process:
```
No remotes found, make a new one?
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config


@@ -33,9 +33,6 @@ you expect. Instead use a `--filter...` flag.
### Pattern syntax
Here is a formal definition of the pattern syntax,
[examples](#examples) are below.
Rclone matching rules follow a glob style:
* matches any sequence of non-separator (/) characters
@@ -45,10 +42,8 @@ Rclone matching rules follow a glob style:
character class (must be non-empty)
{ pattern-list }
pattern alternatives
{{ regexp }}
regular expression to match
c matches character c (c != *, **, ?, \, [, {, })
\c matches reserved character c (c = *, **, ?, \, [, {, }) or character class
\c matches reserved character c (c = *, **, ?, \, [, {, })
character-range:
@@ -67,10 +62,6 @@ character classes (see [Go regular expression reference](https://golang.org/pkg/
Perl character classes (e.g. \s, \S, \w, \W)
ASCII character classes (e.g. [[:alnum:]], [[:alpha:]], [[:punct:]], [[:xdigit:]])
regexp for advanced users to insert a regular expression - see [below](#regexp) for more info:
Any re2 regular expression not containing `}}`
If the filter pattern starts with a `/` then it only matches
at the top level of the directory tree,
**relative to the root of the remote** (not necessarily the root
@@ -120,75 +111,6 @@ With `--ignore-case`
potato - matches "potato"
- matches "POTATO"
## Using regular expressions in filter patterns {#regexp}
The syntax of filter patterns is glob style matching (like `bash`
uses) to make things easy for users. However this does not provide
absolute control over the matching, so for advanced users rclone also
provides a regular expression syntax.
The regular expressions used are as defined in the [Go regular
expression reference](https://golang.org/pkg/regexp/syntax/). Regular
expressions should be enclosed in `{{` `}}`. They will match only the
last path segment if the glob doesn't start with `/` or the whole path
name if it does.
Here is how the `{{regexp}}` is transformed into a full regular
expression to match the entire path:
{{regexp}} becomes (^|/)(regexp)$
/{{regexp}} becomes ^(regexp)$
Regexp syntax can be mixed with glob syntax, for example
*.{{jpe?g}} to match file.jpg, file.jpeg but not file.png
You can also use regexp flags - to set case insensitive, for example
*.{{(?i)jpg}} to match file.jpg, file.JPG but not file.png
Be careful with wildcards in regular expressions - you don't want them
to match path separators normally. To match any file name starting
with `start` and ending with `end` write
{{start[^/]*end\.jpg}}
Not
{{start.*end\.jpg}}
Which will match a directory called `start` with a file called
`end.jpg` in it as the `.*` will match `/` characters.
Note that you can use `-vv --dump filters` to show the filter patterns
in regexp format - rclone implements the glob patterns by transforming
them into regular expressions.
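A hedged command-line example of the mixed glob/regexp syntax above (remote name illustrative):

```
rclone lsf remote: --include "*.{{jpe?g}}" -vv --dump filters
```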
## Filter pattern examples {#examples}
| Description | Pattern | Matches | Does not match |
| ----------- |-------- | ------- | -------------- |
| Wildcard | `*.jpg` | `/file.jpg` | `/file.png` |
| | | `/dir/file.jpg` | `/dir/file.png` |
| Rooted | `/*.jpg` | `/file.jpg` | `/file.png` |
| | | `/file2.jpg` | `/dir/file.jpg` |
| Alternates | `*.{jpg,png}` | `/file.jpg` | `/file.gif` |
| | | `/dir/file.gif` | `/dir/file.gif` |
| Path Wildcard | `dir/**` | `/dir/anyfile` | `file.png` |
| | | `/subdir/dir/subsubdir/anyfile` | `/subdir/file.png` |
| Any Char | `*.t?t` | `/file.txt` | `/file.qxt` |
| | | `/dir/file.tzt` | `/dir/file.png` |
| Range | `*.[a-z]` | `/file.a` | `/file.0` |
| | | `/dir/file.b` | `/dir/file.1` |
| Escape | `*.\?\?\?` | `/file.???` | `/file.abc` |
| | | `/dir/file.???` | `/dir/file.def` |
| Class | `*.\d\d\d` | `/file.012` | `/file.abc` |
| | | `/dir/file.345` | `/dir/file.def` |
| Regexp | `*.{{jpe?g}}` | `/file.jpeg` | `/file.png` |
| | | `/dir/file.jpg` | `/dir/file.jpeeg` |
| Rooted Regexp | `/{{.*\.jpe?g}}` | `/file.jpeg` | `/file.png` |
| | | `/file.jpg` | `/dir/file.jpg` |
## How filter rules are applied to files
Rclone path/file name filters are made up of one or more of the following flags:


@@ -112,13 +112,13 @@ These flags are available for every command.
--rc-enable-metrics Enable prometheus metrics on /metrics
--rc-files string Path to local files to serve on the HTTP server
--rc-htpasswd string htpasswd file - if not provided no authentication is done
--rc-job-expire-duration duration Expire finished async jobs older than this value (default 1m0s)
--rc-job-expire-interval duration Interval to check for expired async jobs (default 10s)
--rc-job-expire-duration duration expire finished async jobs older than this value (default 1m0s)
--rc-job-expire-interval duration interval to check for expired async jobs (default 10s)
--rc-key string SSL PEM Private key
--rc-max-header-bytes int Maximum size of request header (default 4096)
--rc-no-auth Don't require auth for certain methods
--rc-pass string Password for authentication
--rc-realm string Realm for authentication (default "rclone")
--rc-realm string realm for authentication (default "rclone")
--rc-serve Enable the serving of remote objects
--rc-server-read-timeout duration Timeout for server reading data (default 1h0m0s)
--rc-server-write-timeout duration Timeout for server writing data (default 1h0m0s)
@@ -339,7 +339,7 @@ and may be set in the config file.
--ftp-idle-timeout Duration Max time before closing idle connections (default 1m0s)
--ftp-no-check-certificate Do not verify the TLS certificate of the server
--ftp-pass string FTP password (obscured)
--ftp-port string FTP port number (default 21)
--ftp-port string FTP port, leave blank to use default (21)
--ftp-shut-timeout Duration Maximum time to wait for data connection closing status (default 1m0s)
--ftp-tls Use Implicit FTPS (FTP over TLS)
--ftp-tls-cache-size int Size of TLS session cache for all control and data connections (default 32)
@@ -528,7 +528,7 @@ and may be set in the config file.
--sftp-md5sum-command string The command used to read md5 hashes
--sftp-pass string SSH password, leave blank to use ssh-agent (obscured)
--sftp-path-override string Override path used by SSH connection
--sftp-port string SSH port number (default 22)
--sftp-port string SSH port, leave blank to use default (22)
--sftp-pubkey-file string Optional path to public key file
--sftp-server-command string Specifies the path or command to run a sftp server on the remote host
--sftp-set-modtime Set the modified time on the remote if set (default true)


@@ -27,7 +27,7 @@ For an anonymous FTP server, use `anonymous` as username and your email
address as password.
```
No remotes found, make a new one?
No remotes found - make a new one
n) New remote
r) Rename remote
c) Copy remote
@@ -51,11 +51,11 @@ Choose a number from below, or type in your own value
1 / Connect to ftp.example.com
\ "ftp.example.com"
host> ftp.example.com
FTP username
Enter a string value. Press Enter for the default ("$USER").
FTP username, leave blank for current username, $USER
Enter a string value. Press Enter for the default ("").
user>
FTP port number
Enter a signed integer. Press Enter for the default (21).
FTP port, leave blank to use default (21)
Enter a string value. Press Enter for the default ("").
port>
FTP password
y) Yes type in my own password


@@ -26,7 +26,7 @@ Here is an example of how to make a remote called `remote`. First run:
This will guide you through an interactive setup process:
```
No remotes found, make a new one?
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config


@@ -28,7 +28,7 @@ Now proceed to interactive or manual configuration.
Run `rclone config`:
```
No remotes found, make a new one?
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config

Some files were not shown because too many files have changed in this diff.