Mirror of https://github.com/rclone/rclone.git (synced 2025-12-30 15:13:55 +00:00)

Compare commits: v1.72-stab ... a99d155fd4 (12 commits)
| SHA1 |
|---|
| a99d155fd4 |
| f72b32b470 |
| 9be7f99bf8 |
| 6858bf242e |
| e8c6867e4c |
| 50fbd6b049 |
| 0783cab952 |
| 886ac7af1d |
| 3c40238f02 |
| 46ca0dd7fe |
| 2e968e7ce0 |
| 1886c552db |
@@ -21,6 +21,7 @@ This file describes how to make the various kinds of releases
- make doc
- git status - to check for new man pages - git add them
- git commit -a -v -m "Version v1.XX.0"
- make check
- make retag
- git push origin # without --follow-tags so it doesn't push the tag if it fails
- git push --follow-tags origin
@@ -86,12 +86,56 @@ var (
    metadataMu sync.Mutex
)

// system metadata keys which this backend owns
var systemMetadataInfo = map[string]fs.MetadataHelp{
    "cache-control": {
        Help: "Cache-Control header",
        Type: "string",
        Example: "no-cache",
    },
    "content-disposition": {
        Help: "Content-Disposition header",
        Type: "string",
        Example: "inline",
    },
    "content-encoding": {
        Help: "Content-Encoding header",
        Type: "string",
        Example: "gzip",
    },
    "content-language": {
        Help: "Content-Language header",
        Type: "string",
        Example: "en-US",
    },
    "content-type": {
        Help: "Content-Type header",
        Type: "string",
        Example: "text/plain",
    },
    "tier": {
        Help: "Tier of the object",
        Type: "string",
        Example: "Hot",
        ReadOnly: true,
    },
    "mtime": {
        Help: "Time of last modification, read from rclone metadata",
        Type: "RFC 3339",
        Example: "2006-01-02T15:04:05.999999999Z07:00",
    },
}

// Register with Fs
func init() {
    fs.Register(&fs.RegInfo{
        Name: "azureblob",
        Description: "Microsoft Azure Blob Storage",
        NewFs: NewFs,
        MetadataInfo: &fs.MetadataInfo{
            System: systemMetadataInfo,
            Help: `User metadata is stored as x-ms-meta- keys. Azure metadata keys are case insensitive and are always returned in lower case.`,
        },
        Options: []fs.Option{{
            Name: "account",
            Help: `Azure Storage Account Name.
@@ -810,6 +854,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    f.features = (&fs.Features{
        ReadMimeType: true,
        WriteMimeType: true,
        ReadMetadata: true,
        WriteMetadata: true,
        UserMetadata: true,
        BucketBased: true,
        BucketBasedRootOK: true,
        SetTier: true,
@@ -1157,6 +1204,289 @@ func (o *Object) updateMetadataWithModTime(modTime time.Time) {
    o.meta[modTimeKey] = modTime.Format(timeFormatOut)
}

// parseXMsTags parses the value of the x-ms-tags header into a map.
// It expects comma-separated key=value pairs. Whitespace around keys and
// values is trimmed. Empty pairs and empty keys are rejected.
func parseXMsTags(s string) (map[string]string, error) {
    if strings.TrimSpace(s) == "" {
        return map[string]string{}, nil
    }
    out := make(map[string]string)
    parts := strings.Split(s, ",")
    for _, p := range parts {
        p = strings.TrimSpace(p)
        if p == "" {
            continue
        }
        kv := strings.SplitN(p, "=", 2)
        if len(kv) != 2 {
            return nil, fmt.Errorf("invalid tag %q", p)
        }
        k := strings.TrimSpace(kv[0])
        v := strings.TrimSpace(kv[1])
        if k == "" {
            return nil, fmt.Errorf("invalid tag key in %q", p)
        }
        out[k] = v
    }
    return out, nil
}
// mapMetadataToAzure maps a generic metadata map to Azure HTTP headers,
// user metadata, tags and optional modTime override.
// Reserved x-ms-* keys (except x-ms-tags) are ignored for user metadata.
//
// Pass a logger to surface non-fatal parsing issues (e.g. bad mtime).
func mapMetadataToAzure(meta map[string]string, logf func(string, ...any)) (headers blob.HTTPHeaders, userMeta map[string]*string, tags map[string]string, modTime *time.Time, err error) {
    if meta == nil {
        return headers, nil, nil, nil, nil
    }
    tmp := make(map[string]string)
    for k, v := range meta {
        lowerKey := strings.ToLower(k)
        switch lowerKey {
        case "cache-control":
            headers.BlobCacheControl = pString(v)
        case "content-disposition":
            headers.BlobContentDisposition = pString(v)
        case "content-encoding":
            headers.BlobContentEncoding = pString(v)
        case "content-language":
            headers.BlobContentLanguage = pString(v)
        case "content-type":
            headers.BlobContentType = pString(v)
        case "x-ms-tags":
            parsed, perr := parseXMsTags(v)
            if perr != nil {
                return headers, nil, nil, nil, perr
            }
            // allocate only if there are tags
            if len(parsed) > 0 {
                tags = parsed
            }
        case "mtime":
            // Accept multiple layouts for tolerance
            var parsed time.Time
            var pErr error
            for _, layout := range []string{time.RFC3339Nano, time.RFC3339, timeFormatOut} {
                parsed, pErr = time.Parse(layout, v)
                if pErr == nil {
                    modTime = &parsed
                    break
                }
            }
            // Log and ignore if unparseable
            if modTime == nil && logf != nil {
                logf("metadata: couldn't parse mtime %q: %v", v, pErr)
            }
        case "tier":
            // ignore - handled elsewhere
        default:
            // Filter out other reserved headers so they don't end up as user metadata
            if strings.HasPrefix(lowerKey, "x-ms-") {
                continue
            }
            tmp[lowerKey] = v
        }
    }
    userMeta = toAzureMetaPtr(tmp)
    return headers, userMeta, tags, modTime, nil
}
// toAzureMetaPtr converts a map[string]string to map[string]*string as used by Azure SDK
func toAzureMetaPtr(in map[string]string) map[string]*string {
    if len(in) == 0 {
        return nil
    }
    out := make(map[string]*string, len(in))
    for k, v := range in {
        vv := v
        out[k] = &vv
    }
    return out
}
// assembleCopyParams prepares headers, metadata and tags for copy operations.
//
// It starts from the source properties, optionally overlays mapped metadata
// from rclone's metadata options, ensures mtime presence when mapping is
// enabled, and returns whether mapping was actually requested (hadMapping).
//
// If includeBaseMeta is true, start user metadata from the source's metadata
// and overlay mapped values. This matches multipart copy commit behavior.
// If false, only include mapped user metadata (no source baseline) which
// matches previous singlepart StartCopyFromURL semantics.
func assembleCopyParams(ctx context.Context, f *Fs, src fs.Object, srcProps *blob.GetPropertiesResponse, includeBaseMeta bool) (headers blob.HTTPHeaders, meta map[string]*string, tags map[string]string, hadMapping bool, err error) {
    // Start from source properties
    headers = blob.HTTPHeaders{
        BlobCacheControl: srcProps.CacheControl,
        BlobContentDisposition: srcProps.ContentDisposition,
        BlobContentEncoding: srcProps.ContentEncoding,
        BlobContentLanguage: srcProps.ContentLanguage,
        BlobContentMD5: srcProps.ContentMD5,
        BlobContentType: srcProps.ContentType,
    }
    // Optionally deep copy user metadata pointers from source. Normalise keys to
    // lower-case to avoid duplicate x-ms-meta headers when we later inject/overlay
    // metadata (Azure treats keys case-insensitively but Go's http.Header will
    // join duplicate keys into a comma separated list, which breaks shared-key
    // signing).
    if includeBaseMeta && len(srcProps.Metadata) > 0 {
        meta = make(map[string]*string, len(srcProps.Metadata))
        for k, v := range srcProps.Metadata {
            if v != nil {
                vv := *v
                meta[strings.ToLower(k)] = &vv
            }
        }
    }

    // Only consider mapping if metadata pipeline is enabled
    if fs.GetConfig(ctx).Metadata {
        mapped, mapErr := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
        if mapErr != nil {
            return headers, meta, nil, false, fmt.Errorf("failed to map metadata: %w", mapErr)
        }
        if mapped != nil {
            // Map rclone metadata to Azure shapes
            mappedHeaders, userMeta, mappedTags, mappedModTime, herr := mapMetadataToAzure(mapped, func(format string, args ...any) { fs.Debugf(f, format, args...) })
            if herr != nil {
                return headers, meta, nil, false, fmt.Errorf("metadata mapping: %w", herr)
            }
            hadMapping = true
            // Overlay headers (only non-nil)
            if mappedHeaders.BlobCacheControl != nil {
                headers.BlobCacheControl = mappedHeaders.BlobCacheControl
            }
            if mappedHeaders.BlobContentDisposition != nil {
                headers.BlobContentDisposition = mappedHeaders.BlobContentDisposition
            }
            if mappedHeaders.BlobContentEncoding != nil {
                headers.BlobContentEncoding = mappedHeaders.BlobContentEncoding
            }
            if mappedHeaders.BlobContentLanguage != nil {
                headers.BlobContentLanguage = mappedHeaders.BlobContentLanguage
            }
            if mappedHeaders.BlobContentType != nil {
                headers.BlobContentType = mappedHeaders.BlobContentType
            }
            // Overlay user metadata
            if len(userMeta) > 0 {
                if meta == nil {
                    meta = make(map[string]*string, len(userMeta))
                }
                for k, v := range userMeta {
                    meta[k] = v
                }
            }
            // Apply tags if any
            if len(mappedTags) > 0 {
                tags = mappedTags
            }
            // Ensure mtime present using mapped or source time
            if _, ok := meta[modTimeKey]; !ok {
                when := src.ModTime(ctx)
                if mappedModTime != nil {
                    when = *mappedModTime
                }
                val := when.Format(time.RFC3339Nano)
                if meta == nil {
                    meta = make(map[string]*string, 1)
                }
                meta[modTimeKey] = &val
            }
            // Ensure content-type fallback to source if not set by mapper
            if headers.BlobContentType == nil {
                headers.BlobContentType = srcProps.ContentType
            }
        } else {
            // Mapping enabled but not provided: ensure mtime present based on source ModTime
            if _, ok := meta[modTimeKey]; !ok {
                when := src.ModTime(ctx)
                val := when.Format(time.RFC3339Nano)
                if meta == nil {
                    meta = make(map[string]*string, 1)
                }
                meta[modTimeKey] = &val
            }
        }
    }

    return headers, meta, tags, hadMapping, nil
}
// applyMappedMetadata applies mapped metadata and headers to the object state for uploads.
//
// It reads `--metadata`, `--metadata-set`, and `--metadata-mapper` outputs via fs.GetMetadataOptions
// and updates o.meta, o.tags and ui.httpHeaders accordingly.
func (o *Object) applyMappedMetadata(ctx context.Context, src fs.ObjectInfo, ui *uploadInfo, options []fs.OpenOption) (modTime time.Time, err error) {
    // Start from the source modtime; may be overridden by metadata
    modTime = src.ModTime(ctx)

    // Fetch mapped metadata if --metadata is enabled
    meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
    if err != nil {
        return modTime, err
    }
    if meta == nil {
        // No metadata processing requested
        return modTime, nil
    }

    // Map metadata using common helper
    headers, userMeta, tags, mappedModTime, err := mapMetadataToAzure(meta, func(format string, args ...any) { fs.Debugf(o, format, args...) })
    if err != nil {
        return modTime, err
    }
    // Merge headers into ui
    if headers.BlobCacheControl != nil {
        ui.httpHeaders.BlobCacheControl = headers.BlobCacheControl
    }
    if headers.BlobContentDisposition != nil {
        ui.httpHeaders.BlobContentDisposition = headers.BlobContentDisposition
    }
    if headers.BlobContentEncoding != nil {
        ui.httpHeaders.BlobContentEncoding = headers.BlobContentEncoding
    }
    if headers.BlobContentLanguage != nil {
        ui.httpHeaders.BlobContentLanguage = headers.BlobContentLanguage
    }
    if headers.BlobContentType != nil {
        ui.httpHeaders.BlobContentType = headers.BlobContentType
    }

    // Apply user metadata to o.meta with a single critical section
    if len(userMeta) > 0 {
        metadataMu.Lock()
        if o.meta == nil {
            o.meta = make(map[string]string, len(userMeta))
        }
        for k, v := range userMeta {
            if v != nil {
                o.meta[k] = *v
            }
        }
        metadataMu.Unlock()
    }

    // Apply tags
    if len(tags) > 0 {
        if o.tags == nil {
            o.tags = make(map[string]string, len(tags))
        }
        for k, v := range tags {
            o.tags[k] = v
        }
    }

    if mappedModTime != nil {
        modTime = *mappedModTime
    }

    return modTime, nil
}
// Returns whether file is a directory marker or not
func isDirectoryMarker(size int64, metadata map[string]*string, remote string) bool {
    // Directory markers are 0 length
@@ -1951,18 +2281,19 @@ func (f *Fs) copyMultipart(ctx context.Context, remote, dstContainer, dstPath st
        return nil, err
    }

    // Convert metadata from source object
    // Prepare metadata/headers/tags for destination
    // For multipart commit, include base metadata from source then overlay mapped
    commitHeaders, commitMeta, commitTags, _, err := assembleCopyParams(ctx, f, src, srcProperties, true)
    if err != nil {
        return nil, fmt.Errorf("multipart copy: %w", err)
    }

    // Convert metadata from source or mapper
    options := blockblob.CommitBlockListOptions{
        Metadata: srcProperties.Metadata,
        Tier: parseTier(f.opt.AccessTier),
        HTTPHeaders: &blob.HTTPHeaders{
            BlobCacheControl: srcProperties.CacheControl,
            BlobContentDisposition: srcProperties.ContentDisposition,
            BlobContentEncoding: srcProperties.ContentEncoding,
            BlobContentLanguage: srcProperties.ContentLanguage,
            BlobContentMD5: srcProperties.ContentMD5,
            BlobContentType: srcProperties.ContentType,
        },
        Metadata: commitMeta,
        Tags: commitTags,
        Tier: parseTier(f.opt.AccessTier),
        HTTPHeaders: &commitHeaders,
    }

    // Finalise the upload session
@@ -1993,10 +2324,36 @@ func (f *Fs) copySinglepart(ctx context.Context, remote, dstContainer, dstPath s
        return nil, fmt.Errorf("single part copy: source auth: %w", err)
    }

    // Start the copy
    // Prepare mapped metadata/tags/headers if requested
    options := blob.StartCopyFromURLOptions{
        Tier: parseTier(f.opt.AccessTier),
    }
    var postHeaders *blob.HTTPHeaders
    // Read source properties and assemble params; this also handles the case when mapping is disabled
    srcProps, err := src.readMetaDataAlways(ctx)
    if err != nil {
        return nil, fmt.Errorf("single part copy: read source properties: %w", err)
    }
    // For singlepart copy, do not include base metadata from source in StartCopyFromURL
    headers, meta, tags, hadMapping, aerr := assembleCopyParams(ctx, f, src, srcProps, false)
    if aerr != nil {
        return nil, fmt.Errorf("single part copy: %w", aerr)
    }
    // Apply tags and post-copy headers only when mapping requested changes
    if len(tags) > 0 {
        options.BlobTags = make(map[string]string, len(tags))
        for k, v := range tags {
            options.BlobTags[k] = v
        }
    }
    if hadMapping {
        // Only set metadata explicitly when mapping was requested; otherwise
        // let the service copy source metadata (including mtime) automatically.
        if len(meta) > 0 {
            options.Metadata = meta
        }
        postHeaders = &headers
    }
    var startCopy blob.StartCopyFromURLResponse
    err = f.pacer.Call(func() (bool, error) {
        startCopy, err = dstBlobSVC.StartCopyFromURL(ctx, srcURL, &options)
@@ -2026,6 +2383,16 @@ func (f *Fs) copySinglepart(ctx context.Context, remote, dstContainer, dstPath s
        pollTime = min(2*pollTime, time.Second)
    }

    // If mapper requested header changes, set them post-copy
    if postHeaders != nil {
        blb := f.getBlobSVC(dstContainer, dstPath)
        _, setErr := blb.SetHTTPHeaders(ctx, *postHeaders, nil)
        if setErr != nil {
            return nil, fmt.Errorf("single part copy: failed to set headers: %w", setErr)
        }
    }
    // Metadata (when requested) is set via StartCopyFromURL options.Metadata

    return f.NewObject(ctx, remote)
}
@@ -2157,6 +2524,35 @@ func (o *Object) getMetadata() (metadata map[string]*string) {
    return metadata
}

// Metadata returns metadata for an object
//
// It returns a combined view of system and user metadata.
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
    // Ensure metadata is loaded
    if err := o.readMetaData(ctx); err != nil {
        return nil, err
    }

    m := fs.Metadata{}

    // System metadata we expose
    if !o.modTime.IsZero() {
        m["mtime"] = o.modTime.Format(time.RFC3339Nano)
    }
    if o.accessTier != "" {
        m["tier"] = string(o.accessTier)
    }

    // Merge user metadata (already lower-cased keys)
    metadataMu.Lock()
    for k, v := range o.meta {
        m[k] = v
    }
    metadataMu.Unlock()

    return m, nil
}

// decodeMetaDataFromPropertiesResponse sets the metadata from the data passed in
//
// Sets
@@ -2995,17 +3391,19 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
    // containerPath = containerPath[:len(containerPath)-1]
    // }

    // Update Mod time
    o.updateMetadataWithModTime(src.ModTime(ctx))
    if err != nil {
        return ui, err
    }

    // Create the HTTP headers for the upload
    // Start with default content-type based on source
    ui.httpHeaders = blob.HTTPHeaders{
        BlobContentType: pString(fs.MimeType(ctx, src)),
    }

    // Apply mapped metadata/headers/tags if requested
    modTime, err := o.applyMappedMetadata(ctx, src, &ui, options)
    if err != nil {
        return ui, err
    }
    // Ensure mtime is set in metadata based on possibly overridden modTime
    o.updateMetadataWithModTime(modTime)

    // Compute the Content-MD5 of the file. As we stream all uploads it
    // will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
    if !o.fs.opt.DisableCheckSum {
@@ -5,11 +5,16 @@ package azureblob
import (
    "context"
    "encoding/base64"
    "fmt"
    "net/http"
    "strings"
    "testing"
    "time"

    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/object"
    "github.com/rclone/rclone/fstest"
    "github.com/rclone/rclone/fstest/fstests"
    "github.com/rclone/rclone/lib/random"
@@ -148,4 +153,417 @@ func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
func (f *Fs) InternalTest(t *testing.T) {
    t.Run("Features", f.testFeatures)
    t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
    t.Run("Metadata", f.testMetadataPaths)
}

// helper to read blob properties for an object
func getProps(ctx context.Context, t *testing.T, o fs.Object) *blob.GetPropertiesResponse {
    ao := o.(*Object)
    props, err := ao.readMetaDataAlways(ctx)
    require.NoError(t, err)
    return props
}
// helper to assert select headers and user metadata
func assertHeadersAndMetadata(t *testing.T, props *blob.GetPropertiesResponse, want map[string]string, wantUserMeta map[string]string) {
    // Headers
    get := func(p *string) string {
        if p == nil {
            return ""
        }
        return *p
    }
    if v, ok := want["content-type"]; ok {
        assert.Equal(t, v, get(props.ContentType), "content-type")
    }
    if v, ok := want["cache-control"]; ok {
        assert.Equal(t, v, get(props.CacheControl), "cache-control")
    }
    if v, ok := want["content-disposition"]; ok {
        assert.Equal(t, v, get(props.ContentDisposition), "content-disposition")
    }
    if v, ok := want["content-encoding"]; ok {
        assert.Equal(t, v, get(props.ContentEncoding), "content-encoding")
    }
    if v, ok := want["content-language"]; ok {
        assert.Equal(t, v, get(props.ContentLanguage), "content-language")
    }
    // User metadata (case-insensitive keys from service)
    norm := make(map[string]*string, len(props.Metadata))
    for kk, vv := range props.Metadata {
        norm[strings.ToLower(kk)] = vv
    }
    for k, v := range wantUserMeta {
        pv, ok := norm[strings.ToLower(k)]
        if assert.True(t, ok, fmt.Sprintf("missing user metadata key %q", k)) {
            if pv == nil {
                assert.Equal(t, v, "", k)
            } else {
                assert.Equal(t, v, *pv, k)
            }
        } else {
            // Log available keys for diagnostics
            keys := make([]string, 0, len(props.Metadata))
            for kk := range props.Metadata {
                keys = append(keys, kk)
            }
            t.Logf("available user metadata keys: %v", keys)
        }
    }
}
// helper to read blob tags for an object
func getTagsMap(ctx context.Context, t *testing.T, o fs.Object) map[string]string {
    ao := o.(*Object)
    blb := ao.getBlobSVC()
    resp, err := blb.GetTags(ctx, nil)
    require.NoError(t, err)
    out := make(map[string]string)
    for _, tag := range resp.BlobTagSet {
        if tag.Key != nil {
            k := *tag.Key
            v := ""
            if tag.Value != nil {
                v = *tag.Value
            }
            out[k] = v
        }
    }
    return out
}
// Test metadata across different write paths
func (f *Fs) testMetadataPaths(t *testing.T) {
    ctx := context.Background()
    if testing.Short() {
        t.Skip("skipping in short mode")
    }

    // Common expected metadata and headers
    baseMeta := fs.Metadata{
        "cache-control": "no-cache",
        "content-disposition": "inline",
        "content-language": "en-US",
        // Note: Don't set content-encoding here to avoid download decoding differences
        // We will set a custom user metadata key
        "potato": "royal",
        // and modtime
        "mtime": fstest.Time("2009-05-06T04:05:06.499999999Z").Format(time.RFC3339Nano),
    }

    // Singlepart upload
    t.Run("PutSinglepart", func(t *testing.T) {
        // size less than chunk size
        contents := random.String(int(f.opt.ChunkSize / 2))
        item := fstest.NewItem("meta-single.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
        // override content-type via metadata mapping
        meta := fs.Metadata{}
        meta.Merge(baseMeta)
        meta["content-type"] = "text/plain"
        obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/html", meta)
        defer func() { _ = obj.Remove(ctx) }()

        props := getProps(ctx, t, obj)
        assertHeadersAndMetadata(t, props, map[string]string{
            "content-type": "text/plain",
            "cache-control": "no-cache",
            "content-disposition": "inline",
            "content-language": "en-US",
        }, map[string]string{
            "potato": "royal",
        })
        _ = http.StatusOK // keep import for parity but don't inspect RawResponse
    })

    // Multipart upload
    t.Run("PutMultipart", func(t *testing.T) {
        // size greater than chunk size to force multipart
        contents := random.String(int(f.opt.ChunkSize + 1024))
        item := fstest.NewItem("meta-multipart.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
        meta := fs.Metadata{}
        meta.Merge(baseMeta)
        meta["content-type"] = "application/json"
        obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/html", meta)
        defer func() { _ = obj.Remove(ctx) }()

        props := getProps(ctx, t, obj)
        assertHeadersAndMetadata(t, props, map[string]string{
            "content-type": "application/json",
            "cache-control": "no-cache",
            "content-disposition": "inline",
            "content-language": "en-US",
        }, map[string]string{
            "potato": "royal",
        })

        // Tags: Singlepart upload
        t.Run("PutSinglepartTags", func(t *testing.T) {
            contents := random.String(int(f.opt.ChunkSize / 2))
            item := fstest.NewItem("tags-single.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
            meta := fs.Metadata{
                "x-ms-tags": "env=dev,team=sync",
            }
            obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/plain", meta)
            defer func() { _ = obj.Remove(ctx) }()

            tags := getTagsMap(ctx, t, obj)
            assert.Equal(t, "dev", tags["env"])
            assert.Equal(t, "sync", tags["team"])
        })

        // Tags: Multipart upload
        t.Run("PutMultipartTags", func(t *testing.T) {
            contents := random.String(int(f.opt.ChunkSize + 2048))
            item := fstest.NewItem("tags-multipart.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
            meta := fs.Metadata{
                "x-ms-tags": "project=alpha,release=2025-08",
            }
            obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "application/octet-stream", meta)
            defer func() { _ = obj.Remove(ctx) }()

            tags := getTagsMap(ctx, t, obj)
            assert.Equal(t, "alpha", tags["project"])
            assert.Equal(t, "2025-08", tags["release"])
        })
    })
    // Singlepart copy with metadata-set mapping; omit content-type to exercise fallback
    t.Run("CopySinglepart", func(t *testing.T) {
        // create small source
        contents := random.String(int(f.opt.ChunkSize / 2))
        srcItem := fstest.NewItem("meta-copy-single-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
        srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil)
        defer func() { _ = srcObj.Remove(ctx) }()

        // set mapping via MetadataSet
        ctx2, ci := fs.AddConfig(ctx)
        ci.Metadata = true
        ci.MetadataSet = fs.Metadata{
            "cache-control": "private, max-age=60",
            "content-disposition": "attachment; filename=foo.txt",
            "content-language": "fr",
            // no content-type: should fallback to source
            "potato": "maris",
        }

        // do copy
        dstName := "meta-copy-single-dst.txt"
        dst, err := f.Copy(ctx2, srcObj, dstName)
        require.NoError(t, err)
        defer func() { _ = dst.Remove(ctx2) }()

        props := getProps(ctx2, t, dst)
        // content-type should fallback to source (text/plain)
        assertHeadersAndMetadata(t, props, map[string]string{
            "content-type": "text/plain",
            "cache-control": "private, max-age=60",
            "content-disposition": "attachment; filename=foo.txt",
            "content-language": "fr",
        }, map[string]string{
            "potato": "maris",
        })
        // mtime should be populated on copy when --metadata is used
        // and should equal the source ModTime (RFC3339Nano)
        // Read user metadata (case-insensitive)
        m := props.Metadata
        var gotMtime string
        for k, v := range m {
            if strings.EqualFold(k, "mtime") && v != nil {
                gotMtime = *v
                break
            }
        }
        if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
            // parse and compare times ignoring formatting differences
            parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
            require.NoError(t, err)
            assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
        }
    })
    // CopySinglepart with only --metadata (no MetadataSet) must inject mtime and preserve src content-type
    t.Run("CopySinglepart_MetadataOnly", func(t *testing.T) {
        contents := random.String(int(f.opt.ChunkSize / 2))
        srcItem := fstest.NewItem("meta-copy-single-only-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
        srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil)
        defer func() { _ = srcObj.Remove(ctx) }()

        ctx2, ci := fs.AddConfig(ctx)
        ci.Metadata = true

        dstName := "meta-copy-single-only-dst.txt"
        dst, err := f.Copy(ctx2, srcObj, dstName)
        require.NoError(t, err)
        defer func() { _ = dst.Remove(ctx2) }()

        props := getProps(ctx2, t, dst)
        assertHeadersAndMetadata(t, props, map[string]string{
            "content-type": "text/plain",
        }, map[string]string{})
        // Assert mtime injected
        m := props.Metadata
        var gotMtime string
        for k, v := range m {
            if strings.EqualFold(k, "mtime") && v != nil {
                gotMtime = *v
                break
            }
        }
        if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
            parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
            require.NoError(t, err)
            assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
        }
    })
    // Multipart copy with metadata-set mapping; omit content-type to exercise fallback
    t.Run("CopyMultipart", func(t *testing.T) {
        // create large source to force multipart
        contents := random.String(int(f.opt.CopyCutoff + 1024))
        srcItem := fstest.NewItem("meta-copy-multi-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
        srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil)
        defer func() { _ = srcObj.Remove(ctx) }()

        // set mapping via MetadataSet
        ctx2, ci := fs.AddConfig(ctx)
        ci.Metadata = true
        ci.MetadataSet = fs.Metadata{
            "cache-control": "max-age=0, no-cache",
            // omit content-type to trigger fallback
            "content-language": "de",
            "potato": "desiree",
        }

        dstName := "meta-copy-multi-dst.txt"
        dst, err := f.Copy(ctx2, srcObj, dstName)
        require.NoError(t, err)
        defer func() { _ = dst.Remove(ctx2) }()

        props := getProps(ctx2, t, dst)
        // content-type should fallback to source (application/octet-stream)
        assertHeadersAndMetadata(t, props, map[string]string{
            "content-type": "application/octet-stream",
            "cache-control": "max-age=0, no-cache",
            "content-language": "de",
        }, map[string]string{
            "potato": "desiree",
        })
        // mtime should be populated on copy when --metadata is used
        m := props.Metadata
        var gotMtime string
        for k, v := range m {
            if strings.EqualFold(k, "mtime") && v != nil {
                gotMtime = *v
                break
            }
        }
        if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
            parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
            require.NoError(t, err)
            assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
        }
    })
    // CopyMultipart with only --metadata must inject mtime and preserve src content-type
    t.Run("CopyMultipart_MetadataOnly", func(t *testing.T) {
        contents := random.String(int(f.opt.CopyCutoff + 2048))
        srcItem := fstest.NewItem("meta-copy-multi-only-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
        srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil)
        defer func() { _ = srcObj.Remove(ctx) }()

        ctx2, ci := fs.AddConfig(ctx)
        ci.Metadata = true

        dstName := "meta-copy-multi-only-dst.txt"
        dst, err := f.Copy(ctx2, srcObj, dstName)
        require.NoError(t, err)
        defer func() { _ = dst.Remove(ctx2) }()

        props := getProps(ctx2, t, dst)
        assertHeadersAndMetadata(t, props, map[string]string{
            "content-type": "application/octet-stream",
        }, map[string]string{})
        m := props.Metadata
        var gotMtime string
        for k, v := range m {
            if strings.EqualFold(k, "mtime") && v != nil {
                gotMtime = *v
                break
            }
        }
        if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
            parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
            require.NoError(t, err)
            assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
        }
    })
    // Tags: Singlepart copy
    t.Run("CopySinglepartTags", func(t *testing.T) {
        // create small source
        contents := random.String(int(f.opt.ChunkSize / 2))
        srcItem := fstest.NewItem("tags-copy-single-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
        srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil)
        defer func() { _ = srcObj.Remove(ctx) }()

        // set mapping via MetadataSet including tags
        ctx2, ci := fs.AddConfig(ctx)
        ci.Metadata = true
        ci.MetadataSet = fs.Metadata{
            "x-ms-tags": "copy=single,mode=test",
        }

        dstName := "tags-copy-single-dst.txt"
        dst, err := f.Copy(ctx2, srcObj, dstName)
        require.NoError(t, err)
        defer func() { _ = dst.Remove(ctx2) }()

        tags := getTagsMap(ctx2, t, dst)
        assert.Equal(t, "single", tags["copy"])
        assert.Equal(t, "test", tags["mode"])
    })
    // Tags: Multipart copy
    t.Run("CopyMultipartTags", func(t *testing.T) {
        // create large source to force multipart
        contents := random.String(int(f.opt.CopyCutoff + 4096))
        srcItem := fstest.NewItem("tags-copy-multi-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
        srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil)
        defer func() { _ = srcObj.Remove(ctx) }()

        ctx2, ci := fs.AddConfig(ctx)
        ci.Metadata = true
        ci.MetadataSet = fs.Metadata{
            "x-ms-tags": "copy=multi,mode=test",
        }

        dstName := "tags-copy-multi-dst.txt"
        dst, err := f.Copy(ctx2, srcObj, dstName)
        require.NoError(t, err)
        defer func() { _ = dst.Remove(ctx2) }()

        tags := getTagsMap(ctx2, t, dst)
        assert.Equal(t, "multi", tags["copy"])
        assert.Equal(t, "test", tags["mode"])
    })
    // Negative: invalid x-ms-tags must error
    t.Run("InvalidXMsTags", func(t *testing.T) {
        contents := random.String(32)
        item := fstest.NewItem("tags-invalid.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
        // construct ObjectInfo with invalid x-ms-tags
        buf := strings.NewReader(contents)
        // Build obj info with metadata
        meta := fs.Metadata{
            "x-ms-tags": "badpair-without-equals",
        }
        // force metadata on
        ctx2, ci := fs.AddConfig(ctx)
        ci.Metadata = true
        obji := object.NewStaticObjectInfo(item.Path, item.ModTime, int64(len(contents)), true, nil, nil)
        obji = obji.WithMetadata(meta).WithMimeType("text/plain")
        _, err := f.Put(ctx2, buf, obji)
        require.Error(t, err)
        assert.Contains(t, err.Error(), "invalid tag")
    })
}
@@ -133,23 +133,32 @@ type File struct {
    Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
}

// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
// StorageAPI is as returned from the b2_authorize_account call
type StorageAPI struct {
    AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
    AccountID string `json:"accountId"` // The identifier for the account.
    Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
        BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
        BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
        Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
        Buckets []struct { // When present, access is restricted to one or more buckets.
            ID string `json:"id"` // ID of bucket
            Name string `json:"name"` // When present, name of bucket - may be empty
        } `json:"buckets"`
        Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has for every bucket.
        NamePrefix any `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
    } `json:"allowed"`
    APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
    AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
    DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
    MinimumPartSize int `json:"minimumPartSize"` // DEPRECATED: This field will always have the same value as recommendedPartSize. Use recommendedPartSize instead.
    RecommendedPartSize int `json:"recommendedPartSize"` // The recommended size for each part of a large file. We recommend using this part size for optimal upload performance.
}

// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
    AccountID string `json:"accountId"` // The identifier for the account.
    AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
    APIs struct { // Supported APIs for this account / key. These are API-dependent JSON objects.
        Storage StorageAPI `json:"storageApi"`
    } `json:"apiInfo"`
}

// ListBucketsRequest is parameters for b2_list_buckets call
type ListBucketsRequest struct {
    AccountID string `json:"accountId"` // The identifier for the account.
backend/b2/b2.go
@@ -607,17 +607,29 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
    if err != nil {
        return nil, fmt.Errorf("failed to authorize account: %w", err)
    }
    // If this is a key limited to a single bucket, it must exist already
    if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
        allowedBucket := f.opt.Enc.ToStandardName(f.info.Allowed.BucketName)
        if allowedBucket == "" {
            return nil, errors.New("bucket that application key is restricted to no longer exists")
    // If this is a key limited to one or more buckets, one of them must exist
    // and be ours.
    if f.rootBucket != "" && len(f.info.APIs.Storage.Allowed.Buckets) != 0 {
        buckets := f.info.APIs.Storage.Allowed.Buckets
        var rootFound = false
        var rootID string
        for _, b := range buckets {
            allowedBucket := f.opt.Enc.ToStandardName(b.Name)
            if allowedBucket == "" {
                fs.Debugf(f, "bucket %q that application key is restricted to no longer exists", b.ID)
                continue
            }

            if allowedBucket == f.rootBucket {
                rootFound = true
                rootID = b.ID
            }
        }
        if allowedBucket != f.rootBucket {
            return nil, fmt.Errorf("you must use bucket %q with this application key", allowedBucket)
        if !rootFound {
            return nil, fmt.Errorf("you must use bucket(s) %q with this application key", buckets)
        }
        f.cache.MarkOK(f.rootBucket)
        f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
        f.setBucketID(f.rootBucket, rootID)
    }
    if f.rootBucket != "" && f.rootDirectory != "" {
        // Check to see if the (bucket,directory) is actually an existing file
@@ -643,7 +655,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
    defer f.authMu.Unlock()
    opts := rest.Opts{
        Method: "GET",
        Path: "/b2api/v1/b2_authorize_account",
        Path: "/b2api/v4/b2_authorize_account",
        RootURL: f.opt.Endpoint,
        UserName: f.opt.Account,
        Password: f.opt.Key,
@@ -656,13 +668,13 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
    if err != nil {
        return fmt.Errorf("failed to authenticate: %w", err)
    }
    f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
    f.srv.SetRoot(f.info.APIs.Storage.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
    return nil
}

// hasPermission returns if the current AuthorizationToken has the selected permission
func (f *Fs) hasPermission(permission string) bool {
    return slices.Contains(f.info.Allowed.Capabilities, permission)
    return slices.Contains(f.info.APIs.Storage.Allowed.Capabilities, permission)
}

// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
@@ -1067,44 +1079,68 @@ type listBucketFn func(*api.Bucket) error

// listBucketsToFn lists the buckets to the function supplied
func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error {
    var account = api.ListBucketsRequest{
        AccountID: f.info.AccountID,
        BucketID: f.info.Allowed.BucketID,
    }
    if bucketName != "" && account.BucketID == "" {
        account.BucketName = f.opt.Enc.FromStandardName(bucketName)
    responses := make([]api.ListBucketsResponse, len(f.info.APIs.Storage.Allowed.Buckets))[:0]

    for i := range f.info.APIs.Storage.Allowed.Buckets {
        b := &f.info.APIs.Storage.Allowed.Buckets[i]
        // Empty names indicate a bucket that no longer exists, this is non-fatal
        // for multi-bucket API keys.
        if b.Name == "" {
            continue
        }
        // When requesting a specific bucket skip over non-matching names
        if bucketName != "" && b.Name != bucketName {
            continue
        }

        var account = api.ListBucketsRequest{
            AccountID: f.info.AccountID,
            BucketID: b.ID,
        }
        if bucketName != "" && account.BucketID == "" {
            account.BucketName = f.opt.Enc.FromStandardName(bucketName)
        }

        var response api.ListBucketsResponse
        opts := rest.Opts{
            Method: "POST",
            Path: "/b2_list_buckets",
        }
        err := f.pacer.Call(func() (bool, error) {
            resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
            return f.shouldRetry(ctx, resp, err)
        })
        if err != nil {
            return err
        }
        responses = append(responses, response)
    }

    var response api.ListBucketsResponse
    opts := rest.Opts{
        Method: "POST",
        Path: "/b2_list_buckets",
    }
    err := f.pacer.Call(func() (bool, error) {
        resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
        return f.shouldRetry(ctx, resp, err)
    })
    if err != nil {
        return err
    }
    f.bucketIDMutex.Lock()
    f.bucketTypeMutex.Lock()
    f._bucketID = make(map[string]string, 1)
    f._bucketType = make(map[string]string, 1)
    for i := range response.Buckets {
        bucket := &response.Buckets[i]
        bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
        f.cache.MarkOK(bucket.Name)
        f._bucketID[bucket.Name] = bucket.ID
        f._bucketType[bucket.Name] = bucket.Type

    for ri := range responses {
        response := &responses[ri]
        for i := range response.Buckets {
            bucket := &response.Buckets[i]
            bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
            f.cache.MarkOK(bucket.Name)
            f._bucketID[bucket.Name] = bucket.ID
            f._bucketType[bucket.Name] = bucket.Type
        }
    }
    f.bucketTypeMutex.Unlock()
    f.bucketIDMutex.Unlock()
    for i := range response.Buckets {
        bucket := &response.Buckets[i]
        err = fn(bucket)
        if err != nil {
            return err
    for ri := range responses {
        response := &responses[ri]
        for i := range response.Buckets {
            bucket := &response.Buckets[i]
            err := fn(bucket)
            if err != nil {
                return err
            }
        }
    }
    return nil
@@ -1606,7 +1642,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
    bucket, bucketPath := f.split(remote)
    var RootURL string
    if f.opt.DownloadURL == "" {
        RootURL = f.info.DownloadURL
        RootURL = f.info.APIs.Storage.DownloadURL
    } else {
        RootURL = f.opt.DownloadURL
    }
@@ -1957,7 +1993,7 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
    // Use downloadUrl from backblaze if downloadUrl is not set
    // otherwise use the custom downloadUrl
    if o.fs.opt.DownloadURL == "" {
        opts.RootURL = o.fs.info.DownloadURL
        opts.RootURL = o.fs.info.APIs.Storage.DownloadURL
    } else {
        opts.RootURL = o.fs.opt.DownloadURL
    }
@@ -403,14 +403,14 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
    if ciphertext == "" {
        return "", nil
    }
    pos := strings.Index(ciphertext, ".")
    if pos == -1 {
    before, after, ok := strings.Cut(ciphertext, ".")
    if !ok {
        return "", ErrorNotAnEncryptedFile
    } // No .
    num := ciphertext[:pos]
    num := before
    if num == "!" {
        // No rotation; probably original was not valid unicode
        return ciphertext[pos+1:], nil
        return after, nil
    }
    dir, err := strconv.Atoi(num)
    if err != nil {

@@ -425,7 +425,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
    var result bytes.Buffer

    inQuote := false
    for _, runeValue := range ciphertext[pos+1:] {
    for _, runeValue := range after {
        switch {
        case inQuote:
            _, _ = result.WriteRune(runeValue)
@@ -30,9 +30,11 @@ import (
    v4signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
    awsconfig "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/credentials"
    "github.com/aws/aws-sdk-go-v2/credentials/stscreds"
    "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
    "github.com/aws/aws-sdk-go-v2/service/sts"
    "github.com/aws/smithy-go"
    "github.com/aws/smithy-go/logging"
    "github.com/aws/smithy-go/middleware"
@@ -325,6 +327,30 @@ If empty it will default to the environment variable "AWS_PROFILE" or
    Help: "An AWS session token.",
    Advanced: true,
    Sensitive: true,
}, {
    Name: "role_arn",
    Help: `ARN of the IAM role to assume.

Leave blank if not using assume role.`,
    Advanced: true,
}, {
    Name: "role_session_name",
    Help: `Session name for assumed role.

If empty, a session name will be generated automatically.`,
    Advanced: true,
}, {
    Name: "role_session_duration",
    Help: `Session duration for assumed role.

If empty, the default session duration will be used.`,
    Advanced: true,
}, {
    Name: "role_external_id",
    Help: `External ID for assumed role.

Leave blank if not using an external ID.`,
    Advanced: true,
}, {
    Name: "upload_concurrency",
    Help: `Concurrency for multipart uploads and copies.
@@ -927,6 +953,10 @@ type Options struct {
    SharedCredentialsFile string `config:"shared_credentials_file"`
    Profile string `config:"profile"`
    SessionToken string `config:"session_token"`
    RoleARN string `config:"role_arn"`
    RoleSessionName string `config:"role_session_name"`
    RoleSessionDuration fs.Duration `config:"role_session_duration"`
    RoleExternalID string `config:"role_external_id"`
    UploadConcurrency int `config:"upload_concurrency"`
    ForcePathStyle bool `config:"force_path_style"`
    V2Auth bool `config:"v2_auth"`
@@ -1290,6 +1320,34 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli
        opt.Region = "us-east-1"
    }

    // Handle assume role if RoleARN is specified
    if opt.RoleARN != "" {
        fs.Debugf(nil, "Using assume role with ARN: %s", opt.RoleARN)

        // Set region for the config before creating STS client
        awsConfig.Region = opt.Region

        // Create STS client using the base credentials
        stsClient := sts.NewFromConfig(awsConfig)

        // Configure AssumeRole options
        assumeRoleOptions := func(aro *stscreds.AssumeRoleOptions) {
            // Set session name if provided, otherwise use a default
            if opt.RoleSessionName != "" {
                aro.RoleSessionName = opt.RoleSessionName
            }
            if opt.RoleSessionDuration != 0 {
                aro.Duration = time.Duration(opt.RoleSessionDuration)
            }
            if opt.RoleExternalID != "" {
                aro.ExternalID = &opt.RoleExternalID
            }
        }

        // Create AssumeRole credentials provider
        awsConfig.Credentials = stscreds.NewAssumeRoleProvider(stsClient, opt.RoleARN, assumeRoleOptions)
    }

    provider = loadProvider(opt.Provider)
    if provider == nil {
        fs.Logf("s3", "s3 provider %q not known - please set correctly", opt.Provider)
@@ -389,8 +389,8 @@ func parseHash(str string) (string, string, error) {
    if str == "-" {
        return "", "", nil
    }
    if pos := strings.Index(str, ":"); pos > 0 {
        name, val := str[:pos], str[pos+1:]
    if before, after, ok := strings.Cut(str, ":"); ok {
        name, val := before, after
        if name != "" && val != "" {
            return name, val, nil
        }
@@ -58,10 +58,10 @@ type conn struct {
// interoperate with the rclone sftp backend
func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (err error) {
    binary, args := command, ""
    space := strings.Index(command, " ")
    if space >= 0 {
        binary = command[:space]
        args = strings.TrimLeft(command[space+1:], " ")
    before, after, ok := strings.Cut(command, " ")
    if ok {
        binary = before
        args = strings.TrimLeft(after, " ")
    }
    args = shellUnEscape(args)
    fs.Debugf(c.what, "exec command: binary = %q, args = %q", binary, args)
@@ -1048,3 +1048,5 @@ put them back in again. -->
- jijamik <30904953+jijamik@users.noreply.github.com>
- Dominik Sander <git@dsander.de>
- Nikolay Kiryanov <nikolay@kiryanov.ru>
- Diana <5275194+DianaNites@users.noreply.github.com>
- Duncan Smart <duncan.smart@gmail.com>
@@ -103,6 +103,26 @@ MD5 hashes are stored with blobs. However blobs that were uploaded in
chunks only have an MD5 if the source remote was capable of MD5
hashes, e.g. the local disk.

### Metadata and tags

Rclone can map arbitrary metadata to Azure Blob headers, user metadata, and tags
when `--metadata` is enabled (or when using `--metadata-set` / `--metadata-mapper`).

- Headers: Set these keys in metadata to map to the corresponding blob headers:
  `cache-control`, `content-disposition`, `content-encoding`, `content-language`, `content-type`.
- User metadata: Any other non-reserved keys are written as user metadata
  (keys are normalized to lowercase). Keys starting with `x-ms-` are reserved and
  are not stored as user metadata.
- Tags: Provide `x-ms-tags` as a comma-separated list of `key=value` pairs, e.g.
  `x-ms-tags=env=dev,team=sync`. These are applied as blob tags on upload and on
  server-side copies. Whitespace around keys/values is ignored.
- Modtime override: Provide `mtime` in RFC3339/RFC3339Nano format to override the
  stored modtime persisted in user metadata. If `mtime` cannot be parsed, rclone
  logs a debug message and ignores the override.

Notes:

- Rclone ignores reserved `x-ms-*` keys (except `x-ms-tags`) for user metadata.
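As an illustrative example (the remote name, container and values below are
placeholders, not taken from this change), a single upload can set a header, a
set of blob tags, a custom user metadata key and an mtime override like this:

```
rclone copy --metadata \
  --metadata-set "cache-control=no-cache" \
  --metadata-set "x-ms-tags=env=dev,team=sync" \
  --metadata-set "potato=royal" \
  --metadata-set "mtime=2020-01-02T03:04:05Z" \
  /path/to/files remote:container
```

On the destination blob this would appear as a `Cache-Control` header, the tags
`env=dev` and `team=sync`, user metadata stored under an `x-ms-meta-` key, and a
stored modtime taken from the `mtime` value.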
### Performance

When uploading large files, increasing the value of
@@ -283,7 +283,7 @@ It is useful to know how many requests are sent to the server in different scena
All copy commands send the following 4 requests:

```text
/b2api/v1/b2_authorize_account
/b2api/v4/b2_authorize_account
/b2api/v1/b2_create_bucket
/b2api/v1/b2_list_buckets
/b2api/v1/b2_list_file_names
@@ -26,7 +26,7 @@ description: "Rclone Changelog"
- [rclone test speed](/commands/rclone_test_speed/): Add command to test a specified remotes speed (dougal)
- New Features
    - backends: many backends have had a paged listing (`ListP`) interface added
        - this enables progress when listing large directories and reduced memory usage
        - this enables progress when listing large directories and reduced memory usage
    - build
        - Bump golang.org/x/crypto from 0.43.0 to 0.45.0 to fix CVE-2025-58181 (dependabot[bot])
        - Modernize code and tests (Nick Craig-Wood, russcoss, juejinyuxitu, reddaisyy, dulanting, Oleksandr Redko)
@@ -265,7 +265,7 @@ account key" button.
`https://www.googleapis.com/auth/drive`
to grant read/write access to Google Drive specifically.
You can also use `https://www.googleapis.com/auth/drive.readonly` for read
only access.
only access with `--drive-scope=drive.readonly`.
- Click "Authorise"

##### 3. Configure rclone, assuming a new install
@@ -14,6 +14,9 @@ managing files in the cloud easy. Its cross-platform file backup
services let you upload and back up files from any internet-connected
device.

**Note** FileLu now has a fully featured S3 backend [FileLu S5](/s3#filelu-s5),
an industry standard S3 compatible object store.

## Configuration

Here is an example of how to make a remote called `filelu`. First, run:
@@ -745,6 +745,68 @@ If none of these option actually end up providing `rclone` with AWS
|
||||
credentials then S3 interaction will be non-authenticated (see the
|
||||
[anonymous access](#anonymous-access) section for more info).
|
||||
|
||||
#### Assume Role (Cross-Account Access)

If you need to access S3 resources in a different AWS account, you can use IAM role assumption.
This is useful for cross-account access scenarios where you have credentials in one account
but need to access resources in another account.

To use assume role, configure the following parameters:

- `role_arn` - The ARN (Amazon Resource Name) of the IAM role to assume in the target account.
  Format: `arn:aws:iam::ACCOUNT-ID:role/ROLE-NAME`
- `role_session_name` (optional) - A name for the assumed role session. If not specified,
  rclone will generate one automatically.
- `role_session_duration` (optional) - Duration for which the assumed role credentials are valid.
  If not specified, the AWS default duration will be used (typically 1 hour).
- `role_external_id` (optional) - An external ID required by the role's trust policy for additional security.
  This is typically used when the role is accessed by a third party.

The assume role feature works with both direct credentials (`env_auth = false`) and environment-based
authentication (`env_auth = true`). Rclone will first authenticate using the base credentials, then
use those credentials to assume the specified role.

Example configuration for cross-account access:
```
[s3-cross-account]
type = s3
provider = AWS
env_auth = true
region = us-east-1
role_arn = arn:aws:iam::123456789012:role/CrossAccountS3Role
role_session_name = rclone-session
role_external_id = unique-role-external-id-12345
```

In this example:

- Base credentials are obtained from the environment (IAM role, credentials file, or environment variables)
- These credentials are then used to assume the role `CrossAccountS3Role` in account `123456789012`
- An external ID is provided for additional security as required by the role's trust policy

The target role's trust policy in the destination account must allow the source account or user to assume it.
Example trust policy:
```json
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {
                "AWS": "arn:aws:iam::SOURCE-ACCOUNT-ID:root"
            },
            "Action": "sts:AssumeRole",
            "Condition": {
                "StringEquals": {
                    "sts:ExternalID": "unique-role-external-id-12345"
                }
            }
        }
    ]
}
```
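
Once configured as above, the assumed-role remote is used like any other S3
remote; for example, listing a bucket in the target account (the bucket name
is a placeholder):

```
rclone lsd s3-cross-account:my-target-bucket
```

If the trust policy or external ID does not match, the assume role call will
typically fail with an access denied error.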
### S3 Permissions
When using the `sync` subcommand of `rclone` the following minimum
@@ -19,8 +19,9 @@ Thank you to our sponsors:
{{< sponsor src="/img/logos/mega-s4.svg" width="300" height="200" title="MEGA S4: New S3 compatible object storage. High scale. Low cost. Free egress." link="https://mega.io/objectstorage?utm_source=rclone&utm_medium=referral&utm_campaign=rclone-mega-s4&mct=rclonepromo">}}
{{< sponsor src="/img/logos/sia.svg" width="200" height="200" title="Visit our sponsor sia" link="https://sia.tech">}}
{{< sponsor src="/img/logos/route4me.svg" width="400" height="200" title="Visit our sponsor Route4Me" link="https://route4me.com/">}}
{{< sponsor src="/img/logos/rcloneview.svg" width="300" height="200" title="Visit our sponsor RcloneView" link="https://rcloneview.com/">}}
{{< sponsor src="/img/logos/rcloneview-banner.svg" width="300" height="200" title="Visit our sponsor RcloneView" link="https://rcloneview.com/">}}
{{< sponsor src="/img/logos/rcloneui.svg" width="300" height="200" title="Visit our sponsor RcloneUI" link="https://github.com/rclone-ui/rclone-ui">}}
{{< sponsor src="/img/logos/shade.svg" width="300" height="200" title="Visit our sponsor Shade" link="https://shade.inc">}}
{{< sponsor src="/img/logos/filelu-rclone.svg" width="300" height="200" title="Visit our sponsor FileLu" link="https://filelu.com/">}}
{{< sponsor src="/img/logos/torbox.png" width="200" height="200" title="Visit our sponsor TORBOX" link="https://www.torbox.app/">}}
{{< sponsor src="/img/logos/spectra-logic.svg" width="300" height="200" title="Visit our sponsor Spectra Logic" link="https://spectralogic.com/">}}
@@ -14,13 +14,12 @@
Platinum Sponsor
</div>
<div class="card-body">
<a id="platinum" href="https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general" target="_blank" rel="noopener" title="Visit rclone's sponsor Rabata.io"><img style="width: 100%; height: auto;" src="/img/logos/rabata/txt_1_300x114.png"></a><br />
<a id="platinum" href="https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general" target="_blank" rel="noopener" title="Visit rclone's sponsor Rabata.io"><img style="width: 100%; height: auto;" src="/img/logos/rabata/txt_1_website.png"></a><br />
<script>
const imgs = [
{ href: "https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general", img: "/img/logos/rabata/txt_1_300x114.png" },
{ href: "https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general", img: "/img/logos/rabata/txt_2_300x114.png" },
{ href: "https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general", img: "/img/logos/rabata/txt_3_300x114.png" },
{ href: "https://rabata.io/grant-application?utm_source=banner&utm_medium=rclone&utm_content=grant1", img: "/img/logos/rabata/100k_300x114.png" },
{ href: "https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general", img: "/img/logos/rabata/txt_1_website.png" },
{ href: "https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general", img: "/img/logos/rabata/txt_2_website.png" },
{ href: "https://rabata.io/grant-application?utm_source=banner&utm_medium=rclone&utm_content=grant1", img: "/img/logos/rabata/100k_website.png" },
];
const img = imgs[Math.floor(Math.random() * imgs.length)];
document.addEventListener("DOMContentLoaded", () => {
@@ -67,6 +66,14 @@
<a href="https://github.com/rclone-ui/rclone-ui" target="_blank" rel="noopener" title="Visit rclone's sponsor rclone UI"><img src="/img/logos/rcloneui.svg"></a><br />
</div>
</div>
<div class="card">
<div class="card-header">
Silver Sponsor
</div>
<div class="card-body">
<a href="https://shade.inc/" target="_blank" rel="noopener" title="Visit rclone's sponsor Shade"><img style="max-width: 100%; height: auto;" src="/img/logos/shade.svg"></a><br />
</div>
</div>
{{end}}

<div class="card">
@@ -55,7 +55,7 @@
<a class="dropdown-item" href="/fichier/"><i class="fa fa-archive fa-fw"></i> 1Fichier</a>
<a class="dropdown-item" href="/netstorage/"><i class="fas fa-database fa-fw"></i> Akamai NetStorage</a>
<a class="dropdown-item" href="/alias/"><i class="fa fa-link fa-fw"></i> Alias</a>
<a class="dropdown-item" href="/s3/"><i class="fab fa-amazon fa-fw"></i> Amazon S3</a>
<a class="dropdown-item" href="/s3/"><i class="fab fa-amazon fa-fw"></i> Amazon S3 Storage Providers</a>
<a class="dropdown-item" href="/archive/"><i class="fa fa-archive fa-fw"></i> Archive</a>
<a class="dropdown-item" href="/b2/"><i class="fa fa-fire fa-fw"></i> Backblaze B2</a>
<a class="dropdown-item" href="/box/"><i class="fa fa-archive fa-fw"></i> Box</a>
@@ -69,6 +69,7 @@
<a class="dropdown-item" href="/dropbox/"><i class="fab fa-dropbox fa-fw"></i> Dropbox</a>
<a class="dropdown-item" href="/filefabric/"><i class="fa fa-cloud fa-fw"></i> Enterprise File Fabric</a>
<a class="dropdown-item" href="/filelu/"><i class="fa fa-folder fa-fw"></i> FileLu Cloud Storage</a>
<a class="dropdown-item" href="/s3/#filelu-s5"><i class="fa fa-folder fa-fw"></i> FileLu S5 (S3-Compatible)</a>
<a class="dropdown-item" href="/filescom/"><i class="fa fa-brands fa-files-pinwheel fa-fw"></i> Files.com</a>
<a class="dropdown-item" href="/ftp/"><i class="fa fa-file fa-fw"></i> FTP</a>
<a class="dropdown-item" href="/gofile/"><i class="fa fa-folder fa-fw"></i> Gofile</a>
@@ -87,7 +88,7 @@
<a class="dropdown-item" href="/linkbox/"><i class="fa fa-infinity fa-fw"></i> Linkbox</a>
<a class="dropdown-item" href="/mailru/"><i class="fa fa-at fa-fw"></i> Mail.ru Cloud</a>
<a class="dropdown-item" href="/mega/"><i class="fa fa-archive fa-fw"></i> Mega</a>
<a class="dropdown-item" href="/s3/#mega"><i class="fa fa-archive fa-fw"></i> Mega S4</a>
<a class="dropdown-item" href="/s3/#mega"><i class="fa fa-archive fa-fw"></i> Mega S4 (S3-Compatible)</a>
<a class="dropdown-item" href="/memory/"><i class="fas fa-memory fa-fw"></i> Memory</a>
<a class="dropdown-item" href="/azureblob/"><i class="fab fa-windows fa-fw"></i> Microsoft Azure Blob Storage</a>
<a class="dropdown-item" href="/azurefiles/"><i class="fab fa-windows fa-fw"></i> Microsoft Azure Files Storage</a>
@@ -1 +1 @@
v1.72.0
v1.73.0
@@ -29,16 +29,16 @@ func (bp *BwPair) String() string {
// Set the bandwidth from a string which is either
// SizeSuffix or SizeSuffix:SizeSuffix (for tx:rx bandwidth)
func (bp *BwPair) Set(s string) (err error) {
	colon := strings.Index(s, ":")
	before, after, ok := strings.Cut(s, ":")
	stx, srx := s, ""
	if colon >= 0 {
		stx, srx = s[:colon], s[colon+1:]
	if ok {
		stx, srx = before, after
	}
	err = bp.Tx.Set(stx)
	if err != nil {
		return err
	}
	if colon < 0 {
	if !ok {
		bp.Rx = bp.Tx
	} else {
err = bp.Rx.Set(srx)
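
For context on the refactor above, here is a minimal standalone sketch (not
part of the rclone source) showing how `strings.Cut` replaces the
`strings.Index` plus slicing pattern for a `tx:rx` value:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// strings.Cut splits around the first ":" and reports whether it was
	// found, so no manual index bookkeeping is needed.
	for _, s := range []string{"10M:1M", "10M"} {
		before, after, ok := strings.Cut(s, ":")
		stx, srx := s, ""
		if ok {
			stx, srx = before, after
		}
		fmt.Printf("input=%q tx=%q rx=%q found=%v\n", s, stx, srx, ok)
	}
}
```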
@@ -1,4 +1,4 @@
package fs

// VersionTag of rclone
var VersionTag = "v1.72.0"
var VersionTag = "v1.73.0"
@@ -368,7 +368,7 @@ func Run(t *testing.T, opt *Opt) {
	}
	file1Contents string
	file1MimeType = "text/csv"
	file1Metadata = fs.Metadata{"rclone-test": "potato"}
	file1Metadata = fs.Metadata{"rclonetest": "potato"}
	file2 = fstest.Item{
		ModTime: fstest.Time("2001-02-03T04:05:10.123123123Z"),
		Path: `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`,
go.mod
@@ -25,6 +25,7 @@ require (
	github.com/aws/aws-sdk-go-v2/credentials v1.18.21
	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.4
	github.com/aws/aws-sdk-go-v2/service/s3 v1.90.0
	github.com/aws/aws-sdk-go-v2/service/sts v1.39.1
	github.com/aws/smithy-go v1.23.2
	github.com/buengese/sgzip v0.1.1
	github.com/cloudinary/cloudinary-go/v2 v2.13.0
@@ -133,7 +134,6 @@ require (
	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.13 // indirect
	github.com/aws/aws-sdk-go-v2/service/sso v1.30.1 // indirect
	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5 // indirect
	github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 // indirect
	github.com/bahlo/generic-list-go v0.2.0 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/bodgit/plumbing v1.3.0 // indirect