mirror of https://github.com/rclone/rclone.git synced 2025-12-19 09:43:14 +00:00

Compare commits


46 Commits

Author SHA1 Message Date
Nick Craig-Wood
b141a553be fshttp: add --dump curl for dumping HTTP requests as curl commands 2025-12-18 17:53:24 +00:00
Nick Craig-Wood
f81cd7d279 serve s3: make errors in --s3-auth-key fatal - fixes #9044
Previously, if auth keys were provided without a comma, rclone would only log an INFO message, which could mean it went on to serve without any auth.

The parsing of environment variables was changed in v1.70.0 to make them work properly with multiple inputs. This means the input is treated like a mini CSV file, which works well except when the input itself contains commas. As a result `user,auth` without quotes was treated as two separate keys, `user` and `auth`, rather than one access key / secret key pair. The correct syntax is `"user,auth"`. This updates the documentation accordingly.
2025-12-18 10:17:41 +00:00
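The quoting requirement above comes from the value being parsed like a one-line CSV record. Purely as an illustration (using Go's standard encoding/csv package rather than rclone's own option parser), the sketch below shows why an unquoted user,auth splits into two values while "user,auth" stays as a single access key / secret key pair:

package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

// Illustration only: a CSV-style reader splits the unquoted form into
// two fields, while the quoted form keeps the comma inside one field.
// This mirrors the "mini CSV file" behaviour described above; it is
// not rclone's actual flag parsing code.
func main() {
	for _, input := range []string{`user,auth`, `"user,auth"`} {
		r := csv.NewReader(strings.NewReader(input))
		fields, err := r.Read()
		if err != nil {
			fmt.Println(input, "->", err)
			continue
		}
		fmt.Printf("%-13s -> %d field(s): %v\n", input, len(fields), fields)
	}
}

Output: user,auth parses as the two fields [user auth], while "user,auth" parses as the single field [user,auth].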
Nick Craig-Wood
1a0a4628d7 Add masrlinu to contributors 2025-12-18 10:17:41 +00:00
masrlinu
c10a4d465c pcloud: add support for real-time updates in mount
Co-authored-by: masrlinu <5259918+masrlinu@users.noreply.github.com>
2025-12-17 15:13:25 +00:00
Nick Craig-Wood
3a6e07a613 memory: add --memory-discard flag for speed testing - fixes #9037 2025-12-17 10:21:12 +00:00
Nick Craig-Wood
c36f99d343 Add vyv03354 to contributors 2025-12-17 10:21:12 +00:00
jhasse-shade
3e21a7261b shade: Fix VFS test issues 2025-12-16 17:21:22 +00:00
vyv03354
fd439fab62 docs: mention use of ListR feature in ls docs 2025-12-15 09:11:00 +01:00
dependabot[bot]
976aa6b416 build: bump actions/download-artifact from 6 to 7
Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 6 to 7.
- [Release notes](https://github.com/actions/download-artifact/releases)
- [Commits](https://github.com/actions/download-artifact/compare/v6...v7)

---
updated-dependencies:
- dependency-name: actions/download-artifact
  dependency-version: '7'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-13 11:01:27 +01:00
dependabot[bot]
b3a0383ca3 build: bump actions/upload-artifact from 5 to 6
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 5 to 6.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](https://github.com/actions/upload-artifact/compare/v5...v6)

---
updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-version: '6'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-13 11:00:59 +01:00
dependabot[bot]
c13f129339 build: bump actions/cache from 4 to 5
Bumps [actions/cache](https://github.com/actions/cache) from 4 to 5.
- [Release notes](https://github.com/actions/cache/releases)
- [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md)
- [Commits](https://github.com/actions/cache/compare/v4...v5)

---
updated-dependencies:
- dependency-name: actions/cache
  dependency-version: '5'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-12 14:52:57 +01:00
vyv03354
748d8c8957 docs: reflects the fact that pCloud supports ListR 2025-12-11 20:32:53 +01:00
jbagwell-akamai
4d379efcbb S3: Linode: updated endpoints to use ISO 3166-1 alpha-2 standard
Endpoint descriptions now use the ISO 3166-1 alpha-2 standard for countries, with the region short name in parentheses instead of separated by another comma
2025-12-11 17:20:34 +00:00
dougal
e5e6a4b5ae sync: fix error propagation in tests (#9025)
This commit fixes the sync transform test IO errors by resetting the
error flag, which stops subsequent tests from failing.
2025-12-10 15:43:22 +00:00
Nick Craig-Wood
df18e8c55b Changelog updates from Version v1.72.1 2025-12-10 15:31:48 +00:00
Nick Craig-Wood
f4e17d8b0b s3: add more regions for Selectel 2025-12-10 15:31:48 +00:00
Nick Craig-Wood
e5c69511bc Add jhasse-shade to contributors 2025-12-10 15:31:48 +00:00
jhasse-shade
175d4bc553 Add Shade backend 2025-12-09 17:08:57 +00:00
Nick Craig-Wood
4851f1796c log: fix backtrace not going to the --log-file #9014
Before the log re-organisation in:

8d353039a6 log: add log rotation to --log-file

rclone would write any backtraces to the --log-file, which was very
convenient for users.

This got accidentally disabled due to a typo, which meant backtraces
started going to stderr even if --log-file was supplied.

This fixes the problem.
2025-12-09 16:35:07 +00:00
Nick Craig-Wood
4ff8899b2c build: fix lint warning after linter upgrade 2025-12-09 16:15:17 +00:00
Nick Craig-Wood
8f29a0b0a1 Add Jonas Tingeborn to contributors 2025-12-09 16:15:17 +00:00
Nick Craig-Wood
8b0e76e53b Add Tingsong Xu to contributors 2025-12-09 16:15:17 +00:00
Jonas Tingeborn
233fef5c4d configfile: add piped config support - fixes #9012 2025-12-08 18:42:17 +00:00
Tingsong Xu
b9586c3e03 fs/log: fix PID not included in JSON log output
When using `--log-format pid,json`, the PID was not being added to the JSON log output. This fix adds PID support to JSON logging.
2025-12-08 18:41:58 +00:00
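For comparison only, and not rclone's internal fs/log package, a minimal sketch with the standard library's log/slog showing what a PID field in JSON log output looks like:

package main

import (
	"log/slog"
	"os"
)

// Minimal sketch, unrelated to rclone's own logger: emit JSON log
// lines that carry the process ID as a "pid" field, the kind of output
// the fix above restores for --log-format pid,json.
func main() {
	logger := slog.New(slog.NewJSONHandler(os.Stdout, nil)).
		With(slog.Int("pid", os.Getpid()))
	logger.Info("starting sync")
	// e.g. {"time":"...","level":"INFO","msg":"starting sync","pid":12345}
}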
Nick Craig-Wood
0dc0ab1330 build: adjust lint rules to exclude new errors from linter update 2025-12-08 14:45:06 +00:00
Nick Craig-Wood
a6bbdb35a0 proxy: fix error handling in tests spotted by the linter 2025-12-08 14:45:06 +00:00
Nick Craig-Wood
b33cb77b6c Add Johannes Rothe to contributors 2025-12-08 14:45:06 +00:00
Nick Craig-Wood
d51322bb5f Add Leo to contributors 2025-12-08 14:45:06 +00:00
Nick Craig-Wood
e718ab6091 Add Vladislav Tropnikov to contributors 2025-12-08 14:45:06 +00:00
Nick Craig-Wood
0a9e6e130f Add Cliff Frey to contributors 2025-12-08 14:45:06 +00:00
Nick Craig-Wood
3358b9049c Add vicerace to contributors 2025-12-08 14:45:06 +00:00
DianaNites
847734d421 b2: Fix listing root buckets with unrestricted API key
Fixes previous pull request #8978

An oversight meant that unrestricted API keys never called
b2_list_buckets, so the root remote could not be listed.

The call is now made when there are no allowed buckets,
which indicates an unrestricted API key.

Fixes #9007
2025-12-04 15:55:17 +00:00
Johannes Rothe
f7b255d4ec googlecloudstorage: improve endpoint parameter docs
When specifying a custom endpoint with a subpath, the Google Cloud Storage
integration has a limitation: the subpath is ignored during upload
operations. For example, with the custom endpoint
"example.org/custom/endpoint", the /custom/endpoint part is not applied
on upload.

As this is most likely an issue with the underlying API client, there is
no way to fix it in rclone. Extending the documentation at least makes
rclone users aware of this limitation.

Related forum thread: https://forum.rclone.org/t/googlecloudstorage-custom-endpoint-subpath-removed-for-upload/53059
2025-12-01 19:04:02 +00:00
Leo
24c752ed9e serve webdav: implement download-directory-as-zip
Signed-off-by: Leo <i@hardrain980.com>
2025-12-01 15:42:16 +00:00
Vladislav Tropnikov
a99d155fd4 s3: The ability to specify an IAM role for cross-account interaction 2025-11-29 13:53:00 +00:00
Cliff Frey
f72b32b470 azureblob: add metadata and tags support across upload and copy paths
This change adds first-class metadata support to the Azure Blob backend,
including headers, user metadata, tags, and modtime overrides, and wires
it through uploads and server-side copies.

There is a behavior change: rclone will now set the "mtime" custom
metadata when doing server-side copies to Azure and the `--metadata`
argument is given.

- Map standard headers: cache-control, content-disposition, content-encoding,
  content-language, content-type to corresponding x-ms-blob-* HTTP headers.
- Map user metadata: any non-reserved keys (excluding x-ms-*) are sent as
  blob user metadata. Keys are normalized to lowercase for consistency.
- Support tags: parse `x-ms-tags` as a comma-separated list of key=value
  pairs and apply them on uploads and copies.
- Support mtime override: accept `mtime` in metadata (RFC3339/RFC3339Nano)
  to override the stored modtime persisted in user metadata.
2025-11-27 16:58:07 +00:00
vicerace
9be7f99bf8 refactor: use strings.Cut to simplify code
Signed-off-by: vicerace <vicerace@sohu.com>
2025-11-27 14:42:11 +00:00
Nick Craig-Wood
6858bf242e docs: note where a provider has an S3 compatible alternative 2025-11-26 12:22:48 +00:00
Nick Craig-Wood
e8c6867e4c Add Shade as sponsor 2025-11-26 12:22:48 +00:00
Nick Craig-Wood
50fbd6b049 Add Duncan Smart to contributors 2025-11-26 12:22:48 +00:00
Nick Craig-Wood
0783cab952 Add Diana to contributors 2025-11-26 12:22:48 +00:00
Duncan Smart
886ac7af1d docs: Clarify OAuth scopes for readonly Google Drive access 2025-11-24 15:58:53 +00:00
Diana
3c40238f02 b2: support authentication with new bucket restricted application keys
Backblaze has updated its b2_authorize_account API endpoint: newly created
application keys are now "multi-bucket" keys, capable of being limited to
multiple buckets. These keys can only be used with the v4 endpoint, not v1,
which returns an HTTP 400.

This commit switches authorization to the v4 endpoint and allows such keys to
work with any of the allowed buckets.

With multi-bucket keys, missing restricted buckets can be non-fatal.

Supports listing the root with multi-bucket API keys.
2025-11-24 15:46:41 +00:00
Nick Craig-Wood
46ca0dd7fe docs: update sponsor logos 2025-11-24 14:58:33 +00:00
Nick Craig-Wood
2e968e7ce0 docs: fix lint error in changelog 2025-11-21 18:23:16 +00:00
Nick Craig-Wood
1886c552db Start v1.73.0-DEV development 2025-11-21 18:23:07 +00:00
63 changed files with 3267 additions and 142 deletions

View File

@@ -229,7 +229,7 @@ jobs:
cache: false
- name: Cache
uses: actions/cache@v4
uses: actions/cache@v5
with:
path: |
~/go/pkg/mod

View File

@@ -129,7 +129,7 @@ jobs:
- name: Load Go Build Cache for Docker
id: go-cache
uses: actions/cache@v4
uses: actions/cache@v5
with:
key: ${{ runner.os }}-${{ steps.imageos.outputs.result }}-go-${{ env.CACHE_NAME }}-${{ env.PLATFORM }}-${{ hashFiles('**/go.mod') }}-${{ hashFiles('**/go.sum') }}
restore-keys: |
@@ -183,7 +183,7 @@ jobs:
touch "/tmp/digests/${digest#sha256:}"
- name: Upload Image Digest
uses: actions/upload-artifact@v5
uses: actions/upload-artifact@v6
with:
name: digests-${{ env.PLATFORM }}
path: /tmp/digests/*
@@ -198,7 +198,7 @@ jobs:
steps:
- name: Download Image Digests
uses: actions/download-artifact@v6
uses: actions/download-artifact@v7
with:
path: /tmp/digests
pattern: digests-*

View File

@@ -17,6 +17,14 @@ linters:
#- prealloc # TODO
- revive
- unconvert
exclusions:
rules:
- linters:
- revive
text: 'var-naming: avoid meaningless package names'
- linters:
- revive
text: 'var-naming: avoid package names that conflict with Go standard library package names'
# Configure checks. Mostly using defaults but with some commented exceptions.
settings:
govet:
@@ -136,6 +144,7 @@ linters:
- name: var-naming
disabled: false
formatters:
enable:
- goimports

View File

@@ -109,6 +109,7 @@ directories to and from different cloud storage providers.
- Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
- Servercore Object Storage [:page_facing_up:](https://rclone.org/s3/#servercore)
- SFTP [:page_facing_up:](https://rclone.org/sftp/)
- Shade [:page_facing_up:](https://rclone.org/shade/)
- SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
- Spectra Logic [:page_facing_up:](https://rclone.org/s3/#spectralogic)
- StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)

View File

@@ -21,6 +21,7 @@ This file describes how to make the various kinds of releases
- make doc
- git status - to check for new man pages - git add them
- git commit -a -v -m "Version v1.XX.0"
- make check
- make retag
- git push origin # without --follow-tags so it doesn't push the tag if it fails
- git push --follow-tags origin

View File

@@ -1 +1 @@
v1.72.0
v1.73.0

View File

@@ -55,6 +55,7 @@ import (
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/seafile"
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/shade"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sia"
_ "github.com/rclone/rclone/backend/smb"

View File

@@ -86,12 +86,56 @@ var (
metadataMu sync.Mutex
)
// system metadata keys which this backend owns
var systemMetadataInfo = map[string]fs.MetadataHelp{
"cache-control": {
Help: "Cache-Control header",
Type: "string",
Example: "no-cache",
},
"content-disposition": {
Help: "Content-Disposition header",
Type: "string",
Example: "inline",
},
"content-encoding": {
Help: "Content-Encoding header",
Type: "string",
Example: "gzip",
},
"content-language": {
Help: "Content-Language header",
Type: "string",
Example: "en-US",
},
"content-type": {
Help: "Content-Type header",
Type: "string",
Example: "text/plain",
},
"tier": {
Help: "Tier of the object",
Type: "string",
Example: "Hot",
ReadOnly: true,
},
"mtime": {
Help: "Time of last modification, read from rclone metadata",
Type: "RFC 3339",
Example: "2006-01-02T15:04:05.999999999Z07:00",
},
}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "azureblob",
Description: "Microsoft Azure Blob Storage",
NewFs: NewFs,
MetadataInfo: &fs.MetadataInfo{
System: systemMetadataInfo,
Help: `User metadata is stored as x-ms-meta- keys. Azure metadata keys are case insensitive and are always returned in lower case.`,
},
Options: []fs.Option{{
Name: "account",
Help: `Azure Storage Account Name.
@@ -810,6 +854,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
BucketBased: true,
BucketBasedRootOK: true,
SetTier: true,
@@ -1157,6 +1204,289 @@ func (o *Object) updateMetadataWithModTime(modTime time.Time) {
o.meta[modTimeKey] = modTime.Format(timeFormatOut)
}
// parseXMsTags parses the value of the x-ms-tags header into a map.
// It expects comma-separated key=value pairs. Whitespace around keys and
// values is trimmed. Empty pairs and empty keys are rejected.
func parseXMsTags(s string) (map[string]string, error) {
if strings.TrimSpace(s) == "" {
return map[string]string{}, nil
}
out := make(map[string]string)
parts := strings.Split(s, ",")
for _, p := range parts {
p = strings.TrimSpace(p)
if p == "" {
continue
}
kv := strings.SplitN(p, "=", 2)
if len(kv) != 2 {
return nil, fmt.Errorf("invalid tag %q", p)
}
k := strings.TrimSpace(kv[0])
v := strings.TrimSpace(kv[1])
if k == "" {
return nil, fmt.Errorf("invalid tag key in %q", p)
}
out[k] = v
}
return out, nil
}
// mapMetadataToAzure maps a generic metadata map to Azure HTTP headers,
// user metadata, tags and optional modTime override.
// Reserved x-ms-* keys (except x-ms-tags) are ignored for user metadata.
//
// Pass a logger to surface non-fatal parsing issues (e.g. bad mtime).
func mapMetadataToAzure(meta map[string]string, logf func(string, ...any)) (headers blob.HTTPHeaders, userMeta map[string]*string, tags map[string]string, modTime *time.Time, err error) {
if meta == nil {
return headers, nil, nil, nil, nil
}
tmp := make(map[string]string)
for k, v := range meta {
lowerKey := strings.ToLower(k)
switch lowerKey {
case "cache-control":
headers.BlobCacheControl = pString(v)
case "content-disposition":
headers.BlobContentDisposition = pString(v)
case "content-encoding":
headers.BlobContentEncoding = pString(v)
case "content-language":
headers.BlobContentLanguage = pString(v)
case "content-type":
headers.BlobContentType = pString(v)
case "x-ms-tags":
parsed, perr := parseXMsTags(v)
if perr != nil {
return headers, nil, nil, nil, perr
}
// allocate only if there are tags
if len(parsed) > 0 {
tags = parsed
}
case "mtime":
// Accept multiple layouts for tolerance
var parsed time.Time
var pErr error
for _, layout := range []string{time.RFC3339Nano, time.RFC3339, timeFormatOut} {
parsed, pErr = time.Parse(layout, v)
if pErr == nil {
modTime = &parsed
break
}
}
// Log and ignore if unparseable
if modTime == nil && logf != nil {
logf("metadata: couldn't parse mtime %q: %v", v, pErr)
}
case "tier":
// ignore - handled elsewhere
default:
// Filter out other reserved headers so they don't end up as user metadata
if strings.HasPrefix(lowerKey, "x-ms-") {
continue
}
tmp[lowerKey] = v
}
}
userMeta = toAzureMetaPtr(tmp)
return headers, userMeta, tags, modTime, nil
}
// toAzureMetaPtr converts a map[string]string to map[string]*string as used by Azure SDK
func toAzureMetaPtr(in map[string]string) map[string]*string {
if len(in) == 0 {
return nil
}
out := make(map[string]*string, len(in))
for k, v := range in {
vv := v
out[k] = &vv
}
return out
}
// assembleCopyParams prepares headers, metadata and tags for copy operations.
//
// It starts from the source properties, optionally overlays mapped metadata
// from rclone's metadata options, ensures mtime presence when mapping is
// enabled, and returns whether mapping was actually requested (hadMapping).
// assembleCopyParams prepares headers, metadata and tags for copy operations.
//
// If includeBaseMeta is true, start user metadata from the source's metadata
// and overlay mapped values. This matches multipart copy commit behavior.
// If false, only include mapped user metadata (no source baseline) which
// matches previous singlepart StartCopyFromURL semantics.
func assembleCopyParams(ctx context.Context, f *Fs, src fs.Object, srcProps *blob.GetPropertiesResponse, includeBaseMeta bool) (headers blob.HTTPHeaders, meta map[string]*string, tags map[string]string, hadMapping bool, err error) {
// Start from source properties
headers = blob.HTTPHeaders{
BlobCacheControl: srcProps.CacheControl,
BlobContentDisposition: srcProps.ContentDisposition,
BlobContentEncoding: srcProps.ContentEncoding,
BlobContentLanguage: srcProps.ContentLanguage,
BlobContentMD5: srcProps.ContentMD5,
BlobContentType: srcProps.ContentType,
}
// Optionally deep copy user metadata pointers from source. Normalise keys to
// lower-case to avoid duplicate x-ms-meta headers when we later inject/overlay
// metadata (Azure treats keys case-insensitively but Go's http.Header will
// join duplicate keys into a comma separated list, which breaks shared-key
// signing).
if includeBaseMeta && len(srcProps.Metadata) > 0 {
meta = make(map[string]*string, len(srcProps.Metadata))
for k, v := range srcProps.Metadata {
if v != nil {
vv := *v
meta[strings.ToLower(k)] = &vv
}
}
}
// Only consider mapping if metadata pipeline is enabled
if fs.GetConfig(ctx).Metadata {
mapped, mapErr := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
if mapErr != nil {
return headers, meta, nil, false, fmt.Errorf("failed to map metadata: %w", mapErr)
}
if mapped != nil {
// Map rclone metadata to Azure shapes
mappedHeaders, userMeta, mappedTags, mappedModTime, herr := mapMetadataToAzure(mapped, func(format string, args ...any) { fs.Debugf(f, format, args...) })
if herr != nil {
return headers, meta, nil, false, fmt.Errorf("metadata mapping: %w", herr)
}
hadMapping = true
// Overlay headers (only non-nil)
if mappedHeaders.BlobCacheControl != nil {
headers.BlobCacheControl = mappedHeaders.BlobCacheControl
}
if mappedHeaders.BlobContentDisposition != nil {
headers.BlobContentDisposition = mappedHeaders.BlobContentDisposition
}
if mappedHeaders.BlobContentEncoding != nil {
headers.BlobContentEncoding = mappedHeaders.BlobContentEncoding
}
if mappedHeaders.BlobContentLanguage != nil {
headers.BlobContentLanguage = mappedHeaders.BlobContentLanguage
}
if mappedHeaders.BlobContentType != nil {
headers.BlobContentType = mappedHeaders.BlobContentType
}
// Overlay user metadata
if len(userMeta) > 0 {
if meta == nil {
meta = make(map[string]*string, len(userMeta))
}
for k, v := range userMeta {
meta[k] = v
}
}
// Apply tags if any
if len(mappedTags) > 0 {
tags = mappedTags
}
// Ensure mtime present using mapped or source time
if _, ok := meta[modTimeKey]; !ok {
when := src.ModTime(ctx)
if mappedModTime != nil {
when = *mappedModTime
}
val := when.Format(time.RFC3339Nano)
if meta == nil {
meta = make(map[string]*string, 1)
}
meta[modTimeKey] = &val
}
// Ensure content-type fallback to source if not set by mapper
if headers.BlobContentType == nil {
headers.BlobContentType = srcProps.ContentType
}
} else {
// Mapping enabled but not provided: ensure mtime present based on source ModTime
if _, ok := meta[modTimeKey]; !ok {
when := src.ModTime(ctx)
val := when.Format(time.RFC3339Nano)
if meta == nil {
meta = make(map[string]*string, 1)
}
meta[modTimeKey] = &val
}
}
}
return headers, meta, tags, hadMapping, nil
}
// applyMappedMetadata applies mapped metadata and headers to the object state for uploads.
//
// It reads `--metadata`, `--metadata-set`, and `--metadata-mapper` outputs via fs.GetMetadataOptions
// and updates o.meta, o.tags and ui.httpHeaders accordingly.
func (o *Object) applyMappedMetadata(ctx context.Context, src fs.ObjectInfo, ui *uploadInfo, options []fs.OpenOption) (modTime time.Time, err error) {
// Start from the source modtime; may be overridden by metadata
modTime = src.ModTime(ctx)
// Fetch mapped metadata if --metadata is enabled
meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
if err != nil {
return modTime, err
}
if meta == nil {
// No metadata processing requested
return modTime, nil
}
// Map metadata using common helper
headers, userMeta, tags, mappedModTime, err := mapMetadataToAzure(meta, func(format string, args ...any) { fs.Debugf(o, format, args...) })
if err != nil {
return modTime, err
}
// Merge headers into ui
if headers.BlobCacheControl != nil {
ui.httpHeaders.BlobCacheControl = headers.BlobCacheControl
}
if headers.BlobContentDisposition != nil {
ui.httpHeaders.BlobContentDisposition = headers.BlobContentDisposition
}
if headers.BlobContentEncoding != nil {
ui.httpHeaders.BlobContentEncoding = headers.BlobContentEncoding
}
if headers.BlobContentLanguage != nil {
ui.httpHeaders.BlobContentLanguage = headers.BlobContentLanguage
}
if headers.BlobContentType != nil {
ui.httpHeaders.BlobContentType = headers.BlobContentType
}
// Apply user metadata to o.meta with a single critical section
if len(userMeta) > 0 {
metadataMu.Lock()
if o.meta == nil {
o.meta = make(map[string]string, len(userMeta))
}
for k, v := range userMeta {
if v != nil {
o.meta[k] = *v
}
}
metadataMu.Unlock()
}
// Apply tags
if len(tags) > 0 {
if o.tags == nil {
o.tags = make(map[string]string, len(tags))
}
for k, v := range tags {
o.tags[k] = v
}
}
if mappedModTime != nil {
modTime = *mappedModTime
}
return modTime, nil
}
// Returns whether file is a directory marker or not
func isDirectoryMarker(size int64, metadata map[string]*string, remote string) bool {
// Directory markers are 0 length
@@ -1951,18 +2281,19 @@ func (f *Fs) copyMultipart(ctx context.Context, remote, dstContainer, dstPath st
return nil, err
}
// Convert metadata from source object
// Prepare metadata/headers/tags for destination
// For multipart commit, include base metadata from source then overlay mapped
commitHeaders, commitMeta, commitTags, _, err := assembleCopyParams(ctx, f, src, srcProperties, true)
if err != nil {
return nil, fmt.Errorf("multipart copy: %w", err)
}
// Convert metadata from source or mapper
options := blockblob.CommitBlockListOptions{
Metadata: srcProperties.Metadata,
Tier: parseTier(f.opt.AccessTier),
HTTPHeaders: &blob.HTTPHeaders{
BlobCacheControl: srcProperties.CacheControl,
BlobContentDisposition: srcProperties.ContentDisposition,
BlobContentEncoding: srcProperties.ContentEncoding,
BlobContentLanguage: srcProperties.ContentLanguage,
BlobContentMD5: srcProperties.ContentMD5,
BlobContentType: srcProperties.ContentType,
},
Metadata: commitMeta,
Tags: commitTags,
Tier: parseTier(f.opt.AccessTier),
HTTPHeaders: &commitHeaders,
}
// Finalise the upload session
@@ -1993,10 +2324,36 @@ func (f *Fs) copySinglepart(ctx context.Context, remote, dstContainer, dstPath s
return nil, fmt.Errorf("single part copy: source auth: %w", err)
}
// Start the copy
// Prepare mapped metadata/tags/headers if requested
options := blob.StartCopyFromURLOptions{
Tier: parseTier(f.opt.AccessTier),
}
var postHeaders *blob.HTTPHeaders
// Read source properties and assemble params; this also handles the case when mapping is disabled
srcProps, err := src.readMetaDataAlways(ctx)
if err != nil {
return nil, fmt.Errorf("single part copy: read source properties: %w", err)
}
// For singlepart copy, do not include base metadata from source in StartCopyFromURL
headers, meta, tags, hadMapping, aerr := assembleCopyParams(ctx, f, src, srcProps, false)
if aerr != nil {
return nil, fmt.Errorf("single part copy: %w", aerr)
}
// Apply tags and post-copy headers only when mapping requested changes
if len(tags) > 0 {
options.BlobTags = make(map[string]string, len(tags))
for k, v := range tags {
options.BlobTags[k] = v
}
}
if hadMapping {
// Only set metadata explicitly when mapping was requested; otherwise
// let the service copy source metadata (including mtime) automatically.
if len(meta) > 0 {
options.Metadata = meta
}
postHeaders = &headers
}
var startCopy blob.StartCopyFromURLResponse
err = f.pacer.Call(func() (bool, error) {
startCopy, err = dstBlobSVC.StartCopyFromURL(ctx, srcURL, &options)
@@ -2026,6 +2383,16 @@ func (f *Fs) copySinglepart(ctx context.Context, remote, dstContainer, dstPath s
pollTime = min(2*pollTime, time.Second)
}
// If mapper requested header changes, set them post-copy
if postHeaders != nil {
blb := f.getBlobSVC(dstContainer, dstPath)
_, setErr := blb.SetHTTPHeaders(ctx, *postHeaders, nil)
if setErr != nil {
return nil, fmt.Errorf("single part copy: failed to set headers: %w", setErr)
}
}
// Metadata (when requested) is set via StartCopyFromURL options.Metadata
return f.NewObject(ctx, remote)
}
@@ -2157,6 +2524,35 @@ func (o *Object) getMetadata() (metadata map[string]*string) {
return metadata
}
// Metadata returns metadata for an object
//
// It returns a combined view of system and user metadata.
func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
// Ensure metadata is loaded
if err := o.readMetaData(ctx); err != nil {
return nil, err
}
m := fs.Metadata{}
// System metadata we expose
if !o.modTime.IsZero() {
m["mtime"] = o.modTime.Format(time.RFC3339Nano)
}
if o.accessTier != "" {
m["tier"] = string(o.accessTier)
}
// Merge user metadata (already lower-cased keys)
metadataMu.Lock()
for k, v := range o.meta {
m[k] = v
}
metadataMu.Unlock()
return m, nil
}
// decodeMetaDataFromPropertiesResponse sets the metadata from the data passed in
//
// Sets
@@ -2995,17 +3391,19 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
// containerPath = containerPath[:len(containerPath)-1]
// }
// Update Mod time
o.updateMetadataWithModTime(src.ModTime(ctx))
if err != nil {
return ui, err
}
// Create the HTTP headers for the upload
// Start with default content-type based on source
ui.httpHeaders = blob.HTTPHeaders{
BlobContentType: pString(fs.MimeType(ctx, src)),
}
// Apply mapped metadata/headers/tags if requested
modTime, err := o.applyMappedMetadata(ctx, src, &ui, options)
if err != nil {
return ui, err
}
// Ensure mtime is set in metadata based on possibly overridden modTime
o.updateMetadataWithModTime(modTime)
// Compute the Content-MD5 of the file. As we stream all uploads it
// will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
if !o.fs.opt.DisableCheckSum {

View File

@@ -5,11 +5,16 @@ package azureblob
import (
"context"
"encoding/base64"
"fmt"
"net/http"
"strings"
"testing"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
@@ -148,4 +153,417 @@ func (f *Fs) testWriteUncommittedBlocks(t *testing.T) {
func (f *Fs) InternalTest(t *testing.T) {
t.Run("Features", f.testFeatures)
t.Run("WriteUncommittedBlocks", f.testWriteUncommittedBlocks)
t.Run("Metadata", f.testMetadataPaths)
}
// helper to read blob properties for an object
func getProps(ctx context.Context, t *testing.T, o fs.Object) *blob.GetPropertiesResponse {
ao := o.(*Object)
props, err := ao.readMetaDataAlways(ctx)
require.NoError(t, err)
return props
}
// helper to assert select headers and user metadata
func assertHeadersAndMetadata(t *testing.T, props *blob.GetPropertiesResponse, want map[string]string, wantUserMeta map[string]string) {
// Headers
get := func(p *string) string {
if p == nil {
return ""
}
return *p
}
if v, ok := want["content-type"]; ok {
assert.Equal(t, v, get(props.ContentType), "content-type")
}
if v, ok := want["cache-control"]; ok {
assert.Equal(t, v, get(props.CacheControl), "cache-control")
}
if v, ok := want["content-disposition"]; ok {
assert.Equal(t, v, get(props.ContentDisposition), "content-disposition")
}
if v, ok := want["content-encoding"]; ok {
assert.Equal(t, v, get(props.ContentEncoding), "content-encoding")
}
if v, ok := want["content-language"]; ok {
assert.Equal(t, v, get(props.ContentLanguage), "content-language")
}
// User metadata (case-insensitive keys from service)
norm := make(map[string]*string, len(props.Metadata))
for kk, vv := range props.Metadata {
norm[strings.ToLower(kk)] = vv
}
for k, v := range wantUserMeta {
pv, ok := norm[strings.ToLower(k)]
if assert.True(t, ok, fmt.Sprintf("missing user metadata key %q", k)) {
if pv == nil {
assert.Equal(t, v, "", k)
} else {
assert.Equal(t, v, *pv, k)
}
} else {
// Log available keys for diagnostics
keys := make([]string, 0, len(props.Metadata))
for kk := range props.Metadata {
keys = append(keys, kk)
}
t.Logf("available user metadata keys: %v", keys)
}
}
}
// helper to read blob tags for an object
func getTagsMap(ctx context.Context, t *testing.T, o fs.Object) map[string]string {
ao := o.(*Object)
blb := ao.getBlobSVC()
resp, err := blb.GetTags(ctx, nil)
require.NoError(t, err)
out := make(map[string]string)
for _, tag := range resp.BlobTagSet {
if tag.Key != nil {
k := *tag.Key
v := ""
if tag.Value != nil {
v = *tag.Value
}
out[k] = v
}
}
return out
}
// Test metadata across different write paths
func (f *Fs) testMetadataPaths(t *testing.T) {
ctx := context.Background()
if testing.Short() {
t.Skip("skipping in short mode")
}
// Common expected metadata and headers
baseMeta := fs.Metadata{
"cache-control": "no-cache",
"content-disposition": "inline",
"content-language": "en-US",
// Note: Don't set content-encoding here to avoid download decoding differences
// We will set a custom user metadata key
"potato": "royal",
// and modtime
"mtime": fstest.Time("2009-05-06T04:05:06.499999999Z").Format(time.RFC3339Nano),
}
// Singlepart upload
t.Run("PutSinglepart", func(t *testing.T) {
// size less than chunk size
contents := random.String(int(f.opt.ChunkSize / 2))
item := fstest.NewItem("meta-single.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
// override content-type via metadata mapping
meta := fs.Metadata{}
meta.Merge(baseMeta)
meta["content-type"] = "text/plain"
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/html", meta)
defer func() { _ = obj.Remove(ctx) }()
props := getProps(ctx, t, obj)
assertHeadersAndMetadata(t, props, map[string]string{
"content-type": "text/plain",
"cache-control": "no-cache",
"content-disposition": "inline",
"content-language": "en-US",
}, map[string]string{
"potato": "royal",
})
_ = http.StatusOK // keep import for parity but don't inspect RawResponse
})
// Multipart upload
t.Run("PutMultipart", func(t *testing.T) {
// size greater than chunk size to force multipart
contents := random.String(int(f.opt.ChunkSize + 1024))
item := fstest.NewItem("meta-multipart.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
meta := fs.Metadata{}
meta.Merge(baseMeta)
meta["content-type"] = "application/json"
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/html", meta)
defer func() { _ = obj.Remove(ctx) }()
props := getProps(ctx, t, obj)
assertHeadersAndMetadata(t, props, map[string]string{
"content-type": "application/json",
"cache-control": "no-cache",
"content-disposition": "inline",
"content-language": "en-US",
}, map[string]string{
"potato": "royal",
})
// Tags: Singlepart upload
t.Run("PutSinglepartTags", func(t *testing.T) {
contents := random.String(int(f.opt.ChunkSize / 2))
item := fstest.NewItem("tags-single.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
meta := fs.Metadata{
"x-ms-tags": "env=dev,team=sync",
}
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "text/plain", meta)
defer func() { _ = obj.Remove(ctx) }()
tags := getTagsMap(ctx, t, obj)
assert.Equal(t, "dev", tags["env"])
assert.Equal(t, "sync", tags["team"])
})
// Tags: Multipart upload
t.Run("PutMultipartTags", func(t *testing.T) {
contents := random.String(int(f.opt.ChunkSize + 2048))
item := fstest.NewItem("tags-multipart.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
meta := fs.Metadata{
"x-ms-tags": "project=alpha,release=2025-08",
}
obj := fstests.PutTestContentsMetadata(ctx, t, f, &item, true, contents, true, "application/octet-stream", meta)
defer func() { _ = obj.Remove(ctx) }()
tags := getTagsMap(ctx, t, obj)
assert.Equal(t, "alpha", tags["project"])
assert.Equal(t, "2025-08", tags["release"])
})
})
// Singlepart copy with metadata-set mapping; omit content-type to exercise fallback
t.Run("CopySinglepart", func(t *testing.T) {
// create small source
contents := random.String(int(f.opt.ChunkSize / 2))
srcItem := fstest.NewItem("meta-copy-single-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil)
defer func() { _ = srcObj.Remove(ctx) }()
// set mapping via MetadataSet
ctx2, ci := fs.AddConfig(ctx)
ci.Metadata = true
ci.MetadataSet = fs.Metadata{
"cache-control": "private, max-age=60",
"content-disposition": "attachment; filename=foo.txt",
"content-language": "fr",
// no content-type: should fallback to source
"potato": "maris",
}
// do copy
dstName := "meta-copy-single-dst.txt"
dst, err := f.Copy(ctx2, srcObj, dstName)
require.NoError(t, err)
defer func() { _ = dst.Remove(ctx2) }()
props := getProps(ctx2, t, dst)
// content-type should fallback to source (text/plain)
assertHeadersAndMetadata(t, props, map[string]string{
"content-type": "text/plain",
"cache-control": "private, max-age=60",
"content-disposition": "attachment; filename=foo.txt",
"content-language": "fr",
}, map[string]string{
"potato": "maris",
})
// mtime should be populated on copy when --metadata is used
// and should equal the source ModTime (RFC3339Nano)
// Read user metadata (case-insensitive)
m := props.Metadata
var gotMtime string
for k, v := range m {
if strings.EqualFold(k, "mtime") && v != nil {
gotMtime = *v
break
}
}
if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
// parse and compare times ignoring formatting differences
parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
require.NoError(t, err)
assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
}
})
// CopySinglepart with only --metadata (no MetadataSet) must inject mtime and preserve src content-type
t.Run("CopySinglepart_MetadataOnly", func(t *testing.T) {
contents := random.String(int(f.opt.ChunkSize / 2))
srcItem := fstest.NewItem("meta-copy-single-only-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil)
defer func() { _ = srcObj.Remove(ctx) }()
ctx2, ci := fs.AddConfig(ctx)
ci.Metadata = true
dstName := "meta-copy-single-only-dst.txt"
dst, err := f.Copy(ctx2, srcObj, dstName)
require.NoError(t, err)
defer func() { _ = dst.Remove(ctx2) }()
props := getProps(ctx2, t, dst)
assertHeadersAndMetadata(t, props, map[string]string{
"content-type": "text/plain",
}, map[string]string{})
// Assert mtime injected
m := props.Metadata
var gotMtime string
for k, v := range m {
if strings.EqualFold(k, "mtime") && v != nil {
gotMtime = *v
break
}
}
if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
require.NoError(t, err)
assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
}
})
// Multipart copy with metadata-set mapping; omit content-type to exercise fallback
t.Run("CopyMultipart", func(t *testing.T) {
// create large source to force multipart
contents := random.String(int(f.opt.CopyCutoff + 1024))
srcItem := fstest.NewItem("meta-copy-multi-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil)
defer func() { _ = srcObj.Remove(ctx) }()
// set mapping via MetadataSet
ctx2, ci := fs.AddConfig(ctx)
ci.Metadata = true
ci.MetadataSet = fs.Metadata{
"cache-control": "max-age=0, no-cache",
// omit content-type to trigger fallback
"content-language": "de",
"potato": "desiree",
}
dstName := "meta-copy-multi-dst.txt"
dst, err := f.Copy(ctx2, srcObj, dstName)
require.NoError(t, err)
defer func() { _ = dst.Remove(ctx2) }()
props := getProps(ctx2, t, dst)
// content-type should fallback to source (application/octet-stream)
assertHeadersAndMetadata(t, props, map[string]string{
"content-type": "application/octet-stream",
"cache-control": "max-age=0, no-cache",
"content-language": "de",
}, map[string]string{
"potato": "desiree",
})
// mtime should be populated on copy when --metadata is used
m := props.Metadata
var gotMtime string
for k, v := range m {
if strings.EqualFold(k, "mtime") && v != nil {
gotMtime = *v
break
}
}
if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
require.NoError(t, err)
assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
}
})
// CopyMultipart with only --metadata must inject mtime and preserve src content-type
t.Run("CopyMultipart_MetadataOnly", func(t *testing.T) {
contents := random.String(int(f.opt.CopyCutoff + 2048))
srcItem := fstest.NewItem("meta-copy-multi-only-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil)
defer func() { _ = srcObj.Remove(ctx) }()
ctx2, ci := fs.AddConfig(ctx)
ci.Metadata = true
dstName := "meta-copy-multi-only-dst.txt"
dst, err := f.Copy(ctx2, srcObj, dstName)
require.NoError(t, err)
defer func() { _ = dst.Remove(ctx2) }()
props := getProps(ctx2, t, dst)
assertHeadersAndMetadata(t, props, map[string]string{
"content-type": "application/octet-stream",
}, map[string]string{})
m := props.Metadata
var gotMtime string
for k, v := range m {
if strings.EqualFold(k, "mtime") && v != nil {
gotMtime = *v
break
}
}
if assert.NotEmpty(t, gotMtime, "mtime not set on destination metadata") {
parsed, err := time.Parse(time.RFC3339Nano, gotMtime)
require.NoError(t, err)
assert.True(t, srcObj.ModTime(ctx2).Equal(parsed), "dst mtime should equal src ModTime")
}
})
// Tags: Singlepart copy
t.Run("CopySinglepartTags", func(t *testing.T) {
// create small source
contents := random.String(int(f.opt.ChunkSize / 2))
srcItem := fstest.NewItem("tags-copy-single-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "text/plain", nil)
defer func() { _ = srcObj.Remove(ctx) }()
// set mapping via MetadataSet including tags
ctx2, ci := fs.AddConfig(ctx)
ci.Metadata = true
ci.MetadataSet = fs.Metadata{
"x-ms-tags": "copy=single,mode=test",
}
dstName := "tags-copy-single-dst.txt"
dst, err := f.Copy(ctx2, srcObj, dstName)
require.NoError(t, err)
defer func() { _ = dst.Remove(ctx2) }()
tags := getTagsMap(ctx2, t, dst)
assert.Equal(t, "single", tags["copy"])
assert.Equal(t, "test", tags["mode"])
})
// Tags: Multipart copy
t.Run("CopyMultipartTags", func(t *testing.T) {
// create large source to force multipart
contents := random.String(int(f.opt.CopyCutoff + 4096))
srcItem := fstest.NewItem("tags-copy-multi-src.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
srcObj := fstests.PutTestContentsMetadata(ctx, t, f, &srcItem, true, contents, true, "application/octet-stream", nil)
defer func() { _ = srcObj.Remove(ctx) }()
ctx2, ci := fs.AddConfig(ctx)
ci.Metadata = true
ci.MetadataSet = fs.Metadata{
"x-ms-tags": "copy=multi,mode=test",
}
dstName := "tags-copy-multi-dst.txt"
dst, err := f.Copy(ctx2, srcObj, dstName)
require.NoError(t, err)
defer func() { _ = dst.Remove(ctx2) }()
tags := getTagsMap(ctx2, t, dst)
assert.Equal(t, "multi", tags["copy"])
assert.Equal(t, "test", tags["mode"])
})
// Negative: invalid x-ms-tags must error
t.Run("InvalidXMsTags", func(t *testing.T) {
contents := random.String(32)
item := fstest.NewItem("tags-invalid.txt", contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
// construct ObjectInfo with invalid x-ms-tags
buf := strings.NewReader(contents)
// Build obj info with metadata
meta := fs.Metadata{
"x-ms-tags": "badpair-without-equals",
}
// force metadata on
ctx2, ci := fs.AddConfig(ctx)
ci.Metadata = true
obji := object.NewStaticObjectInfo(item.Path, item.ModTime, int64(len(contents)), true, nil, nil)
obji = obji.WithMetadata(meta).WithMimeType("text/plain")
_, err := f.Put(ctx2, buf, obji)
require.Error(t, err)
assert.Contains(t, err.Error(), "invalid tag")
})
}

View File

@@ -133,23 +133,32 @@ type File struct {
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
}
// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
// StorageAPI is as returned from the b2_authorize_account call
type StorageAPI struct {
AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
AccountID string `json:"accountId"` // The identifier for the account.
Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
BucketID string `json:"bucketId"` // When present, access is restricted to one bucket.
BucketName string `json:"bucketName"` // When present, name of bucket - may be empty
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
Buckets []struct { // When present, access is restricted to one or more buckets.
ID string `json:"id"` // ID of bucket
Name string `json:"name"` // When present, name of bucket - may be empty
} `json:"buckets"`
Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has for every bucket.
NamePrefix any `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
} `json:"allowed"`
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
MinimumPartSize int `json:"minimumPartSize"` // DEPRECATED: This field will always have the same value as recommendedPartSize. Use recommendedPartSize instead.
RecommendedPartSize int `json:"recommendedPartSize"` // The recommended size for each part of a large file. We recommend using this part size for optimal upload performance.
}
// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
AccountID string `json:"accountId"` // The identifier for the account.
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
APIs struct { // Supported APIs for this account / key. These are API-dependent JSON objects.
Storage StorageAPI `json:"storageApi"`
} `json:"apiInfo"`
}
// ListBucketsRequest is parameters for b2_list_buckets call
type ListBucketsRequest struct {
AccountID string `json:"accountId"` // The identifier for the account.

View File

@@ -607,17 +607,29 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("failed to authorize account: %w", err)
}
// If this is a key limited to a single bucket, it must exist already
if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
allowedBucket := f.opt.Enc.ToStandardName(f.info.Allowed.BucketName)
if allowedBucket == "" {
return nil, errors.New("bucket that application key is restricted to no longer exists")
// If this is a key limited to one or more buckets, one of them must exist
// and be ours.
if f.rootBucket != "" && len(f.info.APIs.Storage.Allowed.Buckets) != 0 {
buckets := f.info.APIs.Storage.Allowed.Buckets
var rootFound = false
var rootID string
for _, b := range buckets {
allowedBucket := f.opt.Enc.ToStandardName(b.Name)
if allowedBucket == "" {
fs.Debugf(f, "bucket %q that application key is restricted to no longer exists", b.ID)
continue
}
if allowedBucket == f.rootBucket {
rootFound = true
rootID = b.ID
}
}
if allowedBucket != f.rootBucket {
return nil, fmt.Errorf("you must use bucket %q with this application key", allowedBucket)
if !rootFound {
return nil, fmt.Errorf("you must use bucket(s) %q with this application key", buckets)
}
f.cache.MarkOK(f.rootBucket)
f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
f.setBucketID(f.rootBucket, rootID)
}
if f.rootBucket != "" && f.rootDirectory != "" {
// Check to see if the (bucket,directory) is actually an existing file
@@ -643,7 +655,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
defer f.authMu.Unlock()
opts := rest.Opts{
Method: "GET",
Path: "/b2api/v1/b2_authorize_account",
Path: "/b2api/v4/b2_authorize_account",
RootURL: f.opt.Endpoint,
UserName: f.opt.Account,
Password: f.opt.Key,
@@ -656,13 +668,13 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
if err != nil {
return fmt.Errorf("failed to authenticate: %w", err)
}
f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
f.srv.SetRoot(f.info.APIs.Storage.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
return nil
}
// hasPermission returns if the current AuthorizationToken has the selected permission
func (f *Fs) hasPermission(permission string) bool {
return slices.Contains(f.info.Allowed.Capabilities, permission)
return slices.Contains(f.info.APIs.Storage.Allowed.Capabilities, permission)
}
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
@@ -1067,44 +1079,83 @@ type listBucketFn func(*api.Bucket) error
// listBucketsToFn lists the buckets to the function supplied
func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error {
var account = api.ListBucketsRequest{
AccountID: f.info.AccountID,
BucketID: f.info.Allowed.BucketID,
}
if bucketName != "" && account.BucketID == "" {
account.BucketName = f.opt.Enc.FromStandardName(bucketName)
responses := make([]api.ListBucketsResponse, len(f.info.APIs.Storage.Allowed.Buckets))[:0]
call := func(id string) error {
var account = api.ListBucketsRequest{
AccountID: f.info.AccountID,
BucketID: id,
}
if bucketName != "" && account.BucketID == "" {
account.BucketName = f.opt.Enc.FromStandardName(bucketName)
}
var response api.ListBucketsResponse
opts := rest.Opts{
Method: "POST",
Path: "/b2_list_buckets",
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return err
}
responses = append(responses, response)
return nil
}
var response api.ListBucketsResponse
opts := rest.Opts{
Method: "POST",
Path: "/b2_list_buckets",
for i := range f.info.APIs.Storage.Allowed.Buckets {
b := &f.info.APIs.Storage.Allowed.Buckets[i]
// Empty names indicate a bucket that no longer exists, this is non-fatal
// for multi-bucket API keys.
if b.Name == "" {
continue
}
// When requesting a specific bucket skip over non-matching names
if bucketName != "" && b.Name != bucketName {
continue
}
err := call(b.ID)
if err != nil {
return err
}
}
err := f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return err
if len(f.info.APIs.Storage.Allowed.Buckets) == 0 {
err := call("")
if err != nil {
return err
}
}
f.bucketIDMutex.Lock()
f.bucketTypeMutex.Lock()
f._bucketID = make(map[string]string, 1)
f._bucketType = make(map[string]string, 1)
for i := range response.Buckets {
bucket := &response.Buckets[i]
bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
f.cache.MarkOK(bucket.Name)
f._bucketID[bucket.Name] = bucket.ID
f._bucketType[bucket.Name] = bucket.Type
for ri := range responses {
response := &responses[ri]
for i := range response.Buckets {
bucket := &response.Buckets[i]
bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
f.cache.MarkOK(bucket.Name)
f._bucketID[bucket.Name] = bucket.ID
f._bucketType[bucket.Name] = bucket.Type
}
}
f.bucketTypeMutex.Unlock()
f.bucketIDMutex.Unlock()
for i := range response.Buckets {
bucket := &response.Buckets[i]
err = fn(bucket)
if err != nil {
return err
for ri := range responses {
response := &responses[ri]
for i := range response.Buckets {
bucket := &response.Buckets[i]
err := fn(bucket)
if err != nil {
return err
}
}
}
return nil
@@ -1606,7 +1657,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
bucket, bucketPath := f.split(remote)
var RootURL string
if f.opt.DownloadURL == "" {
RootURL = f.info.DownloadURL
RootURL = f.info.APIs.Storage.DownloadURL
} else {
RootURL = f.opt.DownloadURL
}
@@ -1957,7 +2008,7 @@ func (o *Object) getOrHead(ctx context.Context, method string, options []fs.Open
// Use downloadUrl from backblaze if downloadUrl is not set
// otherwise use the custom downloadUrl
if o.fs.opt.DownloadURL == "" {
opts.RootURL = o.fs.info.DownloadURL
opts.RootURL = o.fs.info.APIs.Storage.DownloadURL
} else {
opts.RootURL = o.fs.opt.DownloadURL
}

View File

@@ -403,14 +403,14 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
if ciphertext == "" {
return "", nil
}
pos := strings.Index(ciphertext, ".")
if pos == -1 {
before, after, ok := strings.Cut(ciphertext, ".")
if !ok {
return "", ErrorNotAnEncryptedFile
} // No .
num := ciphertext[:pos]
num := before
if num == "!" {
// No rotation; probably original was not valid unicode
return ciphertext[pos+1:], nil
return after, nil
}
dir, err := strconv.Atoi(num)
if err != nil {
@@ -425,7 +425,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
var result bytes.Buffer
inQuote := false
for _, runeValue := range ciphertext[pos+1:] {
for _, runeValue := range after {
switch {
case inQuote:
_, _ = result.WriteRune(runeValue)
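The refactor above leans on strings.Cut (Go 1.18+), which returns the text before and after the first separator plus a found flag, replacing the manual strings.Index bookkeeping. A minimal standalone sketch of the semantics relied on here:

package main

import (
	"fmt"
	"strings"
)

// Demonstrates the strings.Cut behaviour used above: split on the
// first "." and report whether a separator was present at all.
func main() {
	before, after, ok := strings.Cut("12.obfuscated-name", ".")
	fmt.Println(before, after, ok) // 12 obfuscated-name true

	_, _, ok = strings.Cut("plain-name", ".")
	fmt.Println(ok) // false - the code above returns ErrorNotAnEncryptedFile in this case
}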

View File

@@ -346,9 +346,26 @@ can't check the size and hash but the file contents will be decompressed.
Advanced: true,
Default: false,
}, {
Name: "endpoint",
Help: "Endpoint for the service.\n\nLeave blank normally.",
Name: "endpoint",
Help: `Custom endpoint for the storage API. Leave blank to use the provider default.
When using a custom endpoint that includes a subpath (e.g. example.org/custom/endpoint),
the subpath will be ignored during upload operations due to a limitation in the
underlying Google API Go client library.
Download and listing operations will work correctly with the full endpoint path.
If you require subpath support for uploads, avoid using subpaths in your custom
endpoint configuration.`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "storage.example.org",
Help: "Specify a custom endpoint",
}, {
Value: "storage.example.org:4443",
Help: "Specifying a custom endpoint with port",
}, {
Value: "storage.example.org:4443/gcs/api",
Help: "Specifying a subpath, see the note, uploads won't use the custom path!",
}},
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,

View File

@@ -6,6 +6,7 @@ import (
"context"
"crypto/md5"
"encoding/hex"
"errors"
"fmt"
"io"
"path"
@@ -24,7 +25,8 @@ import (
var (
hashType = hash.MD5
// the object storage is persistent
buckets = newBucketsInfo()
buckets = newBucketsInfo()
errWriteOnly = errors.New("can't read when using --memory-discard")
)
// Register with Fs
@@ -33,12 +35,32 @@ func init() {
Name: "memory",
Description: "In memory object storage system.",
NewFs: NewFs,
Options: []fs.Option{},
Options: []fs.Option{{
Name: "discard",
Default: false,
Advanced: true,
Help: `If set all writes will be discarded and reads will return an error
If set then when files are uploaded the contents will not be saved. The
files will appear to have been uploaded but will give an error on
read. Files will have their MD5 sum calculated on upload which takes
very little CPU time and allows the transfers to be checked.
This can be useful for testing performance.
Probably most easily used by using the connection string syntax:
:memory,discard:bucket
`,
}},
})
}
// Options defines the configuration for this backend
type Options struct{}
type Options struct {
Discard bool `config:"discard"`
}
// Fs represents a remote memory server
type Fs struct {
@@ -164,6 +186,7 @@ type objectData struct {
hash string
mimeType string
data []byte
size int64
}
// Object describes a memory object
@@ -558,7 +581,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hashType {
return "", hash.ErrUnsupported
}
if o.od.hash == "" {
if o.od.hash == "" && !o.fs.opt.Discard {
sum := md5.Sum(o.od.data)
o.od.hash = hex.EncodeToString(sum[:])
}
@@ -567,7 +590,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return int64(len(o.od.data))
return o.od.size
}
// ModTime returns the modification time of the object
@@ -593,6 +616,9 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.fs.opt.Discard {
return nil, errWriteOnly
}
var offset, limit int64 = 0, -1
for _, option := range options {
switch x := option.(type) {
@@ -624,13 +650,24 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
bucket, bucketPath := o.split()
data, err := io.ReadAll(in)
var data []byte
var size int64
var hash string
if o.fs.opt.Discard {
h := md5.New()
size, err = io.Copy(h, in)
hash = hex.EncodeToString(h.Sum(nil))
} else {
data, err = io.ReadAll(in)
size = int64(len(data))
}
if err != nil {
return fmt.Errorf("failed to update memory object: %w", err)
}
o.od = &objectData{
data: data,
hash: "",
size: size,
hash: hash,
modTime: src.ModTime(ctx),
mimeType: fs.MimeType(ctx, src),
}

View File

@@ -222,3 +222,11 @@ type UserInfo struct {
} `json:"steps"`
} `json:"journey"`
}
// DiffResult is the response from /diff
type DiffResult struct {
Result int `json:"result"`
DiffID int64 `json:"diffid"`
Entries []map[string]any `json:"entries"`
Error string `json:"error"`
}

View File

@@ -171,6 +171,7 @@ type Fs struct {
dirCache *dircache.DirCache // Map of directory path to directory id
pacer *fs.Pacer // pacer for API calls
tokenRenewer *oauthutil.Renew // renew the token on expiry
lastDiffID int64 // change tracking state for diff long-polling
}
// Object describes a pcloud object
@@ -1033,6 +1034,137 @@ func (f *Fs) Shutdown(ctx context.Context) error {
return nil
}
// ChangeNotify implements fs.Features.ChangeNotify
func (f *Fs) ChangeNotify(ctx context.Context, notify func(string, fs.EntryType), ch <-chan time.Duration) {
// Start long-poll loop in background
go f.changeNotifyLoop(ctx, notify, ch)
}
// changeNotifyLoop contains the blocking long-poll logic.
func (f *Fs) changeNotifyLoop(ctx context.Context, notify func(string, fs.EntryType), ch <-chan time.Duration) {
// Standard polling interval
interval := 30 * time.Second
// Start with diffID = 0 to get the current state
var diffID int64
// Helper to process changes from the diff API
handleChanges := func(entries []map[string]any) {
notifiedPaths := make(map[string]bool)
for _, entry := range entries {
meta, ok := entry["metadata"].(map[string]any)
if !ok {
continue
}
// Robust extraction of ParentFolderID
var pid int64
if val, ok := meta["parentfolderid"]; ok {
switch v := val.(type) {
case float64:
pid = int64(v)
case int64:
pid = v
case int:
pid = int64(v)
}
}
// Resolve the path using dirCache.GetInv
// pCloud uses "d" prefix for directory IDs in cache, but API returns numbers
dirID := fmt.Sprintf("d%d", pid)
parentPath, ok := f.dirCache.GetInv(dirID)
if !ok {
// Parent not in cache, so we can ignore this change as it is outside
// of what the mount has seen or cares about.
continue
}
name, _ := meta["name"].(string)
fullPath := path.Join(parentPath, name)
// Determine EntryType (File or Directory)
entryType := fs.EntryObject
if isFolder, ok := meta["isfolder"].(bool); ok && isFolder {
entryType = fs.EntryDirectory
}
// Deduplicate notifications for this batch
if !notifiedPaths[fullPath] {
fs.Debugf(f, "ChangeNotify: detected change in %q (type: %v)", fullPath, entryType)
notify(fullPath, entryType)
notifiedPaths[fullPath] = true
}
}
}
for {
// Check context and channel
select {
case <-ctx.Done():
return
case newInterval, ok := <-ch:
if !ok {
return
}
interval = newInterval
default:
}
// Setup /diff Request
opts := rest.Opts{
Method: "GET",
Path: "/diff",
Parameters: url.Values{},
}
if diffID != 0 {
opts.Parameters.Set("diffid", strconv.FormatInt(diffID, 10))
opts.Parameters.Set("block", "1")
} else {
opts.Parameters.Set("last", "0")
}
// Perform Long-Poll
// Timeout set to 90s (server usually blocks for 60s max)
reqCtx, cancel := context.WithTimeout(ctx, 90*time.Second)
var result api.DiffResult
_, err := f.srv.CallJSON(reqCtx, &opts, nil, &result)
cancel()
if err != nil {
if errors.Is(err, context.Canceled) {
return
}
// Ignore timeout errors as they are normal for long-polling
if !errors.Is(err, context.DeadlineExceeded) {
fs.Infof(f, "ChangeNotify: polling error: %v. Waiting %v.", err, interval)
time.Sleep(interval)
}
continue
}
// If result is not 0, reset DiffID to resync
if result.Result != 0 {
diffID = 0
time.Sleep(2 * time.Second)
continue
}
if result.DiffID != 0 {
diffID = result.DiffID
f.lastDiffID = diffID
}
if len(result.Entries) > 0 {
handleChanges(result.Entries)
}
}
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
// EU region supports SHA1 and SHA256 (but rclone doesn't
@@ -1401,6 +1533,7 @@ var (
_ fs.ListPer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Shutdowner = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)

View File

@@ -1,26 +1,26 @@
name: Linode
description: Linode Object Storage
endpoint:
nl-ams-1.linodeobjects.com: Amsterdam (Netherlands), nl-ams-1
us-southeast-1.linodeobjects.com: Atlanta, GA (USA), us-southeast-1
in-maa-1.linodeobjects.com: Chennai (India), in-maa-1
us-ord-1.linodeobjects.com: Chicago, IL (USA), us-ord-1
eu-central-1.linodeobjects.com: Frankfurt (Germany), eu-central-1
id-cgk-1.linodeobjects.com: Jakarta (Indonesia), id-cgk-1
gb-lon-1.linodeobjects.com: London 2 (Great Britain), gb-lon-1
us-lax-1.linodeobjects.com: Los Angeles, CA (USA), us-lax-1
es-mad-1.linodeobjects.com: Madrid (Spain), es-mad-1
au-mel-1.linodeobjects.com: Melbourne (Australia), au-mel-1
us-mia-1.linodeobjects.com: Miami, FL (USA), us-mia-1
it-mil-1.linodeobjects.com: Milan (Italy), it-mil-1
us-east-1.linodeobjects.com: Newark, NJ (USA), us-east-1
jp-osa-1.linodeobjects.com: Osaka (Japan), jp-osa-1
fr-par-1.linodeobjects.com: Paris (France), fr-par-1
br-gru-1.linodeobjects.com: São Paulo (Brazil), br-gru-1
us-sea-1.linodeobjects.com: Seattle, WA (USA), us-sea-1
ap-south-1.linodeobjects.com: Singapore, ap-south-1
sg-sin-1.linodeobjects.com: Singapore 2, sg-sin-1
se-sto-1.linodeobjects.com: Stockholm (Sweden), se-sto-1
us-iad-1.linodeobjects.com: Washington, DC, (USA), us-iad-1
nl-ams-1.linodeobjects.com: Amsterdam, NL (nl-ams-1)
us-southeast-1.linodeobjects.com: Atlanta, GA, US (us-southeast-1)
in-maa-1.linodeobjects.com: Chennai, IN (in-maa-1)
us-ord-1.linodeobjects.com: Chicago, IL, US (us-ord-1)
eu-central-1.linodeobjects.com: Frankfurt, DE (eu-central-1)
id-cgk-1.linodeobjects.com: Jakarta, ID (id-cgk-1)
gb-lon-1.linodeobjects.com: London 2, UK (gb-lon-1)
us-lax-1.linodeobjects.com: Los Angeles, CA, US (us-lax-1)
es-mad-1.linodeobjects.com: Madrid, ES (es-mad-1)
us-mia-1.linodeobjects.com: Miami, FL, US (us-mia-1)
it-mil-1.linodeobjects.com: Milan, IT (it-mil-1)
us-east-1.linodeobjects.com: Newark, NJ, US (us-east-1)
jp-osa-1.linodeobjects.com: Osaka, JP (jp-osa-1)
fr-par-1.linodeobjects.com: Paris, FR (fr-par-1)
br-gru-1.linodeobjects.com: Sao Paulo, BR (br-gru-1)
us-sea-1.linodeobjects.com: Seattle, WA, US (us-sea-1)
ap-south-1.linodeobjects.com: Singapore, SG (ap-south-1)
sg-sin-1.linodeobjects.com: Singapore 2, SG (sg-sin-1)
se-sto-1.linodeobjects.com: Stockholm, SE (se-sto-1)
jp-tyo-1.linodeobjects.com: Tokyo 3, JP (jp-tyo-1)
us-iad-10.linodeobjects.com: Washington, DC, US (us-iad-10)
acl: {}
bucket_acl: true

View File

@@ -2,7 +2,17 @@ name: Selectel
description: Selectel Object Storage
region:
ru-1: St. Petersburg
ru-3: St. Petersburg
ru-7: Moscow
gis-1: Moscow
kz-1: Kazakhstan
uz-2: Uzbekistan
endpoint:
s3.ru-1.storage.selcloud.ru: Saint Petersburg
s3.ru-1.storage.selcloud.ru: St. Petersburg
s3.ru-3.storage.selcloud.ru: St. Petersburg
s3.ru-7.storage.selcloud.ru: Moscow
s3.gis-1.storage.selcloud.ru: Moscow
s3.kz-1.storage.selcloud.ru: Kazakhstan
s3.uz-2.storage.selcloud.ru: Uzbekistan
quirks:
list_url_encode: false

View File

@@ -30,9 +30,11 @@ import (
v4signer "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
awsconfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/aws-sdk-go-v2/service/sts"
"github.com/aws/smithy-go"
"github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware"
@@ -325,6 +327,30 @@ If empty it will default to the environment variable "AWS_PROFILE" or
Help: "An AWS session token.",
Advanced: true,
Sensitive: true,
}, {
Name: "role_arn",
Help: `ARN of the IAM role to assume.
Leave blank if not using assume role.`,
Advanced: true,
}, {
Name: "role_session_name",
Help: `Session name for assumed role.
If empty, a session name will be generated automatically.`,
Advanced: true,
}, {
Name: "role_session_duration",
Help: `Session duration for assumed role.
If empty, the default session duration will be used.`,
Advanced: true,
}, {
Name: "role_external_id",
Help: `External ID for assumed role.
Leave blank if not using an external ID.`,
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads and copies.
@@ -927,6 +953,10 @@ type Options struct {
SharedCredentialsFile string `config:"shared_credentials_file"`
Profile string `config:"profile"`
SessionToken string `config:"session_token"`
RoleARN string `config:"role_arn"`
RoleSessionName string `config:"role_session_name"`
RoleSessionDuration fs.Duration `config:"role_session_duration"`
RoleExternalID string `config:"role_external_id"`
UploadConcurrency int `config:"upload_concurrency"`
ForcePathStyle bool `config:"force_path_style"`
V2Auth bool `config:"v2_auth"`
@@ -1290,6 +1320,34 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli
opt.Region = "us-east-1"
}
// Handle assume role if RoleARN is specified
if opt.RoleARN != "" {
fs.Debugf(nil, "Using assume role with ARN: %s", opt.RoleARN)
// Set region for the config before creating STS client
awsConfig.Region = opt.Region
// Create STS client using the base credentials
stsClient := sts.NewFromConfig(awsConfig)
// Configure AssumeRole options
assumeRoleOptions := func(aro *stscreds.AssumeRoleOptions) {
// Set session name if provided, otherwise use a default
if opt.RoleSessionName != "" {
aro.RoleSessionName = opt.RoleSessionName
}
if opt.RoleSessionDuration != 0 {
aro.Duration = time.Duration(opt.RoleSessionDuration)
}
if opt.RoleExternalID != "" {
aro.ExternalID = &opt.RoleExternalID
}
}
// Create AssumeRole credentials provider
awsConfig.Credentials = stscreds.NewAssumeRoleProvider(stsClient, opt.RoleARN, assumeRoleOptions)
}
provider = loadProvider(opt.Provider)
if provider == nil {
fs.Logf("s3", "s3 provider %q not known - please set correctly", opt.Provider)

View File

@@ -0,0 +1,27 @@
// Package api has type definitions for shade
package api
// ListDirResponse -------------------------------------------------
// Format from shade api
type ListDirResponse struct {
Type string `json:"type"` // "file" or "tree"
Path string `json:"path"` // Full path including root
Ino int `json:"ino"` // inode number
Mtime int64 `json:"mtime"` // Modified time in milliseconds
Ctime int64 `json:"ctime"` // Created time in milliseconds
Size int64 `json:"size"` // Size in bytes
Hash string `json:"hash"` // MD5 hash
Draft bool `json:"draft"` // Whether this is a draft file
}
// PartURL Type for multipart upload/download
type PartURL struct {
URL string `json:"url"`
Headers map[string]string `json:"headers,omitempty"`
}
// CompletedPart Type for completed parts when making a multipart upload.
type CompletedPart struct {
ETag string
PartNumber int32
}

backend/shade/shade.go (new file, 1039 lines): file diff suppressed because it is too large

View File

@@ -0,0 +1,21 @@
package shade_test
import (
"testing"
"github.com/rclone/rclone/backend/shade"
"github.com/rclone/rclone/fstest/fstests"
)
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
name := "TestShade"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*shade.Object)(nil),
SkipInvalidUTF8: true,
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "eventually_consistent_delay", Value: "7"},
},
})
}

backend/shade/upload.go (new file, 336 lines)
View File

@@ -0,0 +1,336 @@
// Multipart upload for shade
package shade
import (
"bytes"
"context"
"fmt"
"io"
"net/http"
"net/url"
"path"
"sort"
"sync"
"github.com/rclone/rclone/backend/shade/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/chunksize"
"github.com/rclone/rclone/lib/multipart"
"github.com/rclone/rclone/lib/rest"
)
var warnStreamUpload sync.Once
type shadeChunkWriter struct {
initToken string
chunkSize int64
size int64
f *Fs
o *Object
completedParts []api.CompletedPart
completedPartsMu sync.Mutex
}
// uploadMultipart handles multipart upload for larger files
func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.Reader, options ...fs.OpenOption) error {
chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
Open: o.fs,
OpenOptions: options,
})
if err != nil {
return err
}
var shadeWriter = chunkWriter.(*shadeChunkWriter)
o.size = shadeWriter.size
return nil
}
// OpenChunkWriter returns the chunk size and a ChunkWriter
//
// Pass in the remote and the src object
// You can also use options to hint at the desired chunk size
func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: remote,
}
uploadParts := f.opt.MaxUploadParts
if uploadParts < 1 {
uploadParts = 1
} else if uploadParts > maxUploadParts {
uploadParts = maxUploadParts
}
size := src.Size()
fs.FixRangeOption(options, size)
// calculate size of parts
chunkSize := f.opt.ChunkSize
// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
// buffers here (default 64 MB). With a maximum number of parts (10,000) this will be a file of
// 640 GB.
if size == -1 {
warnStreamUpload.Do(func() {
fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
chunkSize, fs.SizeSuffix(int64(chunkSize)*int64(uploadParts)))
})
} else {
chunkSize = chunksize.Calculator(src, size, uploadParts, chunkSize)
}
token, err := o.fs.refreshJWTToken(ctx)
if err != nil {
return info, nil, fmt.Errorf("failed to get token: %w", err)
}
err = f.ensureParentDirectories(ctx, remote)
if err != nil {
return info, nil, fmt.Errorf("failed to ensure parent directories: %w", err)
}
fullPath := remote
if f.root != "" {
fullPath = path.Join(f.root, remote)
}
// Initiate multipart upload
type initRequest struct {
Path string `json:"path"`
PartSize int64 `json:"partSize"`
}
reqBody := initRequest{
Path: fullPath,
PartSize: int64(chunkSize),
}
var initResp struct {
Token string `json:"token"`
}
opts := rest.Opts{
Method: "POST",
Path: fmt.Sprintf("/%s/upload/multipart", o.fs.drive),
RootURL: o.fs.endpoint,
ExtraHeaders: map[string]string{
"Authorization": "Bearer " + token,
},
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
res, err := o.fs.srv.CallJSON(ctx, &opts, reqBody, &initResp)
if err != nil {
return res != nil && res.StatusCode == http.StatusTooManyRequests, err
}
return false, nil
})
if err != nil {
return info, nil, fmt.Errorf("failed to initiate multipart upload: %w", err)
}
chunkWriter := &shadeChunkWriter{
initToken: initResp.Token,
chunkSize: int64(chunkSize),
size: size,
f: f,
o: o,
}
info = fs.ChunkWriterInfo{
ChunkSize: int64(chunkSize),
Concurrency: f.opt.Concurrency,
LeavePartsOnError: false,
}
return info, chunkWriter, err
}
// WriteChunk will write chunk number with reader bytes, where chunk number >= 0
func (s *shadeChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (bytesWritten int64, err error) {
token, err := s.f.refreshJWTToken(ctx)
if err != nil {
return 0, err
}
// Read chunk
var chunk bytes.Buffer
n, err := io.Copy(&chunk, reader)
if n == 0 {
return 0, nil
}
if err != nil {
return 0, fmt.Errorf("failed to read chunk: %w", err)
}
// Get presigned URL for this part
var partURL api.PartURL
partOpts := rest.Opts{
Method: "POST",
Path: fmt.Sprintf("/%s/upload/multipart/part/%d?token=%s", s.f.drive, chunkNumber+1, url.QueryEscape(s.initToken)),
RootURL: s.f.endpoint,
ExtraHeaders: map[string]string{
"Authorization": "Bearer " + token,
},
}
err = s.f.pacer.Call(func() (bool, error) {
res, err := s.f.srv.CallJSON(ctx, &partOpts, nil, &partURL)
if err != nil {
return res != nil && res.StatusCode == http.StatusTooManyRequests, err
}
return false, nil
})
if err != nil {
return 0, fmt.Errorf("failed to get part URL: %w", err)
}
opts := rest.Opts{
Method: "PUT",
RootURL: partURL.URL,
Body: &chunk,
ContentType: "",
ContentLength: &n,
}
// Add headers
var uploadRes *http.Response
if len(partURL.Headers) > 0 {
opts.ExtraHeaders = make(map[string]string)
for k, v := range partURL.Headers {
opts.ExtraHeaders[k] = v
}
}
err = s.f.pacer.Call(func() (bool, error) {
uploadRes, err = s.f.srv.Call(ctx, &opts)
if err != nil {
return uploadRes != nil && uploadRes.StatusCode == http.StatusTooManyRequests, err
}
return false, nil
})
if err != nil {
return 0, fmt.Errorf("failed to upload part %d: %w", chunk, err)
}
if uploadRes.StatusCode != http.StatusOK && uploadRes.StatusCode != http.StatusCreated {
body, _ := io.ReadAll(uploadRes.Body)
fs.CheckClose(uploadRes.Body, &err)
return 0, fmt.Errorf("part upload failed with status %d: %s", uploadRes.StatusCode, string(body))
}
// Get ETag from response
etag := uploadRes.Header.Get("ETag")
fs.CheckClose(uploadRes.Body, &err)
s.completedPartsMu.Lock()
defer s.completedPartsMu.Unlock()
s.completedParts = append(s.completedParts, api.CompletedPart{
PartNumber: int32(chunkNumber + 1),
ETag: etag,
})
return n, nil
}
// Close complete chunked writer finalising the file.
func (s *shadeChunkWriter) Close(ctx context.Context) error {
// Complete multipart upload
sort.Slice(s.completedParts, func(i, j int) bool {
return s.completedParts[i].PartNumber < s.completedParts[j].PartNumber
})
type completeRequest struct {
Parts []api.CompletedPart `json:"parts"`
}
var completeBody completeRequest
if s.completedParts == nil {
completeBody = completeRequest{Parts: []api.CompletedPart{}}
} else {
completeBody = completeRequest{Parts: s.completedParts}
}
token, err := s.f.refreshJWTToken(ctx)
if err != nil {
return err
}
completeOpts := rest.Opts{
Method: "POST",
Path: fmt.Sprintf("/%s/upload/multipart/complete?token=%s", s.f.drive, url.QueryEscape(s.initToken)),
RootURL: s.f.endpoint,
ExtraHeaders: map[string]string{
"Authorization": "Bearer " + token,
},
}
var response http.Response
err = s.f.pacer.Call(func() (bool, error) {
res, err := s.f.srv.CallJSON(ctx, &completeOpts, completeBody, &response)
if err != nil && res == nil {
return false, err
}
if res.StatusCode == http.StatusTooManyRequests {
return true, err // Retry on 429
}
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated {
body, _ := io.ReadAll(res.Body)
return false, fmt.Errorf("complete multipart failed with status %d: %s", res.StatusCode, string(body))
}
return false, nil
})
if err != nil {
return fmt.Errorf("failed to complete multipart upload: %w", err)
}
return nil
}
// Abort chunk write
//
// You can and should call Abort without calling Close.
func (s *shadeChunkWriter) Abort(ctx context.Context) error {
token, err := s.f.refreshJWTToken(ctx)
if err != nil {
return err
}
opts := rest.Opts{
Method: "POST",
Path: fmt.Sprintf("/%s/upload/abort/multipart?token=%s", s.f.drive, url.QueryEscape(s.initToken)),
RootURL: s.f.endpoint,
ExtraHeaders: map[string]string{
"Authorization": "Bearer " + token,
},
}
err = s.f.pacer.Call(func() (bool, error) {
res, err := s.f.srv.Call(ctx, &opts)
if err != nil {
fs.Debugf(s.f, "Failed to abort multipart upload: %v", err)
return false, nil // Don't retry abort
}
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusCreated {
fs.Debugf(s.f, "Abort returned status %d", res.StatusCode)
}
return false, nil
})
if err != nil {
return fmt.Errorf("failed to abort multipart upload: %w", err)
}
return nil
}

View File

@@ -84,6 +84,7 @@ docs = [
"protondrive.md",
"seafile.md",
"sftp.md",
"shade.md",
"smb.md",
"storj.md",
"sugarsync.md",

View File

@@ -389,8 +389,8 @@ func parseHash(str string) (string, string, error) {
if str == "-" {
return "", "", nil
}
if pos := strings.Index(str, ":"); pos > 0 {
name, val := str[:pos], str[pos+1:]
if before, after, ok := strings.Cut(str, ":"); ok {
name, val := before, after
if name != "" && val != "" {
return name, val, nil
}

View File

@@ -26,6 +26,10 @@ Note that |ls| and |lsl| recurse by default - use |--max-depth 1| to stop the re
The other list commands |lsd|,|lsf|,|lsjson| do not recurse by default -
use |-R| to make them recurse.
By default, list commands prefer a recursive listing method which uses more
memory but fewer transactions. Use |--disable ListR| to suppress this behaviour.
See [|--fast-list|](/docs/#fast-list) for more details.
Listing a nonexistent directory will produce an error except for
remotes which can't have empty directories (e.g. s3, swift, or gcs -
the bucket-based remotes).`, "|", "`")

View File

@@ -153,7 +153,7 @@ func TestRun(t *testing.T) {
fs.Fatal(nil, "error generating test private key "+privateKeyErr.Error())
}
publicKey, publicKeyError := ssh.NewPublicKey(&privateKey.PublicKey)
if privateKeyErr != nil {
if publicKeyError != nil {
fs.Fatal(nil, "error generating test public key "+publicKeyError.Error())
}

View File

@@ -13,6 +13,26 @@ docs](https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html)).
`--auth-key` is not provided then `serve s3` will allow anonymous
access.
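For example, to require credentials (the key pair here is a placeholder):
```console
rclone serve s3 remote:path --auth-key ACCESS_KEY_ID,SECRET_ACCESS_KEY
```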
Like all rclone flags, `--auth-key` can be set via an environment
variable, in this case `RCLONE_AUTH_KEY`. Since this flag can be
repeated, the input to `RCLONE_AUTH_KEY` is CSV encoded. Because each
`accessKey,secretKey` pair contains a comma, it needs to be wrapped in
quotes.
```console
export RCLONE_AUTH_KEY='"user,pass"'
rclone serve s3 ...
```
Or to supply multiple identities:
```console
export RCLONE_AUTH_KEY='"user1,pass1","user2,pass2"'
rclone serve s3 ...
```
Setting this variable without quotes will produce an error.
Please note that some clients may require HTTPS endpoints. See [the
SSL docs](#tls-ssl) for more information.

View File

@@ -70,6 +70,11 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
w.s3Secret = getAuthSecret(opt.AuthKey)
}
authList, err := authlistResolver(opt.AuthKey)
if err != nil {
return nil, fmt.Errorf("parsing auth list failed: %q", err)
}
var newLogger logger
w.faker = gofakes3.New(
newBackend(w),
@@ -77,7 +82,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
gofakes3.WithLogger(newLogger),
gofakes3.WithRequestID(rand.Uint64()),
gofakes3.WithoutVersioning(),
gofakes3.WithV4Auth(authlistResolver(opt.AuthKey)),
gofakes3.WithV4Auth(authList),
gofakes3.WithIntegrityCheck(true), // Check Content-MD5 if supplied
)
@@ -92,7 +97,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options, vfsOpt *vfscommon.Opt
w._vfs = vfs.New(f, vfsOpt)
if len(opt.AuthKey) > 0 {
w.faker.AddAuthKeys(authlistResolver(opt.AuthKey))
w.faker.AddAuthKeys(authList)
}
}

View File

@@ -3,6 +3,7 @@ package s3
import (
"context"
"encoding/hex"
"errors"
"io"
"os"
"path"
@@ -125,15 +126,14 @@ func rmdirRecursive(p string, VFS *vfs.VFS) {
}
}
func authlistResolver(list []string) map[string]string {
func authlistResolver(list []string) (map[string]string, error) {
authList := make(map[string]string)
for _, v := range list {
parts := strings.Split(v, ",")
if len(parts) != 2 {
fs.Infof(nil, "Ignored: invalid auth pair %s", v)
continue
return nil, errors.New("invalid auth pair: expecting a single comma")
}
authList[parts[0]] = parts[1]
}
return authList
return authList, nil
}

View File

@@ -58,10 +58,10 @@ type conn struct {
// interoperate with the rclone sftp backend
func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (err error) {
binary, args := command, ""
space := strings.Index(command, " ")
if space >= 0 {
binary = command[:space]
args = strings.TrimLeft(command[space+1:], " ")
before, after, ok := strings.Cut(command, " ")
if ok {
binary = before
args = strings.TrimLeft(after, " ")
}
args = shellUnEscape(args)
fs.Debugf(c.what, "exec command: binary = %q, args = %q", binary, args)

View File

@@ -45,6 +45,10 @@ var OptionsInfo = fs.Options{{
Name: "disable_dir_list",
Default: false,
Help: "Disable HTML directory list on GET request for a directory",
}, {
Name: "disable_zip",
Default: false,
Help: "Disable zip download of directories",
}}.
Add(libhttp.ConfigInfo).
Add(libhttp.AuthConfigInfo).
@@ -57,6 +61,7 @@ type Options struct {
Template libhttp.TemplateConfig
EtagHash string `config:"etag_hash"`
DisableDirList bool `config:"disable_dir_list"`
DisableZip bool `config:"disable_zip"`
}
// Opt is options set by command line flags
@@ -408,6 +413,24 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str
return
}
dir := node.(*vfs.Dir)
if r.URL.Query().Get("download") == "zip" && !w.opt.DisableZip {
fs.Infof(dirRemote, "%s: Zipping directory", r.RemoteAddr)
zipName := path.Base(dirRemote)
if dirRemote == "" {
zipName = "root"
}
rw.Header().Set("Content-Disposition", "attachment; filename=\""+zipName+".zip\"")
rw.Header().Set("Content-Type", "application/zip")
rw.Header().Set("Last-Modified", time.Now().UTC().Format(http.TimeFormat))
err := vfs.CreateZip(ctx, dir, rw)
if err != nil {
serve.Error(ctx, dirRemote, rw, "Failed to create zip", err)
return
}
return
}
dirEntries, err := dir.ReadDirAll()
if err != nil {
@@ -417,6 +440,7 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str
// Make the entries for display
directory := serve.NewDirectory(dirRemote, w.server.HTMLTemplate())
directory.DisableZip = w.opt.DisableZip
for _, node := range dirEntries {
if vfscommon.Opt.NoModTime {
directory.AddHTMLEntry(node.Path(), node.IsDir(), node.Size(), time.Time{})

View File

@@ -202,6 +202,7 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Selectel" home="https://selectel.ru/services/cloud/storage/" config="/s3/#selectel" >}}
{{< provider name="Servercore Object Storage" home="https://servercore.com/services/object-storage/" config="/s3/#servercore" >}}
{{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol" config="/sftp/" >}}
{{< provider name="Shade" home="https://shade.inc" config="/shade/" >}}
{{< provider name="Sia" home="https://sia.tech/" config="/sia/" >}}
{{< provider name="SMB / CIFS" home="https://en.wikipedia.org/wiki/Server_Message_Block" config="/smb/" >}}
{{< provider name="Spectra Logic" home="https://spectralogic.com/blackpearl-nearline-object-gateway/" config="/s3/#spectralogic" >}}

View File

@@ -1048,3 +1048,15 @@ put them back in again. -->
- jijamik <30904953+jijamik@users.noreply.github.com>
- Dominik Sander <git@dsander.de>
- Nikolay Kiryanov <nikolay@kiryanov.ru>
- Diana <5275194+DianaNites@users.noreply.github.com>
- Duncan Smart <duncan.smart@gmail.com>
- vicerace <vicerace@sohu.com>
- Cliff Frey <cliff@openai.com>
- Vladislav Tropnikov <vtr.name@gmail.com>
- Leo <i@hardrain980.com>
- Johannes Rothe <mail@johannes-rothe.de>
- Tingsong Xu <tingsong.xu@rightcapital.com>
- Jonas Tingeborn <134889+jojje@users.noreply.github.com>
- jhasse-shade <jacob@shade.inc>
- vyv03354 <VYV03354@nifty.ne.jp>
- masrlinu <masrlinu@users.noreply.github.com> <5259918+masrlinu@users.noreply.github.com>

View File

@@ -103,6 +103,26 @@ MD5 hashes are stored with blobs. However blobs that were uploaded in
chunks only have an MD5 if the source remote was capable of MD5
hashes, e.g. the local disk.
### Metadata and tags
Rclone can map arbitrary metadata to Azure Blob headers, user metadata, and tags
when `--metadata` is enabled (or when using `--metadata-set` / `--metadata-mapper`).
- Headers: Set these keys in metadata to map to the corresponding blob headers:
- `cache-control`, `content-disposition`, `content-encoding`, `content-language`, `content-type`.
- User metadata: Any other non-reserved keys are written as user metadata
(keys are normalized to lowercase). Keys starting with `x-ms-` are reserved and
are not stored as user metadata.
- Tags: Provide `x-ms-tags` as a comma-separated list of `key=value` pairs, e.g.
`x-ms-tags=env=dev,team=sync`. These are applied as blob tags on upload and on
server-side copies. Whitespace around keys/values is ignored.
- Modtime override: Provide `mtime` in RFC3339/RFC3339Nano format to override the
stored modtime persisted in user metadata. If `mtime` cannot be parsed, rclone
logs a debug message and ignores the override.
Notes:
- Rclone ignores reserved `x-ms-*` keys (except `x-ms-tags`) for user metadata.
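As a rough illustration (the remote, container and values below are placeholders), the mapping can be driven from the command line with `--metadata-set`:
```console
rclone copyto report.csv azureblob:container/report.csv -M \
  --metadata-set content-type=text/csv \
  --metadata-set cache-control=max-age=3600 \
  --metadata-set "x-ms-tags=env=dev,team=sync" \
  --metadata-set mtime=2020-01-02T15:04:05Z
```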
### Performance
When uploading large files, increasing the value of

View File

@@ -283,7 +283,7 @@ It is useful to know how many requests are sent to the server in different scena
All copy commands send the following 4 requests:
```text
/b2api/v1/b2_authorize_account
/b2api/v4/b2_authorize_account
/b2api/v1/b2_create_bucket
/b2api/v1/b2_list_buckets
/b2api/v1/b2_list_file_names

View File

@@ -6,6 +6,22 @@ description: "Rclone Changelog"
# Changelog
## v1.72.1 - 2025-12-10
[See commits](https://github.com/rclone/rclone/compare/v1.72.0...v1.72.1)
- Bug Fixes
- build: update to go1.25.5 to fix [CVE-2025-61729](https://pkg.go.dev/vuln/GO-2025-4155)
- doc fixes (Duncan Smart, Nick Craig-Wood)
- configfile: Fix piped config support (Jonas Tingeborn)
- log
- Fix PID not included in JSON log output (Tingsong Xu)
- Fix backtrace not going to the --log-file (Nick Craig-Wood)
- Google Cloud Storage
- Improve endpoint parameter docs (Johannes Rothe)
- S3
- Add missing regions for Selectel provider (Nick Craig-Wood)
## v1.72.0 - 2025-11-21
[See commits](https://github.com/rclone/rclone/compare/v1.71.0...v1.72.0)
@@ -26,7 +42,7 @@ description: "Rclone Changelog"
- [rclone test speed](/commands/rclone_test_speed/): Add command to test a specified remotes speed (dougal)
- New Features
- backends: many backends have had a paged listing (`ListP`) interface added
- this enables progress when listing large directories and reduced memory usage
- this enables progress when listing large directories and reduced memory usage
- build
- Bump golang.org/x/crypto from 0.43.0 to 0.45.0 to fix CVE-2025-58181 (dependabot[bot])
- Modernize code and tests (Nick Craig-Wood, russcoss, juejinyuxitu, reddaisyy, dulanting, Oleksandr Redko)

View File

@@ -82,6 +82,7 @@ See the following for detailed instructions for
- [rsync.net](/sftp/#rsync-net)
- [Seafile](/seafile/)
- [SFTP](/sftp/)
- [Shade](/shade/)
- [Sia](/sia/)
- [SMB](/smb/)
- [Storj](/storj/)
@@ -3277,6 +3278,10 @@ The available flags are:
- `mapper` dumps the JSON blobs being sent to the program supplied with
`--metadata-mapper` and received from it. It can be useful for debugging
the metadata mapper interface.
- `curl` dumps the HTTP request as a `curl` command. Can be used with
the other HTTP debugging flags (e.g. `requests`, `bodies`). By
default the auth will be masked - use with `auth` to have the curl
commands with authentication too.
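For example, a minimal invocation, assuming a configured remote named `remote:`:
```console
rclone lsd remote: -vv --dump curl
```
The curl form of each request then appears in the debug log alongside the normal output.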
## Filtering

View File

@@ -265,7 +265,7 @@ account key" button.
`https://www.googleapis.com/auth/drive`
to grant read/write access to Google Drive specifically.
You can also use `https://www.googleapis.com/auth/drive.readonly` for read
only access.
only access with `--drive-scope=drive.readonly`.
- Click "Authorise"
##### 3. Configure rclone, assuming a new install

View File

@@ -14,6 +14,9 @@ managing files in the cloud easy. Its cross-platform file backup
services let you upload and back up files from any internet-connected
device.
**Note** FileLu now has a fully featured S3 backend [FileLu S5](/s3#filelu-s5),
an industry standard S3 compatible object store.
## Configuration
Here is an example of how to make a remote called `filelu`. First, run:

View File

@@ -59,6 +59,7 @@ Here is an overview of the major features of each cloud storage system.
| Quatrix by Maytech | - | R/W | No | No | - | - |
| Seafile | - | - | No | No | - | - |
| SFTP | MD5, SHA1 ² | DR/W | Depends | No | - | - |
| Shade | - | - | Yes | No | - | - |
| Sia | - | - | No | No | - | - |
| SMB | - | R/W | Yes | No | - | - |
| SugarSync | - | - | No | No | - | - |
@@ -540,7 +541,7 @@ upon backend-specific capabilities.
| OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes | Yes |
| OpenStack Swift | Yes ¹ | Yes | No | No | No | Yes | Yes | No | No | Yes | No |
| Oracle Object Storage | No | Yes | No | No | Yes | Yes | Yes | Yes | No | No | No |
| pCloud | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes |
| pCloud | Yes | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes |
| PikPak | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes |
| Pixeldrain | Yes | No | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes |
| premiumize.me | Yes | No | Yes | Yes | No | No | No | No | Yes | Yes | Yes |

View File

@@ -173,6 +173,31 @@ So if the folder you want rclone to use your is "My Music/", then use the return
id from ```rclone lsf``` command (ex. `dxxxxxxxx2`) as the `root_folder_id` variable
value in the config file.
### Change notifications and mounts
The pCloud backend supports real-time updates for rclone mounts via change
notifications. rclone uses pCloud's diff long-polling API to detect changes and
will automatically refresh directory listings in the mounted filesystem when
changes occur.
Notes and behavior:
- Works automatically when using `rclone mount` and requires no additional
configuration.
- Notifications are directory-scoped: when rclone detects a change, it refreshes
the affected directory so new/removed/renamed files become visible promptly.
- Updates are near real-time. The backend uses a long poll with short fallback
polling intervals, so you should see changes appear quickly without manual
refreshes.
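For example, a plain mount is enough to pick up these notifications (the mount point is illustrative):
```bash
rclone mount remote: /path/to/mountpoint
```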
If you want to debug or verify notifications, you can use the helper command:
```bash
rclone test changenotify remote:
```
This will log incoming change notifications for the given remote.
<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/pcloud/pcloud.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
### Standard options

View File

@@ -745,6 +745,68 @@ If none of these option actually end up providing `rclone` with AWS
credentials then S3 interaction will be non-authenticated (see the
[anonymous access](#anonymous-access) section for more info).
#### Assume Role (Cross-Account Access)
If you need to access S3 resources in a different AWS account, you can use IAM role assumption.
This is useful for cross-account access scenarios where you have credentials in one account
but need to access resources in another account.
To use assume role, configure the following parameters:
- `role_arn` - The ARN (Amazon Resource Name) of the IAM role to assume in the target account.
Format: `arn:aws:iam::ACCOUNT-ID:role/ROLE-NAME`
- `role_session_name` (optional) - A name for the assumed role session. If not specified,
rclone will generate one automatically.
- `role_session_duration` (optional) - Duration for which the assumed role credentials are valid.
If not specified, AWS default duration will be used (typically 1 hour).
- `role_external_id` (optional) - An external ID required by the role's trust policy for additional security.
This is typically used when the role is accessed by a third party.
The assume role feature works with both direct credentials (`env_auth = false`) and environment-based
authentication (`env_auth = true`). Rclone will first authenticate using the base credentials, then
use those credentials to assume the specified role.
Example configuration for cross-account access:
```
[s3-cross-account]
type = s3
provider = AWS
env_auth = true
region = us-east-1
role_arn = arn:aws:iam::123456789012:role/CrossAccountS3Role
role_session_name = rclone-session
role_external_id = unique-role-external-id-12345
```
In this example:
- Base credentials are obtained from the environment (IAM role, credentials file, or environment variables)
- These credentials are then used to assume the role `CrossAccountS3Role` in account `123456789012`
- An external ID is provided for additional security as required by the role's trust policy
The target role's trust policy in the destination account must allow the source account or user to assume it.
Example trust policy:
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::SOURCE-ACCOUNT-ID:root"
},
"Action": "sts:AssumeRole",
"Condition": {
"StringEquals": {
"sts:ExternalID": "unique-role-external-id-12345"
}
}
}
]
}
```
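Once the trust policy is in place the remote is used like any other S3 remote and the assumed-role credentials are applied transparently (the bucket name here is a placeholder):
```console
rclone lsd s3-cross-account:
rclone sync /local/data s3-cross-account:my-bucket/backup
```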
### S3 Permissions
When using the `sync` subcommand of `rclone` the following minimum

docs/content/shade.md (new file, 218 lines)
View File

@@ -0,0 +1,218 @@
# {{< icon "fa fa-moon" >}} Shade
This is a backend for the [Shade](https://shade.inc/) platform
## About Shade
[Shade](https://shade.inc/) is an AI-powered cloud NAS that makes your cloud files behave like a local drive, optimized for media and creative workflows. It provides fast, secure access with natural-language search, easy sharing, and scalable cloud storage.
## Accounts & Pricing
To use this backend, you need to [create a free account](https://app.shade.inc/) on Shade. A free account includes 20 GB of storage.
## Usage
Paths are specified as `remote:path`
Paths may be as deep as required, e.g. `remote:directory/subdirectory`.
## Configuration
Here is an example of making a Shade configuration.
First, [create a free account](https://app.shade.inc/) and choose a plan.
You will need to log in and get the `API Key` from your account settings and the `Drive ID` from the settings of the drive you created.
Now run
`rclone config`
Follow this interactive process:
```sh
$ rclone config
e) Edit existing remote
n) New remote
d) Delete remote
r) Rename remote
c) Copy remote
s) Set configuration password
q) Quit config
e/n/d/r/c/s/q> n
Enter name for new remote.
name> Shade
Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[OTHER OPTIONS]
xx / Shade FS
\ (shade)
[OTHER OPTIONS]
Storage> xx
Option drive_id.
The ID of your drive, see this in the drive settings. Individual rclone configs must be made per drive.
Enter a value.
drive_id> [YOUR_ID]
Option api_key.
An API key for your account.
Enter a value.
api_key> [YOUR_API_KEY]
Edit advanced config?
y) Yes
n) No (default)
y/n> n
Configuration complete.
Options:
- type: shade
- drive_id: [YOUR_ID]
- api_key: [YOUR_API_KEY]
Keep this "Shade" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```
### Modification times and hashes
Shade does not support hashes or setting modification times.
### Transfers
Shade uses multipart uploads by default. This means that files will be chunked and sent up to Shade concurrently. To configure how many simultaneous uploads to use, set the 'concurrency' option in the advanced config section. Note that increasing this uses more memory and issues more HTTP requests.
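For example, a copy with a larger chunk size might look like this (the paths and value are illustrative; see the advanced options below):
```sh
rclone copy /local/footage Shade:projects --shade-chunk-size 128M -P
```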
### Deleting files
Please note that deleting files in Shade via rclone deletes them instantly instead of sending them to the trash, so they are not recoverable.
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/shade/shade.go then run make backenddocs" >}}
### Standard options
Here are the Standard options specific to shade (Shade FS).
#### --shade-drive-id
The ID of your drive, see this in the drive settings. Individual rclone configs must be made per drive.
Properties:
- Config: drive_id
- Env Var: RCLONE_SHADE_DRIVE_ID
- Type: string
- Required: true
#### --shade-api-key
An API key for your account. You can find this under Settings > API Keys
Properties:
- Config: api_key
- Env Var: RCLONE_SHADE_API_KEY
- Type: string
- Required: true
### Advanced options
Here are the Advanced options specific to shade (Shade FS).
#### --shade-endpoint
Endpoint for the service.
Leave blank normally.
Properties:
- Config: endpoint
- Env Var: RCLONE_SHADE_ENDPOINT
- Type: string
- Required: false
#### --shade-chunk-size
Chunk size to use for uploading.
Any files larger than this will be uploaded in chunks of this size.
Note that this is stored in memory per transfer, so increasing it will
increase memory usage.
Minimum is 5MB, maximum is 5GB.
Properties:
- Config: chunk_size
- Env Var: RCLONE_SHADE_CHUNK_SIZE
- Type: SizeSuffix
- Default: 64Mi
#### --shade-encoding
The encoding for the backend.
See the [encoding section in the overview](/overview/#encoding) for more info.
Properties:
- Config: encoding
- Env Var: RCLONE_SHADE_ENCODING
- Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
#### --shade-description
Description of the remote.
Properties:
- Config: description
- Env Var: RCLONE_SHADE_DESCRIPTION
- Type: string
- Required: false
{{< rem autogenerated options stop >}}
## Limitations
Note that Shade is case insensitive so you can't have a file called
"Hello.doc" and one called "hello.doc".
Shade only supports filenames up to 255 characters in length.
`rclone about` is not supported by the Shade backend. Backends without
this capability cannot determine free space for an rclone mount or
use policy `mfs` (most free space) as a member of an rclone union
remote.
See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/)
## Backend commands
Here are the commands specific to the shade backend.
Run them with
rclone backend COMMAND remote:
The help below will explain what arguments each command takes.
See the [backend](/commands/rclone_backend/) command for more
info on how to pass options and arguments.
These can be run on a running backend using the rc command
[backend/command](/rc/#backend-command).

View File

@@ -19,8 +19,9 @@ Thank you to our sponsors:
{{< sponsor src="/img/logos/mega-s4.svg" width="300" height="200" title="MEGA S4: New S3 compatible object storage. High scale. Low cost. Free egress." link="https://mega.io/objectstorage?utm_source=rclone&utm_medium=referral&utm_campaign=rclone-mega-s4&mct=rclonepromo">}}
{{< sponsor src="/img/logos/sia.svg" width="200" height="200" title="Visit our sponsor sia" link="https://sia.tech">}}
{{< sponsor src="/img/logos/route4me.svg" width="400" height="200" title="Visit our sponsor Route4Me" link="https://route4me.com/">}}
{{< sponsor src="/img/logos/rcloneview.svg" width="300" height="200" title="Visit our sponsor RcloneView" link="https://rcloneview.com/">}}
{{< sponsor src="/img/logos/rcloneview-banner.svg" width="300" height="200" title="Visit our sponsor RcloneView" link="https://rcloneview.com/">}}
{{< sponsor src="/img/logos/rcloneui.svg" width="300" height="200" title="Visit our sponsor RcloneUI" link="https://github.com/rclone-ui/rclone-ui">}}
{{< sponsor src="/img/logos/shade.svg" width="300" height="200" title="Visit our sponsor Shade" link="https://shade.inc">}}
{{< sponsor src="/img/logos/filelu-rclone.svg" width="300" height="200" title="Visit our sponsor FileLu" link="https://filelu.com/">}}
{{< sponsor src="/img/logos/torbox.png" width="200" height="200" title="Visit our sponsor TORBOX" link="https://www.torbox.app/">}}
{{< sponsor src="/img/logos/spectra-logic.svg" width="300" height="200" title="Visit our sponsor Spectra Logic" link="https://spectralogic.com/">}}

View File

@@ -14,13 +14,12 @@
Platinum Sponsor
</div>
<div class="card-body">
<a id="platinum" href="https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general" target="_blank" rel="noopener" title="Visit rclone's sponsor Rabata.io"><img style="width: 100%; height: auto;" src="/img/logos/rabata/txt_1_300x114.png"></a><br />
<a id="platinum" href="https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general" target="_blank" rel="noopener" title="Visit rclone's sponsor Rabata.io"><img style="width: 100%; height: auto;" src="/img/logos/rabata/txt_1_website.png"></a><br />
<script>
const imgs = [
{ href: "https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general", img: "/img/logos/rabata/txt_1_300x114.png" },
{ href: "https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general", img: "/img/logos/rabata/txt_2_300x114.png" },
{ href: "https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general", img: "/img/logos/rabata/txt_3_300x114.png" },
{ href: "https://rabata.io/grant-application?utm_source=banner&utm_medium=rclone&utm_content=grant1", img: "/img/logos/rabata/100k_300x114.png" },
{ href: "https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general", img: "/img/logos/rabata/txt_1_website.png" },
{ href: "https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general", img: "/img/logos/rabata/txt_2_website.png" },
{ href: "https://rabata.io/grant-application?utm_source=banner&utm_medium=rclone&utm_content=grant1", img: "/img/logos/rabata/100k_website.png" },
];
const img = imgs[Math.floor(Math.random() * imgs.length)];
document.addEventListener("DOMContentLoaded", () => {
@@ -67,6 +66,14 @@
<a href="https://github.com/rclone-ui/rclone-ui" target="_blank" rel="noopener" title="Visit rclone's sponsor rclone UI"><img src="/img/logos/rcloneui.svg"></a><br />
</div>
</div>
<div class="card">
<div class="card-header">
Silver Sponsor
</div>
<div class="card-body">
<a href="https://shade.inc/" target="_blank" rel="noopener" title="Visit rclone's sponsor Shade"><img style="max-width: 100%; height: auto;" src="/img/logos/shade.svg"></a><br />
</div>
</div>
{{end}}
<div class="card">

View File

@@ -55,7 +55,7 @@
<a class="dropdown-item" href="/fichier/"><i class="fa fa-archive fa-fw"></i> 1Fichier</a>
<a class="dropdown-item" href="/netstorage/"><i class="fas fa-database fa-fw"></i> Akamai NetStorage</a>
<a class="dropdown-item" href="/alias/"><i class="fa fa-link fa-fw"></i> Alias</a>
<a class="dropdown-item" href="/s3/"><i class="fab fa-amazon fa-fw"></i> Amazon S3</a>
<a class="dropdown-item" href="/s3/"><i class="fab fa-amazon fa-fw"></i> Amazon S3 Storage Providers</a>
<a class="dropdown-item" href="/archive/"><i class="fa fa-archive fa-fw"></i> Archive</a>
<a class="dropdown-item" href="/b2/"><i class="fa fa-fire fa-fw"></i> Backblaze B2</a>
<a class="dropdown-item" href="/box/"><i class="fa fa-archive fa-fw"></i> Box</a>
@@ -69,6 +69,7 @@
<a class="dropdown-item" href="/dropbox/"><i class="fab fa-dropbox fa-fw"></i> Dropbox</a>
<a class="dropdown-item" href="/filefabric/"><i class="fa fa-cloud fa-fw"></i> Enterprise File Fabric</a>
<a class="dropdown-item" href="/filelu/"><i class="fa fa-folder fa-fw"></i> FileLu Cloud Storage</a>
<a class="dropdown-item" href="/s3/#filelu-s5"><i class="fa fa-folder fa-fw"></i> FileLu S5 (S3-Compatible)</a>
<a class="dropdown-item" href="/filescom/"><i class="fa fa-brands fa-files-pinwheel fa-fw"></i> Files.com</a>
<a class="dropdown-item" href="/ftp/"><i class="fa fa-file fa-fw"></i> FTP</a>
<a class="dropdown-item" href="/gofile/"><i class="fa fa-folder fa-fw"></i> Gofile</a>
@@ -87,7 +88,7 @@
<a class="dropdown-item" href="/linkbox/"><i class="fa fa-infinity fa-fw"></i> Linkbox</a>
<a class="dropdown-item" href="/mailru/"><i class="fa fa-at fa-fw"></i> Mail.ru Cloud</a>
<a class="dropdown-item" href="/mega/"><i class="fa fa-archive fa-fw"></i> Mega</a>
<a class="dropdown-item" href="/s3/#mega"><i class="fa fa-archive fa-fw"></i> Mega S4</a>
<a class="dropdown-item" href="/s3/#mega"><i class="fa fa-archive fa-fw"></i> Mega S4 (S3-Compatible)</a>
<a class="dropdown-item" href="/memory/"><i class="fas fa-memory fa-fw"></i> Memory</a>
<a class="dropdown-item" href="/azureblob/"><i class="fab fa-windows fa-fw"></i> Microsoft Azure Blob Storage</a>
<a class="dropdown-item" href="/azurefiles/"><i class="fab fa-windows fa-fw"></i> Microsoft Azure Files Storage</a>
@@ -106,6 +107,7 @@
<a class="dropdown-item" href="/seafile/"><i class="fa fa-server fa-fw"></i> Seafile</a>
<a class="dropdown-item" href="/sftp/"><i class="fa fa-server fa-fw"></i> SFTP</a>
<a class="dropdown-item" href="/sia/"><i class="fa fa-globe fa-fw"></i> Sia</a>
<a class="dropdown-item" href="/shade/"><i class="fa fa-moon fa-fw"></i> Shade</a>
<a class="dropdown-item" href="/smb/"><i class="fa fa-server fa-fw"></i> SMB / CIFS</a>
<a class="dropdown-item" href="/storj/"><i class="fas fa-dove fa-fw"></i> Storj</a>
<a class="dropdown-item" href="/sugarsync/"><i class="fas fa-dove fa-fw"></i> SugarSync</a>

View File

@@ -1 +1 @@
v1.72.0
v1.73.0

View File

@@ -29,16 +29,16 @@ func (bp *BwPair) String() string {
// Set the bandwidth from a string which is either
// SizeSuffix or SizeSuffix:SizeSuffix (for tx:rx bandwidth)
func (bp *BwPair) Set(s string) (err error) {
colon := strings.Index(s, ":")
before, after, ok := strings.Cut(s, ":")
stx, srx := s, ""
if colon >= 0 {
stx, srx = s[:colon], s[colon+1:]
if ok {
stx, srx = before, after
}
err = bp.Tx.Set(stx)
if err != nil {
return err
}
if colon < 0 {
if !ok {
bp.Rx = bp.Tx
} else {
err = bp.Rx.Set(srx)

View File

@@ -2,6 +2,7 @@ package configfile
import (
"fmt"
"io"
"os"
"path/filepath"
"runtime"
@@ -362,3 +363,39 @@ func TestConfigFileSaveSymlinkAbsolute(t *testing.T) {
testSymlink(t, link, target, resolvedTarget)
})
}
type pipedInput struct {
io.Reader
}
func (p *pipedInput) Read(b []byte) (int, error) {
return p.Reader.Read(b)
}
func (*pipedInput) Seek(int64, int) (int64, error) {
return 0, fmt.Errorf("Seek not supported")
}
func TestPipedConfig(t *testing.T) {
t.Run("DoesNotSupportSeeking", func(t *testing.T) {
r := &pipedInput{strings.NewReader("")}
_, err := r.Seek(0, io.SeekStart)
require.Error(t, err)
})
t.Run("IsSupported", func(t *testing.T) {
r := &pipedInput{strings.NewReader(configData)}
_, err := config.Decrypt(r)
require.NoError(t, err)
})
t.Run("PlainTextConfigIsNotConsumedByCryptCheck", func(t *testing.T) {
in := &pipedInput{strings.NewReader(configData)}
r, _ := config.Decrypt(in)
got, err := io.ReadAll(r)
require.NoError(t, err)
assert.Equal(t, configData, string(got))
})
}

View File

@@ -77,8 +77,9 @@ func Decrypt(b io.ReadSeeker) (io.Reader, error) {
if strings.HasPrefix(l, "RCLONE_ENCRYPT_V") {
return nil, errors.New("unsupported configuration encryption - update rclone for support")
}
// Restore non-seekable plain-text stream to its original state
if _, err := b.Seek(0, io.SeekStart); err != nil {
return nil, err
return io.MultiReader(strings.NewReader(l+"\n"), r), nil
}
return b, nil
}

View File

@@ -14,6 +14,7 @@ const (
DumpGoRoutines
DumpOpenFiles
DumpMapper
DumpCurl
)
type dumpChoices struct{}
@@ -29,6 +30,7 @@ func (dumpChoices) Choices() []BitsChoicesInfo {
{uint64(DumpGoRoutines), "goroutines"},
{uint64(DumpOpenFiles), "openfiles"},
{uint64(DumpMapper), "mapper"},
{uint64(DumpCurl), "curl"},
}
}

View File

@@ -15,6 +15,8 @@ import (
"net/http/httputil"
"net/url"
"os"
"slices"
"strings"
"sync"
"time"
@@ -24,6 +26,7 @@ import (
"github.com/rclone/rclone/lib/structs"
"github.com/youmark/pkcs8"
"golang.org/x/net/publicsuffix"
"moul.io/http2curl/v2"
)
const (
@@ -439,6 +442,18 @@ func cleanAuths(buf []byte) []byte {
return buf
}
// cleanCurl gets rid of Auth headers in a curl command
func cleanCurl(cmd *http2curl.CurlCommand) {
for _, authBuf := range authBufs {
auth := "'" + string(authBuf)
for i, arg := range *cmd {
if strings.HasPrefix(arg, auth) {
(*cmd)[i] = auth + "XXXX'"
}
}
}
}
var expireWindow = 30 * time.Second
func isCertificateExpired(cc *tls.Config) bool {
@@ -492,6 +507,26 @@ func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error
fs.Debugf(nil, "%s", separatorReq)
logMutex.Unlock()
}
// Dump curl request
if t.dump&(fs.DumpCurl) != 0 {
cmd, err := http2curl.GetCurlCommand(req)
if err != nil {
fs.Debugf(nil, "Failed to create curl command: %v", err)
} else {
// Patch -X HEAD into --head
for i := range len(*cmd) - 1 {
if (*cmd)[i] == "-X" && (*cmd)[i+1] == "'HEAD'" {
(*cmd)[i] = "--head"
*cmd = slices.Delete(*cmd, i+1, i+2)
break
}
}
if t.dump&fs.DumpAuth == 0 {
cleanCurl(cmd)
}
fs.Debugf(nil, "HTTP REQUEST: %v", cmd)
}
}
// Do round trip
resp, err = t.Transport.RoundTrip(req)
// Logf response

View File

@@ -19,6 +19,7 @@ import (
"github.com/rclone/rclone/fs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"moul.io/http2curl/v2"
)
func TestCleanAuth(t *testing.T) {
@@ -61,6 +62,32 @@ func TestCleanAuths(t *testing.T) {
}
}
func TestCleanCurl(t *testing.T) {
for _, test := range []struct {
in []string
want []string
}{{
[]string{""},
[]string{""},
}, {
[]string{"floo"},
[]string{"floo"},
}, {
[]string{"'Authorization: AAAAAAAAA'", "'Potato: Help'", ""},
[]string{"'Authorization: XXXX'", "'Potato: Help'", ""},
}, {
[]string{"'X-Auth-Token: AAAAAAAAA'", "'Potato: Help'", ""},
[]string{"'X-Auth-Token: XXXX'", "'Potato: Help'", ""},
}, {
[]string{"'X-Auth-Token: AAAAAAAAA'", "'Authorization: AAAAAAAAA'", "'Potato: Help'", ""},
[]string{"'X-Auth-Token: XXXX'", "'Authorization: XXXX'", "'Potato: Help'", ""},
}} {
in := http2curl.CurlCommand(test.in)
cleanCurl(&in)
assert.Equal(t, test.want, test.in, test.in)
}
}
var certSerial = int64(0)
// Create a test certificate and key pair that is valid for a specific

View File

@@ -209,7 +209,7 @@ func InitLogging() {
// Log file output
if Opt.File != "" {
var w io.Writer
if Opt.MaxSize == 0 {
if Opt.MaxSize < 0 {
// No log rotation - just open the file as normal
// We'll capture tracebacks like this too.
f, err := os.OpenFile(Opt.File, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640)

View File

@@ -310,6 +310,10 @@ func (h *OutputHandler) jsonLog(ctx context.Context, buf *bytes.Buffer, r slog.R
r.AddAttrs(
slog.String("source", getCaller(2)),
)
// Add PID if requested
if h.format&logFormatPid != 0 {
r.AddAttrs(slog.Int("pid", os.Getpid()))
}
h.mu.Lock()
err = h.jsonHandler.Handle(ctx, r)
if err == nil {

View File

@@ -198,6 +198,17 @@ func TestAddOutputUseJSONLog(t *testing.T) {
assert.Equal(t, "2020/01/02 03:04:05 INFO : world\n", extraText)
}
// Test JSON log includes PID when logFormatPid is set.
func TestJSONLogWithPid(t *testing.T) {
buf := &bytes.Buffer{}
h := NewOutputHandler(buf, nil, logFormatJSON|logFormatPid)
r := slog.NewRecord(t0, slog.LevelInfo, "hello", 0)
require.NoError(t, h.Handle(context.Background(), r))
output := buf.String()
assert.Contains(t, output, fmt.Sprintf(`"pid":%d`, os.Getpid()))
}
// Test WithAttrs and WithGroup return new handlers with same settings.
func TestWithAttrsAndGroup(t *testing.T) {
buf := &bytes.Buffer{}

View File

@@ -1301,6 +1301,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) {
err := Sync(ctx, r.Fremote, r.Flocal, false)
assert.Equal(t, fs.ErrorNotDeleting, err)
testLoggerVsLsf(ctx, r.Fremote, r.Flocal, operations.GetLoggerOpt(ctx).JSON, t)
accounting.GlobalStats().ResetCounters()
r.CheckLocalListing(
t,

View File

@@ -13,6 +13,7 @@ import (
_ "github.com/rclone/rclone/backend/all"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
@@ -507,6 +508,7 @@ func TestError(t *testing.T) {
err = Sync(ctx, r.Fremote, r.Flocal, true)
// testLoggerVsLsf(ctx, r.Fremote, r.Flocal, operations.GetLoggerOpt(ctx).JSON, t)
assert.Error(t, err)
accounting.GlobalStats().ResetCounters()
r.CheckLocalListing(t, []fstest.Item{file1}, []string{"toe", "toe/toe"})
r.CheckRemoteListing(t, []fstest.Item{file1}, []string{"toe", "toe/toe"})

View File

@@ -1,4 +1,4 @@
package fs
// VersionTag of rclone
var VersionTag = "v1.72.0"
var VersionTag = "v1.73.0"

View File

@@ -368,7 +368,7 @@ func Run(t *testing.T, opt *Opt) {
}
file1Contents string
file1MimeType = "text/csv"
file1Metadata = fs.Metadata{"rclone-test": "potato"}
file1Metadata = fs.Metadata{"rclonetest": "potato"}
file2 = fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:10.123123123Z"),
Path: `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? + ≠/z.txt`,

View File

@@ -662,6 +662,10 @@ backends:
ignoretests:
- cmd/bisync
- cmd/gitannex
- backend: "shade"
remote: "TestShade:"
fastlist: false
- backend: "archive"
remote: "TestArchive:"
fastlist: false

go.mod (2 changed lines)
View File

@@ -25,6 +25,7 @@ require (
github.com/aws/aws-sdk-go-v2/credentials v1.18.21
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.4
github.com/aws/aws-sdk-go-v2/service/s3 v1.90.0
github.com/aws/aws-sdk-go-v2/service/sts v1.39.1
github.com/aws/smithy-go v1.23.2
github.com/buengese/sgzip v0.1.1
github.com/cloudinary/cloudinary-go/v2 v2.13.0
@@ -133,7 +134,6 @@ require (
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.13 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.30.1 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.5 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.39.1 // indirect
github.com/bahlo/generic-list-go v0.2.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bodgit/plumbing v1.3.0 // indirect