mirror of https://github.com/rclone/rclone.git synced 2026-01-10 12:33:46 +00:00

Compare commits

..

8 Commits

Author SHA1 Message Date
Nick Craig-Wood
8bcb1bde1e fs/march: fix runtime: program exceeds 10000-thread limit
Before this change, when doing a sync with `--no-traverse` and
`--files-from` we could call `NewObject` a total of `--checkers` *
`--checkers` times simultaneously.

With `--checkers 128` this can exceed the 10,000-thread limit and fail
when run on a local-to-local transfer, because `NewObject` calls
`lstat`, a syscall which needs an OS thread of its own.

This patch uses a weighted semaphore to limit the number of
simultaneous calls to `NewObject` to `--checkers`, which won't
blow the 10,000-thread limit and is a far more sensible use of OS
resources.

Fixes #9073
2025-12-31 18:04:10 +00:00
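For readers unfamiliar with the pattern this commit introduces, here is a minimal, self-contained sketch of how `golang.org/x/sync/semaphore` caps the number of concurrent calls. The `lookup` helper and the limit of 8 are illustrative placeholders, not rclone code; rclone's patch applies the same idea with `ci.Checkers` as the limit.

```go
package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

// lookup is a hypothetical stand-in for a syscall-heavy operation
// such as NewObject calling lstat.
func lookup(n int) { fmt.Println("looked up", n) }

func main() {
	ctx := context.Background()
	// Allow at most 8 lookups in flight, no matter how many goroutines ask.
	sem := semaphore.NewWeighted(8)

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			// Acquire blocks until a slot is free or the context is cancelled.
			if err := sem.Acquire(ctx, 1); err != nil {
				return
			}
			defer sem.Release(1)
			lookup(n)
		}(i)
	}
	wg.Wait()
}
```

Because each goroutine blocks in `Acquire` rather than entering the syscall, only the permitted number of OS threads are ever tied up at once.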
Nick Craig-Wood
28c187b9b4 docs: update sponsor logos 2025-12-31 17:04:11 +00:00
Nick Craig-Wood
e07afc4645 Add sys6101 to contributors 2025-12-31 17:04:11 +00:00
Nick Craig-Wood
08932ab92a Add darkdragon-001 to contributors 2025-12-31 17:04:11 +00:00
Nick Craig-Wood
356ee57edb Add vupn0712 to contributors 2025-12-31 17:04:11 +00:00
yuval-cloudinary
7c1660214d docs: add cloudinary to readme 2025-12-22 22:39:53 +01:00
darkdragon-001
51b197c86f docs: fix headers hierarchy in mount docs 2025-12-21 12:23:59 +01:00
vupn0712
029ffd2761 s3: fix Copy ignoring storage class
Co-authored-by: sys6101 <csvmen@gmail.com>
2025-12-20 14:42:00 +00:00
11 changed files with 29 additions and 90 deletions

View File

@@ -32,8 +32,9 @@ directories to and from different cloud storage providers.
 - Box [:page_facing_up:](https://rclone.org/box/)
 - Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
 - China Mobile Ecloud Elastic Object Storage (EOS) [:page_facing_up:](https://rclone.org/s3/#china-mobile-ecloud-eos)
-- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
 - Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
+- Cloudflare R2 [:page_facing_up:](https://rclone.org/s3/#cloudflare-r2)
+- Cloudinary [:page_facing_up:](https://rclone.org/cloudinary/)
 - Cubbit DS3 [:page_facing_up:](https://rclone.org/s3/#Cubbit)
 - DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
 - Digi Storage [:page_facing_up:](https://rclone.org/koofr/#digi-storage)

View File

@@ -2928,7 +2928,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	req := s3.CopyObjectInput{
 		MetadataDirective: types.MetadataDirectiveCopy,
 	}
+	if srcObj.storageClass != nil {
+		req.StorageClass = types.StorageClass(*srcObj.storageClass)
+	}
 	// Build upload options including headers and metadata
 	ci := fs.GetConfig(ctx)
 	uploadOptions := fs.MetadataAsOpenOptions(ctx)
@@ -4501,7 +4503,12 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
 		ACL: types.ObjectCannedACL(o.fs.opt.ACL),
 		Key: &bucketPath,
 	}
+	if tierObj, ok := src.(fs.GetTierer); ok {
+		tier := tierObj.GetTier()
+		if tier != "" {
+			ui.req.StorageClass = types.StorageClass(strings.ToUpper(tier))
+		}
+	}
 	// Fetch metadata if --metadata is in use
 	meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
 	if err != nil {
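The second hunk above keys off `fs.GetTierer`, rclone's optional interface for objects that can report a storage tier. Below is a rough, self-contained sketch of the same type-assertion pattern; `ObjectInfo`, `GetTierer`, and `s3Object` here are simplified stand-ins for illustration, not the real rclone or AWS SDK types.

```go
package main

import (
	"fmt"
	"strings"
)

// ObjectInfo is a simplified stand-in for a source object.
type ObjectInfo interface {
	Remote() string
}

// GetTierer mirrors the shape of an optional interface an object may
// implement to report its storage tier.
type GetTierer interface {
	GetTier() string
}

// s3Object is a toy source object that knows its storage class.
type s3Object struct {
	remote string
	tier   string
}

func (o *s3Object) Remote() string  { return o.remote }
func (o *s3Object) GetTier() string { return o.tier }

// storageClassFor shows the pattern from the hunk above: only set a
// storage class on the request if the source can report one.
func storageClassFor(src ObjectInfo) (string, bool) {
	if tierObj, ok := src.(GetTierer); ok {
		if tier := tierObj.GetTier(); tier != "" {
			return strings.ToUpper(tier), true
		}
	}
	return "", false
}

func main() {
	src := &s3Object{remote: "path/file.bin", tier: "glacier"}
	if sc, ok := storageClassFor(src); ok {
		fmt.Println("copy will request storage class", sc) // GLACIER
	}
}
```

The optional-interface assertion keeps the upload path working for sources that have no notion of a tier, while propagating the class when the source does.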

View File

@@ -1060,3 +1060,6 @@ put them back in again. -->
 - jhasse-shade <jacob@shade.inc>
 - vyv03354 <VYV03354@nifty.ne.jp>
 - masrlinu <masrlinu@users.noreply.github.com> <5259918+masrlinu@users.noreply.github.com>
+- vupn0712 <126212736+vupn0712@users.noreply.github.com>
+- darkdragon-001 <darkdragon-001@users.noreply.github.com>
+- sys6101 <csvmen@gmail.com>

View File

@@ -336,7 +336,7 @@ full new copy of the file.
 When mounting with `--read-only`, attempts to write to files will fail *silently*
 as opposed to with a clear warning as in macFUSE.
-# Mounting on Linux
+## Mounting on Linux
 On newer versions of Ubuntu, you may encounter the following error when running
 `rclone mount`:

View File

@@ -3278,10 +3278,6 @@ The available flags are:
 - `mapper` dumps the JSON blobs being sent to the program supplied with
   `--metadata-mapper` and received from it. It can be useful for debugging
   the metadata mapper interface.
-- `curl` dumps the HTTP request as a `curl` command. Can be used with
-  the other HTTP debugging flags (e.g. `requests`, `bodies`). By
-  default the auth will be masked - use with `auth` to have the curl
-  commands with authentication too.
 ## Filtering

View File

@@ -13,7 +13,7 @@ Thank you to our sponsors:
 <!-- markdownlint-capture -->
 <!-- markdownlint-disable line-length no-bare-urls -->
-{{< sponsor src="/img/logos/rabata/txt_1_300x114.png" width="300" height="200" title="Visit our sponsor Rabata.io" link="https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general">}}
+{{< sponsor src="/img/logos/rabata.svg" width="300" height="200" title="Visit our sponsor Rabata.io" link="https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general">}}
 {{< sponsor src="/img/logos/idrive_e2.svg" width="300" height="200" title="Visit our sponsor IDrive e2" link="https://www.idrive.com/e2/?refer=rclone">}}
 {{< sponsor src="/img/logos/filescom-enterprise-grade-workflows.png" width="300" height="200" title="Start Your Free Trial Today" link="https://files.com/?utm_source=rclone&utm_medium=referral&utm_campaign=banner&utm_term=rclone">}}
 {{< sponsor src="/img/logos/mega-s4.svg" width="300" height="200" title="MEGA S4: New S3 compatible object storage. High scale. Low cost. Free egress." link="https://mega.io/objectstorage?utm_source=rclone&utm_medium=referral&utm_campaign=rclone-mega-s4&mct=rclonepromo">}}

View File

@@ -14,20 +14,7 @@
     Platinum Sponsor
   </div>
   <div class="card-body">
-    <a id="platinum" href="https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general" target="_blank" rel="noopener" title="Visit rclone's sponsor Rabata.io"><img style="width: 100%; height: auto;" src="/img/logos/rabata/txt_1_website.png"></a><br />
-    <script>
-      const imgs = [
-        { href: "https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general", img: "/img/logos/rabata/txt_1_website.png" },
-        { href: "https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general", img: "/img/logos/rabata/txt_2_website.png" },
-        { href: "https://rabata.io/grant-application?utm_source=banner&utm_medium=rclone&utm_content=grant1", img: "/img/logos/rabata/100k_website.png" },
-      ];
-      const img = imgs[Math.floor(Math.random() * imgs.length)];
-      document.addEventListener("DOMContentLoaded", () => {
-        const a = document.getElementById("platinum");
-        a.href = img.href;
-        a.querySelector("img").src = img.img;
-      });
-    </script>
+    <a href="https://rabata.io/?utm_source=banner&utm_medium=rclone&utm_content=general" target="_blank" rel="noopener" title="Visit rclone's sponsor Rabata.io"><img src="/img/logos/rabata.svg"></a><br />
   </div>
 </div>

View File

@@ -14,7 +14,6 @@ const (
 	DumpGoRoutines
 	DumpOpenFiles
 	DumpMapper
-	DumpCurl
 )
 type dumpChoices struct{}
@@ -30,7 +29,6 @@ func (dumpChoices) Choices() []BitsChoicesInfo {
 		{uint64(DumpGoRoutines), "goroutines"},
 		{uint64(DumpOpenFiles), "openfiles"},
 		{uint64(DumpMapper), "mapper"},
-		{uint64(DumpCurl), "curl"},
 	}
 }

View File

@@ -15,8 +15,6 @@ import (
"net/http/httputil" "net/http/httputil"
"net/url" "net/url"
"os" "os"
"slices"
"strings"
"sync" "sync"
"time" "time"
@@ -26,7 +24,6 @@ import (
"github.com/rclone/rclone/lib/structs" "github.com/rclone/rclone/lib/structs"
"github.com/youmark/pkcs8" "github.com/youmark/pkcs8"
"golang.org/x/net/publicsuffix" "golang.org/x/net/publicsuffix"
"moul.io/http2curl/v2"
) )
const ( const (
@@ -442,18 +439,6 @@ func cleanAuths(buf []byte) []byte {
return buf return buf
} }
// cleanCurl gets rid of Auth headers in a curl command
func cleanCurl(cmd *http2curl.CurlCommand) {
for _, authBuf := range authBufs {
auth := "'" + string(authBuf)
for i, arg := range *cmd {
if strings.HasPrefix(arg, auth) {
(*cmd)[i] = auth + "XXXX'"
}
}
}
}
var expireWindow = 30 * time.Second var expireWindow = 30 * time.Second
func isCertificateExpired(cc *tls.Config) bool { func isCertificateExpired(cc *tls.Config) bool {
@@ -507,26 +492,6 @@ func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error
fs.Debugf(nil, "%s", separatorReq) fs.Debugf(nil, "%s", separatorReq)
logMutex.Unlock() logMutex.Unlock()
} }
// Dump curl request
if t.dump&(fs.DumpCurl) != 0 {
cmd, err := http2curl.GetCurlCommand(req)
if err != nil {
fs.Debugf(nil, "Failed to create curl command: %v", err)
} else {
// Patch -X HEAD into --head
for i := range len(*cmd) - 1 {
if (*cmd)[i] == "-X" && (*cmd)[i+1] == "'HEAD'" {
(*cmd)[i] = "--head"
*cmd = slices.Delete(*cmd, i+1, i+2)
break
}
}
if t.dump&fs.DumpAuth == 0 {
cleanCurl(cmd)
}
fs.Debugf(nil, "HTTP REQUEST: %v", cmd)
}
}
// Do round trip // Do round trip
resp, err = t.Transport.RoundTrip(req) resp, err = t.Transport.RoundTrip(req)
// Logf response // Logf response

View File

@@ -19,7 +19,6 @@ import (
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"moul.io/http2curl/v2"
) )
func TestCleanAuth(t *testing.T) { func TestCleanAuth(t *testing.T) {
@@ -62,32 +61,6 @@ func TestCleanAuths(t *testing.T) {
} }
} }
func TestCleanCurl(t *testing.T) {
for _, test := range []struct {
in []string
want []string
}{{
[]string{""},
[]string{""},
}, {
[]string{"floo"},
[]string{"floo"},
}, {
[]string{"'Authorization: AAAAAAAAA'", "'Potato: Help'", ""},
[]string{"'Authorization: XXXX'", "'Potato: Help'", ""},
}, {
[]string{"'X-Auth-Token: AAAAAAAAA'", "'Potato: Help'", ""},
[]string{"'X-Auth-Token: XXXX'", "'Potato: Help'", ""},
}, {
[]string{"'X-Auth-Token: AAAAAAAAA'", "'Authorization: AAAAAAAAA'", "'Potato: Help'", ""},
[]string{"'X-Auth-Token: XXXX'", "'Authorization: XXXX'", "'Potato: Help'", ""},
}} {
in := http2curl.CurlCommand(test.in)
cleanCurl(&in)
assert.Equal(t, test.want, test.in, test.in)
}
}
var certSerial = int64(0) var certSerial = int64(0)
// Create a test certificate and key pair that is valid for a specific // Create a test certificate and key pair that is valid for a specific

View File

@@ -16,6 +16,7 @@ import (
"github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/fs/list"
"github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/transform" "github.com/rclone/rclone/lib/transform"
"golang.org/x/sync/semaphore"
"golang.org/x/text/unicode/norm" "golang.org/x/text/unicode/norm"
) )
@@ -41,9 +42,10 @@ type March struct {
NoCheckDest bool // transfer all objects regardless without checking dst NoCheckDest bool // transfer all objects regardless without checking dst
NoUnicodeNormalization bool // don't normalize unicode characters in filenames NoUnicodeNormalization bool // don't normalize unicode characters in filenames
// internal state // internal state
srcListDir listDirFn // function to call to list a directory in the src srcListDir listDirFn // function to call to list a directory in the src
dstListDir listDirFn // function to call to list a directory in the dst dstListDir listDirFn // function to call to list a directory in the dst
transforms []matchTransformFn transforms []matchTransformFn
newObjectSem *semaphore.Weighted // make sure we don't call too many NewObjects simultaneously
} }
// Marcher is called on each match // Marcher is called on each match
@@ -78,6 +80,8 @@ func (m *March) init(ctx context.Context) {
if m.Fdst.Features().CaseInsensitive || ci.IgnoreCaseSync { if m.Fdst.Features().CaseInsensitive || ci.IgnoreCaseSync {
m.transforms = append(m.transforms, strings.ToLower) m.transforms = append(m.transforms, strings.ToLower)
} }
// Only allow ci.Checkers simultaneous calls to NewObject
m.newObjectSem = semaphore.NewWeighted(int64(ci.Checkers))
} }
// srcOrDstKey turns a directory entry into a sort key using the defined transforms. // srcOrDstKey turns a directory entry into a sort key using the defined transforms.
@@ -461,7 +465,12 @@ func (m *March) processJob(job listDirJob) ([]listDirJob, error) {
continue continue
} }
leaf := path.Base(t.src.Remote()) leaf := path.Base(t.src.Remote())
if err := m.newObjectSem.Acquire(m.Ctx, 1); err != nil {
t.dstMatch <- nil
continue
}
dst, err := m.Fdst.NewObject(m.Ctx, path.Join(job.dstRemote, leaf)) dst, err := m.Fdst.NewObject(m.Ctx, path.Join(job.dstRemote, leaf))
m.newObjectSem.Release(1)
if err != nil { if err != nil {
dst = nil dst = nil
} }