mirror of https://github.com/rclone/rclone.git synced 2026-01-24 13:23:14 +00:00

Compare commits


9 Commits

Author  SHA1  Message  Date
Nick Craig-Wood  3c95651dcb  protondrive: fix crash when original size isn't known on Open - Fixes #9117  2026-01-23 17:00:43 +00:00
Nick Craig-Wood  6b9f77459a  docs: fix formatting  2026-01-23 16:55:52 +00:00
Nick Craig-Wood  4b2c39c585  docs: add faq entry about re-enabling old TLS ciphers  2026-01-23 15:40:06 +00:00
Nick Craig-Wood  dbf2499d85  Add Marc-Philip to contributors  2026-01-23 15:40:06 +00:00
Nick Craig-Wood  760a134c95  Add yy to contributors  2026-01-23 15:40:06 +00:00
Enduriel  63cfe260a2  filen: swap to blake3 hashes  2026-01-22 00:11:46 +00:00
Marc-Philip  3550275cd3  docs: fix echo command syntax for password input  2026-01-18 20:56:26 +01:00
yy  b728929f44  docs: fix typos in comments and messages  2026-01-18 20:48:33 +01:00
Nick Craig-Wood  9ec918f137  docs: fix use of removed rem macro  2026-01-14 18:37:19 +00:00
26 changed files with 78 additions and 32 deletions

View File

@@ -77,7 +77,7 @@ The DOI provider can be set when rclone does not automatically recognize a suppo
Name: "doi_resolver_api_url",
Help: `The URL of the DOI resolver API to use.
-The DOI resolver can be set for testing or for cases when the the canonical DOI resolver API cannot be used.
+The DOI resolver can be set for testing or for cases when the canonical DOI resolver API cannot be used.
Defaults to "https://doi.org/api".`,
Required: false,

View File

@@ -226,7 +226,7 @@ func (f *Fs) Precision() time.Duration {
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
-return hash.Set(hash.SHA512)
+return hash.Set(hash.BLAKE3)
}
// Features returns the optional features of this Fs
@@ -659,7 +659,7 @@ func (o *Object) Size() int64 {
// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
-if ty != hash.SHA512 {
+if ty != hash.BLAKE3 {
return "", hash.ErrUnsupported
}
if o.file.Hash == "" {
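
For context, a minimal sketch of exercising the new hash type through rclone's generic multihasher (the same `fs/hash` machinery `Hashes()`/`Hash()` plug into); this assumes the `hash.BLAKE3` type introduced by this diff:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/rclone/rclone/fs/hash"
)

func main() {
	// Build a multihasher that computes only BLAKE3.
	hasher, err := hash.NewMultiHasherTypes(hash.NewHashSet(hash.BLAKE3))
	if err != nil {
		panic(err)
	}
	// Stream data through it; one pass updates all requested hashes.
	if _, err := io.Copy(hasher, strings.NewReader("hello")); err != nil {
		panic(err)
	}
	fmt.Println(hasher.Sums()[hash.BLAKE3])
}
```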

View File

@@ -403,7 +403,7 @@ This is why this flag is not set as the default.
As a rule of thumb if nearly all of your data is under rclone's root
directory (the |root/directory| in |onedrive:root/directory|) then
-using this flag will be be a big performance win. If your data is
+using this flag will be a big performance win. If your data is
mostly not under the root then using this flag will be a big
performance loss.

View File

@@ -904,7 +904,7 @@ func (o *Object) Storable() bool {
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
-fs.FixRangeOption(options, *o.originalSize)
+fs.FixRangeOption(options, o.Size())
var offset, limit int64 = 0, -1
for _, option := range options { // if the caller passes in nil for options, it will become array of nil
switch x := option.(type) {
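
The crash came from dereferencing `o.originalSize` when it was nil; going through `o.Size()` lets the accessor guard that case. A hypothetical sketch of such a nil-safe accessor (field name as in this hunk, body is an assumption, not the actual protondrive code):

```go
// Hypothetical sketch: Size() can fall back safely when the original
// size pointer is unset, whereas *o.originalSize panics with a nil
// pointer dereference.
func (o *Object) Size() int64 {
	if o.originalSize != nil {
		return *o.originalSize
	}
	return -1 // size unknown
}
```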

View File

@@ -1258,7 +1258,7 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
fs.Debugf(f, "Failed to resolve path using RealPath: %v", err)
cwd, err := c.sftpClient.Getwd()
if err != nil {
-fs.Debugf(f, "Failed to to read current directory - using relative paths: %v", err)
+fs.Debugf(f, "Failed to read current directory - using relative paths: %v", err)
} else {
f.absRoot = path.Join(cwd, f.root)
fs.Debugf(f, "Relative path joined with current directory to get absolute path %q", f.absRoot)
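
The fallback chain here boils down to: try `RealPath`, then `Getwd` plus `path.Join`, else stay on relative paths. A compressed sketch of that fallback (using the `github.com/pkg/sftp` client API; `client` and `root` are assumptions):

```go
// Sketch of the fallback described above.
absRoot := root // last resort: keep using relative paths
if cwd, err := client.Getwd(); err == nil {
	absRoot = path.Join(cwd, root)
}
```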

View File

@@ -54,7 +54,7 @@ var SharedOptions = []fs.Option{{
Name: "chunk_size",
Help: strings.ReplaceAll(`Above this size files will be chunked.
-Above this size files will be chunked into a a |`+segmentsContainerSuffix+`| container
+Above this size files will be chunked into a |`+segmentsContainerSuffix+`| container
or a |`+segmentsDirectory+`| directory. (See the |use_segments_container| option
for more info). Default for this is 5 GiB which is its maximum value, which
means only files above this size will be chunked.
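
For illustration, the option can be set per invocation with the usual backend-flag naming (shown here for the swift backend; the remote and paths are made up):

```console
rclone copy --swift-chunk-size 1G bigfiles/ swift:container
```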

View File

@@ -341,7 +341,7 @@ func (h *testState) preconfigureServer() {
// The `\\?\` prefix tells Windows APIs to pass strings unmodified to the
// filesystem without additional parsing [1]. Our workaround is roughly to add
// the prefix to whichever parameter doesn't have it (when the OS is Windows).
-// I'm not sure this generalizes, but it works for the the kinds of inputs we're
+// I'm not sure this generalizes, but it works for the kinds of inputs we're
// throwing at it.
//
// [1]: https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
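
A minimal sketch of the workaround the comment describes (the helper name is illustrative, not the test's real code):

```go
package main

import (
	"fmt"
	"runtime"
	"strings"
)

// addExtendedPrefix adds the Win32 extended-length prefix \\?\ when it
// is missing, so both parameters get the same treatment on Windows.
func addExtendedPrefix(p string) string {
	if runtime.GOOS != "windows" || strings.HasPrefix(p, `\\?\`) {
		return p
	}
	return `\\?\` + p
}

func main() {
	fmt.Println(addExtendedPrefix(`C:\very\long\path`))
}
```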

View File

@@ -97,7 +97,7 @@ with the following options:
- If ` + "`--files-only`" + ` is specified then files will be returned only,
no directories.
-If ` + "`--stat`" + ` is set then the the output is not an array of items,
+If ` + "`--stat`" + ` is set then the output is not an array of items,
but instead a single JSON blob will be returned about the item pointed to.
This will return an error if the item isn't found, however on bucket based
backends (like s3, gcs, b2, azureblob etc) if the item isn't found it will
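
For example (illustrative remote and paths):

```console
rclone lsjson remote:dir                 # JSON array of items in dir
rclone lsjson --stat remote:dir/file.txt # single JSON object for file.txt
```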

View File

@@ -71,7 +71,7 @@ rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint mountType=m
rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"CacheMode": 2}' mountOpt='{"AllowOther": true}'
` + "```" + `
-The vfsOpt are as described in options/get and can be seen in the the
+The vfsOpt are as described in options/get and can be seen in the
"vfs" section when running and the mountOpt can be seen in the "mount" section:
` + "```console" + `

View File

@@ -34,7 +34,7 @@ argument by passing a hyphen as an argument. This will use the first
line of STDIN as the password not including the trailing newline.
` + "```console" + `
-echo "secretpassword" | rclone obscure -
+echo 'secretpassword' | rclone obscure -
` + "```" + `
If there is no data on STDIN to read, rclone obscure will default to

View File

@@ -1065,3 +1065,5 @@ put them back in again. -->
- sys6101 <csvmen@gmail.com>
- Nicolas Dessart <nds@outsight.tech>
- Qingwei Li <332664203@qq.com>
- yy <yhymmt37@gmail.com>
- Marc-Philip <marc-philip.werner@sap.com>

View File

@@ -316,3 +316,47 @@ back again when transferring to a different storage system where the
original characters are supported. When the same Unicode characters
are intentionally used in file names, this replacement strategy leads
to unwanted renames. Read more under section [caveats](/overview/#restricted-filenames-caveats).
### Why does rclone fail to connect over TLS but another client works?
If you see TLS handshake failures (or packet captures show the server
rejecting all offered ciphers), the server/proxy may only support
legacy TLS cipher suites (for example RSA key-exchange ciphers
such as `RSA_WITH_AES_256_CBC_SHA256`, or old 3DES ciphers). Recent Go
versions (which rclone is built with) have **removed insecure ciphers
from the default list**, so rclone may refuse to negotiate them even
if other tools still do.
If you can't update/reconfigure the server/proxy to support modern TLS
(TLS 1.2/1.3) and ECDHE-based cipher suites, you can re-enable legacy
ciphers via `GODEBUG`:
- Windows (cmd.exe):
```bat
set GODEBUG=tlsrsakex=1
rclone copy ...
```
- Windows (PowerShell):
```powershell
$env:GODEBUG="tlsrsakex=1"
rclone copy ...
```
- Linux/macOS:
```sh
GODEBUG=tlsrsakex=1 rclone copy ...
```
If the server only supports 3DES, try:
```sh
GODEBUG=tls3des=1 rclone ...
```
This applies to **any rclone feature using TLS** (HTTPS, FTPS, WebDAV
over TLS, proxies with TLS interception, etc.). Use these workarounds
only long enough to get the server/proxy updated.
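
To confirm the diagnosis before reaching for `GODEBUG`, one option (assuming the openssl CLI is available) is to probe the server with a legacy cipher explicitly:

```sh
# Succeeds only if the server accepts the legacy RSA key-exchange cipher
# (AES256-SHA256 is OpenSSL's name for RSA_WITH_AES_256_CBC_SHA256)
openssl s_client -connect server.example.com:443 -tls1_2 -cipher AES256-SHA256
```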

View File

@@ -83,7 +83,7 @@ y/e/d> y
### Modification times and hashes
Modification times are fully supported for files, for directories, only the creation time matters.
-Filen supports SHA512 hashes.
+Filen supports Blake3 hashes.
### Restricted filename characters
Invalid UTF-8 bytes will be [replaced](/overview/#invalid-utf8)
@@ -91,7 +91,7 @@ Invalid UTF-8 bytes will be [replaced](/overview/#invalid-utf8)
### API Key
-{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/filen/filen.go then run make backenddocs" >}}
+<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/filen/filen.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
### Standard options
Here are the Standard options specific to filen (Filen).
@@ -241,4 +241,4 @@ Properties:
- Type: string
- Required: false
-{{< rem autogenerated options stop >}}
+<!-- autogenerated options stop -->

View File

@@ -27,7 +27,7 @@ Here is an overview of the major features of each cloud storage system.
| Dropbox | DBHASH ¹ | R | Yes | No | - | - |
| Enterprise File Fabric | - | R/W | Yes | No | R/W | - |
| FileLu Cloud Storage | MD5 | R/W | No | Yes | R | - |
-| Filen | SHA512 | R/W | Yes | No | R/W | - |
+| Filen | Blake3 | R/W | Yes | No | R/W | - |
| Files.com | MD5, CRC32 | DR/W | Yes | No | R | - |
| FTP | - | R/W ¹⁰ | No | No | - | - |
| Gofile | MD5 | DR/W | No | Yes | R | - |

View File

@@ -97,7 +97,7 @@ Shade uses multipart uploads by default. This means that files will be chunked a
Please note that when deleting files in Shade via rclone it will delete the file instantly, instead of sending it to the trash. This means that it will not be recoverable.
-{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/box/box.go then run make backenddocs" >}}
+<!-- autogenerated options start - DO NOT EDIT - instead edit fs.RegInfo in backend/shade/shade.go and run make backenddocs to verify --> <!-- markdownlint-disable-line line-length -->
### Standard options
Here are the Standard options specific to shade (Shade FS).
@@ -183,7 +183,7 @@ Properties:
- Type: string
- Required: false
-{{< rem autogenerated options stop >}}
+<!-- autogenerated options stop -->
## Limitations

View File

@@ -764,7 +764,7 @@ func SetCacheDir(path string) (err error) {
//
// To override the default we therefore set environment variable TMPDIR
// on Unix systems, and both TMP and TEMP on Windows (they are almost exclusively
-// aliases for the same path, and programs may refer to to either of them).
+// aliases for the same path, and programs may refer to either of them).
// This should make all libraries and forked processes use the same.
func SetTempDir(path string) (err error) {
var tempDir string
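
A sketch of the environment-variable side of that, mirroring the comment above (illustrative helper, not rclone's actual code; needs `import "os"`):

```go
// setTempEnv points every common temp-dir convention at the same path
// so libraries and forked processes agree.
func setTempEnv(tempDir string) error {
	for _, key := range []string{"TMPDIR", "TMP", "TEMP"} {
		if err := os.Setenv(key, tempDir); err != nil {
			return err
		}
	}
	return nil
}
```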

View File

@@ -31,7 +31,7 @@ func camelToSnake(in string) string {
//
// Builtin types are expected to be encoding as their natural
// stringificatons as produced by fmt.Sprint except for []string which
-// is expected to be encoded a a CSV with empty array encoded as "".
+// is expected to be encoded as a CSV with empty array encoded as "".
//
// Any other types are expected to be encoded by their String()
// methods and decoded by their `Set(s string) error` methods.
@@ -93,7 +93,7 @@ func StringToInterface(def any, in string) (newValue any, err error) {
//
// Builtin types are expected to be encoding as their natural
// stringificatons as produced by fmt.Sprint except for []string which
-// is expected to be encoded a a CSV with empty array encoded as "".
+// is expected to be encoded as a CSV with empty array encoded as "".
//
// Any other types are expected to be encoded by their String()
// methods and decoded by their `Set(s string) error` methods.
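
The `[]string` convention the comment describes, illustrated with the standard `encoding/csv` package (a sketch of the encoding shape, not rclone's internal helper):

```go
package main

import (
	"bytes"
	"encoding/csv"
	"fmt"
)

func main() {
	// []string{"a", "b,c"} encodes as one CSV record: a,"b,c"
	// (and an empty slice is encoded as "").
	var buf bytes.Buffer
	w := csv.NewWriter(&buf)
	_ = w.Write([]string{"a", "b,c"})
	w.Flush()
	fmt.Print(buf.String()) // a,"b,c"
}
```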

View File

@@ -180,7 +180,7 @@ func Decrypt(b io.ReadSeeker) (io.Reader, error) {
// GetPasswordCommand gets the password using the --password-command setting
//
-// If the the --password-command flag was not in use it returns "", nil
+// If the --password-command flag was not in use it returns "", nil
func GetPasswordCommand(ctx context.Context) (pass string, err error) {
ci := fs.GetConfig(ctx)
if len(ci.PasswordCommand) == 0 {
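
For illustration, the flag typically points at a secret-manager invocation (the `pass` entry name here is made up):

```console
rclone --password-command "pass show rclone/config" lsd remote:
```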

View File

@@ -225,7 +225,7 @@ func fromTypes(set Set) (map[Type]hash.Hash, error) {
// single multiwriter, where one write will update all
// the hashers.
func toMultiWriter(h map[Type]hash.Hash) io.Writer {
-// Convert to to slice
+// Convert to slice
var w = make([]io.Writer, 0, len(h))
for _, v := range h {
w = append(w, v)
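
The pattern this helper enables, in miniature (standard-library sketch with `crypto/md5` and `crypto/sha1` standing in for rclone's hash types; `reader` is an assumption):

```go
// One read of the stream feeds every hasher at once.
md5h, sha1h := md5.New(), sha1.New()
w := io.MultiWriter(md5h, sha1h)
if _, err := io.Copy(w, reader); err != nil {
	return err
}
fmt.Printf("%x %x\n", md5h.Sum(nil), sha1h.Sum(nil))
```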

View File

@@ -79,7 +79,7 @@ type Options struct {
File string `config:"log_file"` // Log everything to this file
MaxSize fs.SizeSuffix `config:"log_file_max_size"` // Max size of log file
MaxBackups int `config:"log_file_max_backups"` // Max backups of log file
-MaxAge fs.Duration `config:"log_file_max_age"` // Max age of of log file
+MaxAge fs.Duration `config:"log_file_max_age"` // Max age of log file
Compress bool `config:"log_file_compress"` // Set to compress log file
Format logFormat `config:"log_format"` // Comma separated list of log format options
UseSyslog bool `config:"syslog"` // Use Syslog for logging
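
These map onto the matching command-line flags, e.g. (paths and limits are illustrative):

```console
rclone sync src remote:dst \
  --log-file /var/log/rclone.log \
  --log-file-max-size 10M \
  --log-file-max-backups 5 \
  --log-file-max-age 7d
```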

View File

@@ -806,7 +806,7 @@ func TestSyncBasedOnCheckSum(t *testing.T) {
// Create a file and sync it. Change the last modified date and the
// file contents but not the size. If we're only doing sync by size
-// only, we expect nothing to to be transferred on the second sync.
+// only, we expect nothing to be transferred on the second sync.
func TestSyncSizeOnly(t *testing.T) {
ctx := context.Background()
ctx, ci := fs.AddConfig(ctx)
@@ -843,7 +843,7 @@ func TestSyncSizeOnly(t *testing.T) {
}
// Create a file and sync it. Keep the last modified date but change
-// the size. With --ignore-size we expect nothing to to be
+// the size. With --ignore-size we expect nothing to be
// transferred on the second sync.
func TestSyncIgnoreSize(t *testing.T) {
ctx := context.Background()
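
The flags these tests exercise, for reference (paths are illustrative):

```console
rclone sync --size-only /src remote:dst    # compare by size only
rclone sync --ignore-size /src remote:dst  # ignore size when checking
```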

View File

@@ -328,7 +328,7 @@ type Flagger interface {
// satisfy as non-pointers
//
// These are from pflag.Value and need to be tested against
-// non-pointer value due the the way the backend flags are inserted
+// non-pointer value due to the way the backend flags are inserted
// into the flags.
type FlaggerNP interface {
String() string
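
Both interfaces build on the pflag.Value contract, which for reference is:

```go
// From github.com/spf13/pflag: the interface Flagger/FlaggerNP extend.
type Value interface {
	String() string
	Set(string) error
	Type() string
}
```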

go.mod
View File

@@ -11,7 +11,7 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3
github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.3
github.com/Azure/go-ntlmssp v0.0.2-0.20251110135918-10b7b7e7cd26
-github.com/FilenCloudDienste/filen-sdk-go v0.0.34
+github.com/FilenCloudDienste/filen-sdk-go v0.0.35
github.com/Files-com/files-sdk-go/v3 v3.2.264
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd
github.com/a1ex3/zstd-seekable-format-go/pkg v0.10.0
@@ -155,8 +155,8 @@ require (
github.com/creasty/defaults v1.8.0 // indirect
github.com/cronokirby/saferith v0.33.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
-github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
+github.com/dromara/dongle v1.0.1 // indirect
+github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/ebitengine/purego v0.9.1 // indirect
github.com/emersion/go-message v0.18.2 // indirect

go.sum
View File

@@ -61,8 +61,8 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgv
github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/FilenCloudDienste/filen-sdk-go v0.0.34 h1:Fd/wagh/Qn35p3PkCUYubmaELATQlYGC9pxpJ9TkHUE=
-github.com/FilenCloudDienste/filen-sdk-go v0.0.34/go.mod h1:XkI1Iq30/tU8vk4Zd1cKr2cCTiFqBEf0ZfG4+KKUBrY=
+github.com/FilenCloudDienste/filen-sdk-go v0.0.35 h1:geuYpD/1ZXSp1H3kdW7si+KRUIrHHqM1kk8lqoA8Y9M=
+github.com/FilenCloudDienste/filen-sdk-go v0.0.35/go.mod h1:0cBhKXQg49XbKZZfk5TCDa3sVLP+xMxZTWL+7KY0XR0=
github.com/Files-com/files-sdk-go/v3 v3.2.264 h1:lMHTplAYI9FtmCo/QOcpRxmPA5REVAct1r2riQmDQKw=
github.com/Files-com/files-sdk-go/v3 v3.2.264/go.mod h1:wGqkOzRu/ClJibvDgcfuJNAqI2nLhe8g91tPlDKRCdE=
github.com/IBM/go-sdk-core/v5 v5.21.0 h1:DUnYhvC4SoC8T84rx5omnhY3+xcQg/Whyoa3mDPIMkk=

View File

@@ -46,7 +46,7 @@ type Pool struct {
}
// totalMemory is a semaphore used to control total buffer usage of
-// all Pools. It it may be nil in which case the total buffer usage
+// all Pools. It may be nil in which case the total buffer usage
// will not be controlled. It counts memory in active use, it does not
// count memory cached in the pool.
var totalMemory *semaphore.Weighted
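
The usage pattern the comment implies, as a sketch using `golang.org/x/sync/semaphore` (`ctx`, `size`, and `getBuffer` are assumptions):

```go
// Cap memory in active use: acquire before handing out a buffer,
// release when it is returned. A nil totalMemory means "no cap".
if totalMemory != nil {
	if err := totalMemory.Acquire(ctx, size); err != nil {
		return nil, err
	}
}
buf := getBuffer(size) // hypothetical allocation
// ... later, when the buffer is put back:
if totalMemory != nil {
	totalMemory.Release(size)
}
```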

View File

@@ -254,7 +254,7 @@ func (wb *WriteBack) SetID(pid *Handle) {
//
// Use SetID to create Handles in advance of calling Add.
//
-// If modified is false then it it doesn't cancel a pending upload if
+// If modified is false then it doesn't cancel a pending upload if
// there is one as there is no need.
func (wb *WriteBack) Add(id Handle, name string, size int64, modified bool, putFn PutFn) Handle {
wb.mu.Lock()