mirror of https://github.com/rclone/rclone.git synced 2026-01-21 03:43:26 +00:00

Compare commits


1 Commit

Author SHA1 Message Date
Nick Craig-Wood
c917d2d5b4 s3: fix --s3-versions when copying a single object
Before this change, if --s3-versions was enabled, then copying a
single object from a subdirectory would fail.

This was due to an incorrect comparison in the NewFs code.

This fixes the comparison and introduces a new unit test.
2022-09-05 18:56:11 +01:00
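
The fix itself is the comparison change visible in the s3.go hunk for getMetaDataListing further down this page: instead of requiring the remote name returned by the bucket listing to match the wanted remote exactly, only the base names are compared, since (as the new comment in that hunk notes) the listing is made with a directory prefix. Below is a minimal, self-contained sketch of that comparison only; the helper name matchesWanted and the sample paths are illustrative and not part of rclone.

```go
package main

import (
	"fmt"
	"path"
)

// matchesWanted mirrors the comparison change from the s3.go hunk below:
// when the backend lists with a directory prefix, the remote it asked for
// and the remote the listing returns can differ by that prefix, so only
// the base names are compared.
func matchesWanted(wantRemote, gotRemote string) bool {
	// compare the base name only since the listing will have a prefix
	return path.Base(wantRemote) == path.Base(gotRemote)
}

func main() {
	fmt.Println(matchesWanted("subdir/file.txt", "file.txt"))  // true  - same object, prefix differs
	fmt.Println(matchesWanted("subdir/file.txt", "other.txt")) // false - different object
}
```

The new test in s3_internal_test.go (also in this diff) exercises the change by creating the test object under a "versions/" subdirectory and adding a NewFs subtest that opens a single versioned object directly.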
46 changed files with 223 additions and 637 deletions

View File

@@ -97,12 +97,12 @@ jobs:
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v2
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Install Go - name: Install Go
uses: actions/setup-go@v3 uses: actions/setup-go@v2
with: with:
stable: 'false' stable: 'false'
go-version: ${{ matrix.go }} go-version: ${{ matrix.go }}
@@ -162,7 +162,7 @@ jobs:
env env
- name: Go module cache - name: Go module cache
uses: actions/cache@v3 uses: actions/cache@v2
with: with:
path: ~/go/pkg/mod path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -226,7 +226,7 @@ jobs:
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v2
- name: Code quality test - name: Code quality test
uses: golangci/golangci-lint-action@v3 uses: golangci/golangci-lint-action@v3
@@ -242,18 +242,18 @@ jobs:
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v2
with: with:
fetch-depth: 0 fetch-depth: 0
# Upgrade together with NDK version # Upgrade together with NDK version
- name: Set up Go - name: Set up Go
uses: actions/setup-go@v3 uses: actions/setup-go@v1
with: with:
go-version: 1.19.x go-version: 1.19.x
- name: Go module cache - name: Go module cache
uses: actions/cache@v3 uses: actions/cache@v2
with: with:
path: ~/go/pkg/mod path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}

View File

@@ -12,7 +12,7 @@ jobs:
name: Build image job name: Build image job
steps: steps:
- name: Checkout master - name: Checkout master
uses: actions/checkout@v3 uses: actions/checkout@v2
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Build and publish image - name: Build and publish image

View File

@@ -11,7 +11,7 @@ jobs:
name: Build image job name: Build image job
steps: steps:
- name: Checkout master - name: Checkout master
uses: actions/checkout@v3 uses: actions/checkout@v2
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Get actual patch version - name: Get actual patch version
@@ -40,7 +40,7 @@ jobs:
name: Build docker plugin job name: Build docker plugin job
steps: steps:
- name: Checkout master - name: Checkout master
uses: actions/checkout@v3 uses: actions/checkout@v2
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Build and publish docker plugin - name: Build and publish docker plugin

View File

@@ -20,7 +20,7 @@ issues:
exclude-use-default: false exclude-use-default: false
# Maximum issues count per one linter. Set to 0 to disable. Default is 50. # Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-issues-per-linter: 0 max-per-linter: 0
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3. # Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0 max-same-issues: 0

View File

@@ -49,7 +49,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/) * Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/) * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3) * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
* IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
* Koofr [:page_facing_up:](https://rclone.org/koofr/) * Koofr [:page_facing_up:](https://rclone.org/koofr/)
* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/) * Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
* Memset Memstore [:page_facing_up:](https://rclone.org/swift/) * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)

View File

@@ -53,14 +53,6 @@ doing that so it may be necessary to roll back dependencies to the
version specified by `make updatedirect` in order to get rclone to version specified by `make updatedirect` in order to get rclone to
build. build.
## Tidy beta
At some point after the release run
bin/tidy-beta v1.55
where the version number is that of a couple ago to remove old beta binaries.
## Making a point release ## Making a point release
If rclone needs a point release due to some horrendous bug: If rclone needs a point release due to some horrendous bug:

View File

@@ -29,7 +29,6 @@ import (
"github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/operations"
) )
@@ -368,16 +367,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
meta, err := readMetadata(ctx, mo) meta := readMetadata(ctx, mo)
if err != nil { if meta == nil {
return nil, fmt.Errorf("error decoding metadata: %w", err) return nil, errors.New("error decoding metadata")
} }
// Create our Object // Create our Object
o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode)) o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode))
if err != nil { return f.newObject(o, mo, meta), err
return nil, err
}
return f.newObject(o, mo, meta), nil
} }
// checkCompressAndType checks if an object is compressible and determines it's mime type // checkCompressAndType checks if an object is compressible and determines it's mime type
@@ -681,7 +677,7 @@ func (f *Fs) putWithCustomFunctions(ctx context.Context, in io.Reader, src fs.Ob
} }
return nil, err return nil, err
} }
return f.newObject(dataObject, mo, meta), nil return f.newObject(dataObject, mo, meta), err
} }
// Put in to the remote path with the modTime given of the given size // Put in to the remote path with the modTime given of the given size
@@ -1044,19 +1040,24 @@ func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mim
} }
// This function will read the metadata from a metadata object. // This function will read the metadata from a metadata object.
func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) { func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata) {
// Open our meradata object // Open our meradata object
rc, err := mo.Open(ctx) rc, err := mo.Open(ctx)
if err != nil { if err != nil {
return nil, err return nil
} }
defer fs.CheckClose(rc, &err) defer func() {
err := rc.Close()
if err != nil {
fs.Errorf(mo, "Error closing object: %v", err)
}
}()
jr := json.NewDecoder(rc) jr := json.NewDecoder(rc)
meta = new(ObjectMetadata) meta = new(ObjectMetadata)
if err = jr.Decode(meta); err != nil { if err = jr.Decode(meta); err != nil {
return nil, err return nil
} }
return meta, nil return meta
} }
// Remove removes this object // Remove removes this object
@@ -1101,9 +1102,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
origName := o.Remote() origName := o.Remote()
if o.meta.Mode != Uncompressed || compressible { if o.meta.Mode != Uncompressed || compressible {
newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType) newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType)
if err != nil {
return err
}
if newObject.Object.Remote() != o.Object.Remote() { if newObject.Object.Remote() != o.Object.Remote() {
if removeErr := o.Object.Remove(ctx); removeErr != nil { if removeErr := o.Object.Remove(ctx); removeErr != nil {
return removeErr return removeErr
@@ -1117,9 +1115,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
} }
// If we are, just update the object and metadata // If we are, just update the object and metadata
newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType) newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType)
if err != nil { }
return err if err != nil {
} return err
} }
// Update object metadata and return // Update object metadata and return
o.Object = newObject.Object o.Object = newObject.Object
@@ -1130,9 +1128,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified. // This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified.
func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object { func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object {
if o == nil {
log.Trace(nil, "newObject(%#v, %#v, %#v) called with nil o", o, mo, meta)
}
return &Object{ return &Object{
Object: o, Object: o,
f: f, f: f,
@@ -1145,9 +1140,6 @@ func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object
// This initializes the variables of a press Object with only the size. The metadata will be loaded later on demand. // This initializes the variables of a press Object with only the size. The metadata will be loaded later on demand.
func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *Object { func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *Object {
if o == nil {
log.Trace(nil, "newObjectSizeAndNameOnly(%#v, %#v, %#v) called with nil o", o, moName, size)
}
return &Object{ return &Object{
Object: o, Object: o,
f: f, f: f,
@@ -1175,7 +1167,7 @@ func (o *Object) loadMetadataIfNotLoaded(ctx context.Context) (err error) {
return err return err
} }
if o.meta == nil { if o.meta == nil {
o.meta, err = readMetadata(ctx, o.mo) o.meta = readMetadata(ctx, o.mo)
} }
return err return err
} }

View File

@@ -1210,7 +1210,6 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
WriteMimeType: true, WriteMimeType: true,
CanHaveEmptyDirectories: true, CanHaveEmptyDirectories: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs, ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
FilterAware: true,
}).Fill(ctx, f) }).Fill(ctx, f)
// Create a new authorized Drive client. // Create a new authorized Drive client.

View File

@@ -518,9 +518,6 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery // TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
func (f *Fs) InternalTestAgeQuery(t *testing.T) { func (f *Fs) InternalTestAgeQuery(t *testing.T) {
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
opt := &filter.Opt{} opt := &filter.Opt{}
err := opt.MaxAge.Set("1h") err := opt.MaxAge.Set("1h")
assert.NoError(t, err) assert.NoError(t, err)

View File

@@ -124,11 +124,6 @@ So for |concurrency 3| you'd use |--checkers 2 --transfers 2
Help: "Use MDTM to set modification time (VsFtpd quirk)", Help: "Use MDTM to set modification time (VsFtpd quirk)",
Default: false, Default: false,
Advanced: true, Advanced: true,
}, {
Name: "force_list_hidden",
Help: "Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.",
Default: false,
Advanced: true,
}, { }, {
Name: "idle_timeout", Name: "idle_timeout",
Default: fs.Duration(60 * time.Second), Default: fs.Duration(60 * time.Second),
@@ -210,7 +205,6 @@ type Options struct {
DisableMLSD bool `config:"disable_mlsd"` DisableMLSD bool `config:"disable_mlsd"`
DisableUTF8 bool `config:"disable_utf8"` DisableUTF8 bool `config:"disable_utf8"`
WritingMDTM bool `config:"writing_mdtm"` WritingMDTM bool `config:"writing_mdtm"`
ForceListHidden bool `config:"force_list_hidden"`
IdleTimeout fs.Duration `config:"idle_timeout"` IdleTimeout fs.Duration `config:"idle_timeout"`
CloseTimeout fs.Duration `config:"close_timeout"` CloseTimeout fs.Duration `config:"close_timeout"`
ShutTimeout fs.Duration `config:"shut_timeout"` ShutTimeout fs.Duration `config:"shut_timeout"`
@@ -336,44 +330,14 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
fs.Debugf(f, "Connecting to FTP server") fs.Debugf(f, "Connecting to FTP server")
// Make ftp library dial with fshttp dialer optionally using TLS // Make ftp library dial with fshttp dialer optionally using TLS
initialConnection := true
dial := func(network, address string) (conn net.Conn, err error) { dial := func(network, address string) (conn net.Conn, err error) {
fs.Debugf(f, "dial(%q,%q)", network, address)
defer func() {
fs.Debugf(f, "> dial: conn=%T, err=%v", conn, err)
}()
conn, err = fshttp.NewDialer(ctx).Dial(network, address) conn, err = fshttp.NewDialer(ctx).Dial(network, address)
if err != nil { if f.tlsConf != nil && err == nil {
return nil, err conn = tls.Client(conn, f.tlsConf)
} }
// Connect using cleartext only for non TLS return
if f.tlsConf == nil {
return conn, nil
}
// Initial connection only needs to be cleartext for explicit TLS
if f.opt.ExplicitTLS && initialConnection {
initialConnection = false
return conn, nil
}
// Upgrade connection to TLS
tlsConn := tls.Client(conn, f.tlsConf)
// Do the initial handshake - tls.Client doesn't do it for us
// If we do this then connections to proftpd/pureftpd lock up
// See: https://github.com/rclone/rclone/issues/6426
// See: https://github.com/jlaffaye/ftp/issues/282
if false {
err = tlsConn.HandshakeContext(ctx)
if err != nil {
_ = conn.Close()
return nil, err
}
}
return tlsConn, nil
}
ftpConfig := []ftp.DialOption{
ftp.DialWithContext(ctx),
ftp.DialWithDialFunc(dial),
} }
ftpConfig := []ftp.DialOption{ftp.DialWithDialFunc(dial)}
if f.opt.TLS { if f.opt.TLS {
// Our dialer takes care of TLS but ftp library also needs tlsConf // Our dialer takes care of TLS but ftp library also needs tlsConf
@@ -381,6 +345,12 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf)) ftpConfig = append(ftpConfig, ftp.DialWithTLS(f.tlsConf))
} else if f.opt.ExplicitTLS { } else if f.opt.ExplicitTLS {
ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf)) ftpConfig = append(ftpConfig, ftp.DialWithExplicitTLS(f.tlsConf))
// Initial connection needs to be cleartext for explicit TLS
conn, err := fshttp.NewDialer(ctx).Dial("tcp", f.dialAddr)
if err != nil {
return nil, err
}
ftpConfig = append(ftpConfig, ftp.DialWithNetConn(conn))
} }
if f.opt.DisableEPSV { if f.opt.DisableEPSV {
ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true)) ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
@@ -397,9 +367,6 @@ func (f *Fs) ftpConnection(ctx context.Context) (c *ftp.ServerConn, err error) {
if f.opt.WritingMDTM { if f.opt.WritingMDTM {
ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true)) ftpConfig = append(ftpConfig, ftp.DialWithWritingMDTM(true))
} }
if f.opt.ForceListHidden {
ftpConfig = append(ftpConfig, ftp.DialWithForceListHidden(true))
}
if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 { if f.ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpRequests|fs.DumpResponses) != 0 {
ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0})) ftpConfig = append(ftpConfig, ftp.DialWithDebugOutput(&debugLog{auth: f.ci.Dump&fs.DumpAuth != 0}))
} }

View File

@@ -34,9 +34,9 @@ func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
// test that big file uploads do not cause network i/o timeout // test that big file uploads do not cause network i/o timeout
func (f *Fs) testUploadTimeout(t *testing.T) { func (f *Fs) testUploadTimeout(t *testing.T) {
const ( const (
fileSize = 100000000 // 100 MiB fileSize = 100000000 // 100 MiB
idleTimeout = 1 * time.Second // small because test server is local idleTimeout = 40 * time.Millisecond // small because test server is local
maxTime = 10 * time.Second // prevent test hangup maxTime = 10 * time.Second // prevent test hangup
) )
if testing.Short() { if testing.Short() {

View File

@@ -300,7 +300,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
ReadMetadata: true, ReadMetadata: true,
WriteMetadata: true, WriteMetadata: true,
UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
FilterAware: true,
}).Fill(ctx, f) }).Fill(ctx, f)
if opt.FollowSymlinks { if opt.FollowSymlinks {
f.lstat = os.Stat f.lstat = os.Stat

View File

@@ -378,9 +378,6 @@ func TestFilter(t *testing.T) {
r.WriteFile("excluded", "excluded file", when) r.WriteFile("excluded", "excluded file", when)
f := r.Flocal.(*Fs) f := r.Flocal.(*Fs)
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
// Add a filter // Add a filter
ctx, fi := filter.AddConfig(ctx) ctx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ included")) require.NoError(t, fi.AddRule("+ included"))

View File

@@ -891,12 +891,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}).Fill(ctx, f) }).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler) f.srv.SetErrorHandler(errorHandler)
// Disable change polling in China region
// See: https://github.com/rclone/rclone/issues/6444
if f.opt.Region == regionCN {
f.features.ChangeNotify = nil
}
// Renew the token in the background // Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error { f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
_, _, err := f.readMetaDataForPath(ctx, "") _, _, err := f.readMetaDataForPath(ctx, "")

View File

@@ -64,7 +64,7 @@ import (
func init() { func init() {
fs.Register(&fs.RegInfo{ fs.Register(&fs.RegInfo{
Name: "s3", Name: "s3",
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi", Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi",
NewFs: NewFs, NewFs: NewFs,
CommandHelp: commandHelp, CommandHelp: commandHelp,
Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) { Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
@@ -116,9 +116,6 @@ func init() {
}, { }, {
Value: "IDrive", Value: "IDrive",
Help: "IDrive e2", Help: "IDrive e2",
}, {
Value: "IONOS",
Help: "IONOS Cloud",
}, { }, {
Value: "LyveCloud", Value: "LyveCloud",
Help: "Seagate Lyve Cloud", Help: "Seagate Lyve Cloud",
@@ -387,24 +384,10 @@ func init() {
Value: "auto", Value: "auto",
Help: "R2 buckets are automatically distributed across Cloudflare's data centers for low latency.", Help: "R2 buckets are automatically distributed across Cloudflare's data centers for low latency.",
}}, }},
}, {
Name: "region",
Help: "Region where your bucket will be created and your data stored.\n",
Provider: "IONOS",
Examples: []fs.OptionExample{{
Value: "de",
Help: "Frankfurt, Germany",
}, {
Value: "eu-central-2",
Help: "Berlin, Germany",
}, {
Value: "eu-south-2",
Help: "Logrono, Spain",
}},
}, { }, {
Name: "region", Name: "region",
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.", Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS,Alibaba,ChinaMobile,Cloudflare,IONOS,ArvanCloud,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive", Provider: "!AWS,Alibaba,ChinaMobile,Cloudflare,ArvanCloud,RackCorp,Scaleway,Storj,TencentCOS,HuaweiOBS,IDrive",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "", Value: "",
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.", Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
@@ -715,20 +698,6 @@ func init() {
Value: "s3.private.sng01.cloud-object-storage.appdomain.cloud", Value: "s3.private.sng01.cloud-object-storage.appdomain.cloud",
Help: "Singapore Single Site Private Endpoint", Help: "Singapore Single Site Private Endpoint",
}}, }},
}, {
Name: "endpoint",
Help: "Endpoint for IONOS S3 Object Storage.\n\nSpecify the endpoint from the same region.",
Provider: "IONOS",
Examples: []fs.OptionExample{{
Value: "s3-eu-central-1.ionoscloud.com",
Help: "Frankfurt, Germany",
}, {
Value: "s3-eu-central-2.ionoscloud.com",
Help: "Berlin, Germany",
}, {
Value: "s3-eu-south-2.ionoscloud.com",
Help: "Logrono, Spain",
}},
}, { }, {
// oss endpoints: https://help.aliyun.com/document_detail/31837.html // oss endpoints: https://help.aliyun.com/document_detail/31837.html
Name: "endpoint", Name: "endpoint",
@@ -1032,7 +1001,7 @@ func init() {
}, { }, {
Name: "endpoint", Name: "endpoint",
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.", Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
Provider: "!AWS,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,ArvanCloud,Scaleway,StackPath,Storj,RackCorp", Provider: "!AWS,IBMCOS,IDrive,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,ArvanCloud,Scaleway,StackPath,Storj,RackCorp",
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io", Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint", Help: "Dream Objects endpoint",
@@ -1442,7 +1411,7 @@ func init() {
}, { }, {
Name: "location_constraint", Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.", Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,ArvanCloud,RackCorp,Scaleway,StackPath,Storj,TencentCOS", Provider: "!AWS,IBMCOS,IDrive,Alibaba,HuaweiOBS,ChinaMobile,Cloudflare,ArvanCloud,RackCorp,Scaleway,StackPath,Storj,TencentCOS",
}, { }, {
Name: "acl", Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects. Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -1566,21 +1535,8 @@ isn't set then "acl" is used instead.`,
Help: "arn:aws:kms:*", Help: "arn:aws:kms:*",
}}, }},
}, { }, {
Name: "sse_customer_key", Name: "sse_customer_key",
Help: `To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data. Help: "If using SSE-C you must provide the secret encryption key used to encrypt/decrypt your data.",
Alternatively you can provide --sse-customer-key-base64.`,
Provider: "AWS,Ceph,ChinaMobile,Minio",
Advanced: true,
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}},
}, {
Name: "sse_customer_key_base64",
Help: `If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.
Alternatively you can provide --sse-customer-key.`,
Provider: "AWS,Ceph,ChinaMobile,Minio", Provider: "AWS,Ceph,ChinaMobile,Minio",
Advanced: true, Advanced: true,
Examples: []fs.OptionExample{{ Examples: []fs.OptionExample{{
@@ -2155,7 +2111,6 @@ type Options struct {
SSEKMSKeyID string `config:"sse_kms_key_id"` SSEKMSKeyID string `config:"sse_kms_key_id"`
SSECustomerAlgorithm string `config:"sse_customer_algorithm"` SSECustomerAlgorithm string `config:"sse_customer_algorithm"`
SSECustomerKey string `config:"sse_customer_key"` SSECustomerKey string `config:"sse_customer_key"`
SSECustomerKeyBase64 string `config:"sse_customer_key_base64"`
SSECustomerKeyMD5 string `config:"sse_customer_key_md5"` SSECustomerKeyMD5 string `config:"sse_customer_key_md5"`
StorageClass string `config:"storage_class"` StorageClass string `config:"storage_class"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
@@ -2582,10 +2537,6 @@ func setQuirks(opt *Options) {
useMultipartEtag = false // untested useMultipartEtag = false // untested
case "IDrive": case "IDrive":
virtualHostStyle = false virtualHostStyle = false
case "IONOS":
// listObjectsV2 supported - https://api.ionos.com/docs/s3/#Basic-Operations-get-Bucket-list-type-2
virtualHostStyle = false
urlEncodeListings = false
case "LyveCloud": case "LyveCloud":
useMultipartEtag = false // LyveCloud seems to calculate multipart Etags differently from AWS useMultipartEtag = false // LyveCloud seems to calculate multipart Etags differently from AWS
case "Minio": case "Minio":
@@ -2693,16 +2644,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.BucketACL == "" { if opt.BucketACL == "" {
opt.BucketACL = opt.ACL opt.BucketACL = opt.ACL
} }
if opt.SSECustomerKeyBase64 != "" && opt.SSECustomerKey != "" {
return nil, errors.New("s3: can't use sse_customer_key and sse_customer_key_base64 at the same time")
} else if opt.SSECustomerKeyBase64 != "" {
// Decode the base64-encoded key and store it in the SSECustomerKey field
decoded, err := base64.StdEncoding.DecodeString(opt.SSECustomerKeyBase64)
if err != nil {
return nil, fmt.Errorf("s3: Could not decode sse_customer_key_base64: %w", err)
}
opt.SSECustomerKey = string(decoded)
}
if opt.SSECustomerKey != "" && opt.SSECustomerKeyMD5 == "" { if opt.SSECustomerKey != "" && opt.SSECustomerKeyMD5 == "" {
// calculate CustomerKeyMD5 if not supplied // calculate CustomerKeyMD5 if not supplied
md5sumBinary := md5.Sum([]byte(opt.SSECustomerKey)) md5sumBinary := md5.Sum([]byte(opt.SSECustomerKey))
@@ -2817,7 +2758,8 @@ func (f *Fs) getMetaDataListing(ctx context.Context, wantRemote string) (info *s
if isDirectory { if isDirectory {
return nil return nil
} }
if wantRemote != gotRemote { // compare the base name only since the listing will have a prefix
if path.Base(wantRemote) != path.Base(gotRemote) {
return nil return nil
} }
info = object info = object

View File

@@ -6,12 +6,16 @@ import (
"context" "context"
"crypto/md5" "crypto/md5"
"fmt" "fmt"
"path"
"strings"
"testing" "testing"
"time" "time"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/fspath"
"github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests" "github.com/rclone/rclone/fstest/fstests"
@@ -250,7 +254,7 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
time.Sleep(2 * time.Second) time.Sleep(2 * time.Second)
// Create an object // Create an object
const fileName = "test-versions.txt" const fileName = "versions/test-versions.txt"
contents := random.String(100) contents := random.String(100)
item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z")) item := fstest.NewItem(fileName, contents, fstest.Time("2001-05-06T04:05:06.499999999Z"))
obj := fstests.PutTestContents(ctx, t, f, &item, contents, true) obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
@@ -280,7 +284,7 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
}() }()
// Read the contents // Read the contents
entries, err := f.List(ctx, "") entries, err := f.List(ctx, "versions")
require.NoError(t, err) require.NoError(t, err)
tests := 0 tests := 0
var fileNameVersion string var fileNameVersion string
@@ -295,12 +299,24 @@ func (f *Fs) InternalTestVersions(t *testing.T) {
t.Run("ReadVersion", func(t *testing.T) { t.Run("ReadVersion", func(t *testing.T) {
assert.Equal(t, contents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1)) assert.Equal(t, contents, fstests.ReadObject(ctx, t, entry.(fs.Object), -1))
}) })
t.Run("NewFs", func(t *testing.T) {
// Check we can find the object with NewFs
fPath := fs.ConfigString(f)
fPath = strings.Replace(fPath, ":", ",versions=true:", 1)
subFPath := fspath.JoinRootPath(fPath, entry.Remote())
subF, err := cache.Get(ctx, subFPath)
require.Equal(t, fs.ErrorIsFile, err, "Remote %q didn't find a file", subFPath)
require.NotNil(t, subF)
o, err := subF.NewObject(ctx, path.Base(entry.Remote()))
require.NoError(t, err)
assert.Equal(t, contents, fstests.ReadObject(ctx, t, o, -1))
})
assert.WithinDuration(t, obj.(*Object).lastModified, versionTime, time.Second, "object time must be with 1 second of version time") assert.WithinDuration(t, obj.(*Object).lastModified, versionTime, time.Second, "object time must be with 1 second of version time")
fileNameVersion = remote fileNameVersion = remote
tests++ tests++
} }
} }
assert.Equal(t, 2, tests, "object missing from listing") assert.Equal(t, 2, tests, "object missing from listing: %v", entries)
// Check we can read the object with a version suffix // Check we can read the object with a version suffix
t.Run("NewObject", func(t *testing.T) { t.Run("NewObject", func(t *testing.T) {

View File

@@ -1171,10 +1171,6 @@ func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
err = c.sftpClient.Mkdir(dirPath) err = c.sftpClient.Mkdir(dirPath)
f.putSftpConnection(&c, err) f.putSftpConnection(&c, err)
if err != nil { if err != nil {
if os.IsExist(err) {
fs.Debugf(f, "directory %q exists after Mkdir is attempted", dirPath)
return nil
}
return fmt.Errorf("mkdir %q failed: %w", dirPath, err) return fmt.Errorf("mkdir %q failed: %w", dirPath, err)
} }
return nil return nil

View File

@@ -15,7 +15,6 @@ else
fi fi
rclone ${dry_run} -vv -P --checkers 16 --transfers 16 delete \ rclone ${dry_run} -vv -P --checkers 16 --transfers 16 delete \
--fast-list \
--include "/${version}**" \ --include "/${version}**" \
--include "/branch/*/${version}**" \ --include "/branch/${version}**" \
memstore:beta-rclone-org memstore:beta-rclone-org

View File

@@ -62,7 +62,7 @@ func startProgress() func() {
printProgress("") printProgress("")
fs.LogPrint = oldLogPrint fs.LogPrint = oldLogPrint
operations.SyncPrintf = oldSyncPrint operations.SyncPrintf = oldSyncPrint
fmt.Fprintln(terminal.Out, "") fmt.Println("")
return return
} }
} }

View File

@@ -135,7 +135,6 @@ WebDAV or S3, that work out of the box.)
{{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}} {{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}}
{{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}} {{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
{{< provider name="IDrive e2" home="https://www.idrive.com/e2/" config="/s3/#idrive-e2" >}} {{< provider name="IDrive e2" home="https://www.idrive.com/e2/" config="/s3/#idrive-e2" >}}
{{< provider name="IONOS Cloud" home="https://cloud.ionos.com/storage/object-storage" config="/s3/#ionos" >}}
{{< provider name="Koofr" home="https://koofr.eu/" config="/koofr/" >}} {{< provider name="Koofr" home="https://koofr.eu/" config="/koofr/" >}}
{{< provider name="Mail.ru Cloud" home="https://cloud.mail.ru/" config="/mailru/" >}} {{< provider name="Mail.ru Cloud" home="https://cloud.mail.ru/" config="/mailru/" >}}
{{< provider name="Memset Memstore" home="https://www.memset.com/cloud/storage/" config="/swift/" >}} {{< provider name="Memset Memstore" home="https://www.memset.com/cloud/storage/" config="/swift/" >}}

View File

@@ -637,7 +637,3 @@ put them back in again.` >}}
* Ryan Morey <4590343+rmorey@users.noreply.github.com> * Ryan Morey <4590343+rmorey@users.noreply.github.com>
* Simon Bos <simonbos9@gmail.com> * Simon Bos <simonbos9@gmail.com>
* YFdyh000 <yfdyh000@gmail.com> * Josh Soref <2119212+jsoref@users.noreply.github.com> * YFdyh000 <yfdyh000@gmail.com> * Josh Soref <2119212+jsoref@users.noreply.github.com>
* Øyvind Heddeland Instefjord <instefjord@outlook.com>
* Dmitry Deniskin <110819396+ddeniskin@users.noreply.github.com>
* Alexander Knorr <106825+opexxx@users.noreply.github.com>
* Richard Bateman <richard@batemansr.us>

View File

@@ -5,21 +5,6 @@ description: "Rclone Changelog"
# Changelog # Changelog
## v1.59.2 - 2022-09-15
[See commits](https://github.com/rclone/rclone/compare/v1.59.1...v1.59.2)
* Bug Fixes
* config: Move locking to fix fatal error: concurrent map read and map write (Nick Craig-Wood)
* Local
* Disable xattr support if the filesystems indicates it is not supported (Nick Craig-Wood)
* Azure Blob
* Fix chunksize calculations producing too many parts (Nick Craig-Wood)
* B2
* Fix chunksize calculations producing too many parts (Nick Craig-Wood)
* S3
* Fix chunksize calculations producing too many parts (Nick Craig-Wood)
## v1.59.1 - 2022-08-08 ## v1.59.1 - 2022-08-08
[See commits](https://github.com/rclone/rclone/compare/v1.59.0...v1.59.1) [See commits](https://github.com/rclone/rclone/compare/v1.59.0...v1.59.1)

View File

@@ -341,6 +341,8 @@ mount sftp1:subdir /mnt/data -t rclone -o vfs_cache_mode=writes,sftp_key_file=/p
or create systemd mount units: or create systemd mount units:
``` ```
# /etc/systemd/system/mnt-data.mount # /etc/systemd/system/mnt-data.mount
[Unit]
After=network-online.target
[Mount] [Mount]
Type=rclone Type=rclone
What=sftp1:subdir What=sftp1:subdir
@@ -352,6 +354,7 @@ optionally accompanied by systemd automount unit
``` ```
# /etc/systemd/system/mnt-data.automount # /etc/systemd/system/mnt-data.automount
[Unit] [Unit]
After=network-online.target
Before=remote-fs.target Before=remote-fs.target
[Automount] [Automount]
Where=/mnt/data Where=/mnt/data

View File

@@ -1868,22 +1868,13 @@ By default, rclone doesn't keep track of renamed files, so if you
rename a file locally then sync it to a remote, rclone will delete the rename a file locally then sync it to a remote, rclone will delete the
old file on the remote and upload a new copy. old file on the remote and upload a new copy.
An rclone sync with `--track-renames` runs like a normal sync, but keeps If you use this flag, and the remote supports server-side copy or
track of objects which exist in the destination but not in the source server-side move, and the source and destination have a compatible
(which would normally be deleted), and which objects exist in the hash, then this will track renames during `sync`
source but not the destination (which would normally be transferred). operations and perform renaming server-side.
These objects are then candidates for renaming.
After the sync, rclone matches up the source only and destination only Files will be matched by size and hash - if both match then a rename
objects using the `--track-renames-strategy` specified and either will be considered.
renames the destination object or transfers the source and deletes the
destination object. `--track-renames` is stateless like all of
rclone's syncs.
To use this flag the destination must support server-side copy or
server-side move, and to use a hash based `--track-renames-strategy`
(the default) the source and the destination must have a compatible
hash.
If the destination does not support server-side copy or move, rclone If the destination does not support server-side copy or move, rclone
will fall back to the default behaviour and log an error level message will fall back to the default behaviour and log an error level message
@@ -1901,7 +1892,7 @@ Note also that `--track-renames` is incompatible with
### --track-renames-strategy (hash,modtime,leaf,size) ### ### --track-renames-strategy (hash,modtime,leaf,size) ###
This option changes the file matching criteria for `--track-renames`. This option changes the matching criteria for `--track-renames`.
The matching is controlled by a comma separated selection of these tokens: The matching is controlled by a comma separated selection of these tokens:
@@ -1910,15 +1901,15 @@ The matching is controlled by a comma separated selection of these tokens:
- `leaf` - the name of the file not including its directory name - `leaf` - the name of the file not including its directory name
- `size` - the size of the file (this is always enabled) - `size` - the size of the file (this is always enabled)
The default option is `hash`. So using `--track-renames-strategy modtime,leaf` would match files
Using `--track-renames-strategy modtime,leaf` would match files
based on modification time, the leaf of the file name and the size based on modification time, the leaf of the file name and the size
only. only.
Using `--track-renames-strategy modtime` or `leaf` can enable Using `--track-renames-strategy modtime` or `leaf` can enable
`--track-renames` support for encrypted destinations. `--track-renames` support for encrypted destinations.
If nothing is specified, the default option is matching by `hash`es.
Note that the `hash` strategy is not supported with encrypted destinations. Note that the `hash` strategy is not supported with encrypted destinations.
### --delete-(before,during,after) ### ### --delete-(before,during,after) ###

View File

@@ -357,7 +357,6 @@ and may be set in the config file.
--ftp-tls-cache-size int Size of TLS session cache for all control and data connections (default 32) --ftp-tls-cache-size int Size of TLS session cache for all control and data connections (default 32)
--ftp-user string FTP username (default "$USER") --ftp-user string FTP username (default "$USER")
--ftp-writing-mdtm Use MDTM to set modification time (VsFtpd quirk) --ftp-writing-mdtm Use MDTM to set modification time (VsFtpd quirk)
--ftp-force-list-hidden Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.
--gcs-anonymous Access public buckets and objects without credentials --gcs-anonymous Access public buckets and objects without credentials
--gcs-auth-url string Auth server URL --gcs-auth-url string Auth server URL
--gcs-bucket-acl string Access Control List for new buckets --gcs-bucket-acl string Access Control List for new buckets

View File

@@ -310,17 +310,6 @@ Properties:
- Type: bool - Type: bool
- Default: false - Default: false
#### --ftp-force-list-hidden
Use LIST -a to force listing of hidden files and folders. This will disable the use of MLSD.
Properties:
- Config: force_list_hidden
- Env Var: RCLONE_FTP_FORCE_LIST_HIDDEN
- Type: bool
- Default: false
#### --ftp-idle-timeout #### --ftp-idle-timeout
Max time before closing idle connections. Max time before closing idle connections.

View File

@@ -14,7 +14,7 @@ Rclone is a Go program and comes as a single binary file.
* Run `rclone config` to setup. See [rclone config docs](/docs/) for more details. * Run `rclone config` to setup. See [rclone config docs](/docs/) for more details.
* Optionally configure [automatic execution](#autostart). * Optionally configure [automatic execution](#autostart).
See below for some expanded Linux / macOS / Windows instructions. See below for some expanded Linux / macOS instructions.
See the [usage](/docs/) docs for how to use rclone, or See the [usage](/docs/) docs for how to use rclone, or
run `rclone -h`. run `rclone -h`.
@@ -35,9 +35,7 @@ For beta installation, run:
Note that this script checks the version of rclone installed first and Note that this script checks the version of rclone installed first and
won't re-download if not needed. won't re-download if not needed.
## Linux installation {#linux} ## Linux installation from precompiled binary
### Precompiled binary {#linux-precompiled}
Fetch and unpack Fetch and unpack
@@ -61,9 +59,7 @@ Run `rclone config` to setup. See [rclone config docs](/docs/) for more details.
rclone config rclone config
## macOS installation {#macos} ## macOS installation with brew
### Installation with brew {#macos-brew}
brew install rclone brew install rclone
@@ -72,12 +68,7 @@ NOTE: This version of rclone will not support `mount` any more (see
on macOS, either install a precompiled binary or enable the relevant option on macOS, either install a precompiled binary or enable the relevant option
when [installing from source](#install-from-source). when [installing from source](#install-from-source).
Note that this is a third party installer not controlled by the rclone ## macOS installation from precompiled binary, using curl
developers so it may be out of date. Its current version is as below.
[![Homebrew package](https://repology.org/badge/version-for-repo/homebrew/rclone.svg)](https://repology.org/project/rclone/versions)
### Precompiled binary, using curl {#macos-precompiled}
To avoid problems with macOS gatekeeper enforcing the binary to be signed and To avoid problems with macOS gatekeeper enforcing the binary to be signed and
notarized it is enough to download with `curl`. notarized it is enough to download with `curl`.
@@ -105,7 +96,7 @@ Run `rclone config` to setup. See [rclone config docs](/docs/) for more details.
rclone config rclone config
### Precompiled binary, using a web browser {#macos-precompiled-web} ## macOS installation from precompiled binary, using a web browser
When downloading a binary with a web browser, the browser will set the macOS When downloading a binary with a web browser, the browser will set the macOS
gatekeeper quarantine attribute. Starting from Catalina, when attempting to run gatekeeper quarantine attribute. Starting from Catalina, when attempting to run
@@ -118,73 +109,11 @@ The simplest fix is to run
xattr -d com.apple.quarantine rclone xattr -d com.apple.quarantine rclone
## Windows installation {#windows} ## Install with docker
### Precompiled binary {#windows-precompiled} The rclone maintains a [docker image for rclone](https://hub.docker.com/r/rclone/rclone).
These images are autobuilt by docker hub from the rclone source based
Fetch the correct binary for your processor type by clicking on these on a minimal Alpine linux image.
links. If not sure, use the first link.
- [Intel/AMD - 64 Bit](https://downloads.rclone.org/rclone-current-linux-amd64.zip)
- [Intel/AMD - 32 Bit](https://downloads.rclone.org/rclone-current-linux-386.zip)
- [ARM - 64 Bit](https://downloads.rclone.org/rclone-current-linux-arm64.zip)
Open this file in the Explorer and extract `rclone.exe`. Rclone is a
portable executable so you can place it wherever is convenient.
Open a CMD window (or powershell) and run the binary. Note that rclone
does not launch a GUI by default, it runs in the CMD Window.
- Run `rclone.exe config` to setup. See [rclone config docs](/docs/) for more details.
- Optionally configure [automatic execution](#autostart).
If you are planning to use the [rclone mount](/commands/rclone_mount/)
feature then you will need to install the third party utility
[WinFsp](https://winfsp.dev/) also.
### Chocolatey package manager {#windows-chocolatey}
Make sure you have [Choco](https://chocolatey.org/) installed
```
choco search rclone
choco install rclone
```
This will install rclone on your Windows machine. If you are planning
to use [rclone mount](/commands/rclone_mount/) then
```
choco install winfsp
```
will install that too.
Note that this is a third party installer not controlled by the rclone
developers so it may be out of date. Its current version is as below.
[![Chocolatey package](https://repology.org/badge/version-for-repo/chocolatey/rclone.svg)](https://repology.org/project/rclone/versions)
## Package manager installation {#package-manager}
Many Linux, Windows, macOS and other OS distributions package and
distribute rclone.
The distributed versions of rclone are often quite out of date and for
this reason we recommend one of the other installation methods if
possible.
You can get an idea of how up to date or not your OS distribution's
package is here.
[![Packaging status](https://repology.org/badge/vertical-allrepos/rclone.svg?columns=3)](https://repology.org/project/rclone/versions)
## Docker installation {#docker}
The rclone developers maintain a [docker image for rclone](https://hub.docker.com/r/rclone/rclone).
These images are built as part of the release process based on a
minimal Alpine Linux.
The `:latest` tag will always point to the latest stable release. You The `:latest` tag will always point to the latest stable release. You
can use the `:beta` tag to get the latest build from master. You can can use the `:beta` tag to get the latest build from master. You can
@@ -259,7 +188,7 @@ ls ~/data/mount
kill %1 kill %1
``` ```
## Source installation {#source} ## Install from source
Make sure you have git and [Go](https://golang.org/) installed. Make sure you have git and [Go](https://golang.org/) installed.
Go version 1.17 or newer is required, latest release is recommended. Go version 1.17 or newer is required, latest release is recommended.
@@ -278,7 +207,7 @@ in the same folder. As an initial check you can now run `./rclone version`
(`.\rclone version` on Windows). (`.\rclone version` on Windows).
Note that on macOS and Windows the [mount](https://rclone.org/commands/rclone_mount/) Note that on macOS and Windows the [mount](https://rclone.org/commands/rclone_mount/)
command will not be available unless you specify an additional build tag `cmount`. command will not be available unless you specify additional build tag `cmount`.
``` ```
go build -tags cmount go build -tags cmount
@@ -297,7 +226,7 @@ distribution (make sure you install it in the classic mingw64 subsystem, the
ucrt64 version is not compatible). ucrt64 version is not compatible).
Additionally, on Windows, you must install the third party utility Additionally, on Windows, you must install the third party utility
[WinFsp](https://winfsp.dev/), with the "Developer" feature selected. [WinFsp](http://www.secfs.net/winfsp/), with the "Developer" feature selected.
If building with cgo, you must also set environment variable CPATH pointing to If building with cgo, you must also set environment variable CPATH pointing to
the fuse include directory within the WinFsp installation the fuse include directory within the WinFsp installation
(normally `C:\Program Files (x86)\WinFsp\inc\fuse`). (normally `C:\Program Files (x86)\WinFsp\inc\fuse`).
@@ -312,10 +241,9 @@ go build -trimpath -ldflags -s -tags cmount
``` ```
Instead of executing the `go build` command directly, you can run it via the Instead of executing the `go build` command directly, you can run it via the
Makefile. It changes the version number suffix from "-DEV" to "-beta" and Makefile, which also sets version information and copies the resulting rclone
appends commit details. It also copies the resulting rclone executable into executable into your GOPATH bin folder (`$(go env GOPATH)/bin`, which
your GOPATH bin folder (`$(go env GOPATH)/bin`, which corresponds to corresponds to `~/go/bin/rclone` by default).
`~/go/bin/rclone` by default).
``` ```
make make
@@ -327,15 +255,7 @@ To include mount command on macOS and Windows with Makefile build:
make GOTAGS=cmount make GOTAGS=cmount
``` ```
There are other make targets that can be used for more advanced builds, As an alternative you can download the source, build and install rclone in one
such as cross-compiling for all supported os/architectures, embedding
icon and version info resources into windows executable, and packaging
results into release artifacts.
See [Makefile](https://github.com/rclone/rclone/blob/master/Makefile)
and [cross-compile.go](https://github.com/rclone/rclone/blob/master/bin/cross-compile.go)
for details.
Another alternative is to download the source, build and install rclone in one
operation, as a regular Go package. The source will be stored it in the Go operation, as a regular Go package. The source will be stored it in the Go
module cache, and the resulting executable will be in your GOPATH bin folder module cache, and the resulting executable will be in your GOPATH bin folder
(`$(go env GOPATH)/bin`, which corresponds to `~/go/bin/rclone` by default). (`$(go env GOPATH)/bin`, which corresponds to `~/go/bin/rclone` by default).
@@ -354,7 +274,7 @@ with the current version):
go get github.com/rclone/rclone go get github.com/rclone/rclone
``` ```
## Ansible installation {#ansible} ## Installation with Ansible
This can be done with [Stefan Weichinger's ansible This can be done with [Stefan Weichinger's ansible
role](https://github.com/stefangweichinger/ansible-rclone). role](https://github.com/stefangweichinger/ansible-rclone).
@@ -370,7 +290,7 @@ Instructions
- rclone - rclone
``` ```
## Portable installation {#portable} ## Portable installation
As mentioned [above](https://rclone.org/install/#quickstart), rclone is single As mentioned [above](https://rclone.org/install/#quickstart), rclone is single
executable (`rclone`, or `rclone.exe` on Windows) that you can download as a executable (`rclone`, or `rclone.exe` on Windows) that you can download as a

View File

@@ -152,7 +152,7 @@ A common error is that the publisher of the App is not verified.
You may try to [verify you account](https://docs.microsoft.com/en-us/azure/active-directory/develop/publisher-verification-overview), or try to limit the App to your organization only, as shown below. You may try to [verify you account](https://docs.microsoft.com/en-us/azure/active-directory/develop/publisher-verification-overview), or try to limit the App to your organization only, as shown below.
1. Make sure to create the App with your business account. 1. Make sure to create the App with your business account.
2. Follow the steps above to create an App. However, we need a different account type here: `Accounts in this organizational directory only (*** - Single tenant)`. Note that you can also change the account type after creating the App. 2. Follow the steps above to create an App. However, we need a different account type here: `Accounts in this organizational directory only (*** - Single tenant)`. Note that you can also change the account type aftering creating the App.
3. Find the [tenant ID](https://docs.microsoft.com/en-us/azure/active-directory/fundamentals/active-directory-how-to-find-tenant) of your organization. 3. Find the [tenant ID](https://docs.microsoft.com/en-us/azure/active-directory/fundamentals/active-directory-how-to-find-tenant) of your organization.
4. In the rclone config, set `auth_url` to `https://login.microsoftonline.com/YOUR_TENANT_ID/oauth2/v2.0/authorize`. 4. In the rclone config, set `auth_url` to `https://login.microsoftonline.com/YOUR_TENANT_ID/oauth2/v2.0/authorize`.
5. In the rclone config, set `token_url` to `https://login.microsoftonline.com/YOUR_TENANT_ID/oauth2/v2.0/token`. 5. In the rclone config, set `token_url` to `https://login.microsoftonline.com/YOUR_TENANT_ID/oauth2/v2.0/token`.

View File

@@ -19,7 +19,6 @@ The S3 backend can be used with a number of different providers:
{{< provider name="Huawei OBS" home="https://www.huaweicloud.com/intl/en-us/product/obs.html" config="/s3/#huawei-obs" >}} {{< provider name="Huawei OBS" home="https://www.huaweicloud.com/intl/en-us/product/obs.html" config="/s3/#huawei-obs" >}}
{{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}} {{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
{{< provider name="IDrive e2" home="https://www.idrive.com/e2/" config="/s3/#idrive-e2" >}} {{< provider name="IDrive e2" home="https://www.idrive.com/e2/" config="/s3/#idrive-e2" >}}
{{< provider name="IONOS Cloud" home="https://cloud.ionos.com/storage/object-storage" config="/s3/#ionos" >}}
{{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}} {{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
{{< provider name="RackCorp Object Storage" home="https://www.rackcorp.com/" config="/s3/#RackCorp" >}} {{< provider name="RackCorp Object Storage" home="https://www.rackcorp.com/" config="/s3/#RackCorp" >}}
{{< provider name="Scaleway" home="https://www.scaleway.com/en/object-storage/" config="/s3/#scaleway" >}} {{< provider name="Scaleway" home="https://www.scaleway.com/en/object-storage/" config="/s3/#scaleway" >}}
@@ -3534,169 +3533,6 @@ d) Delete this remote
y/e/d> y y/e/d> y
``` ```
### IONOS Cloud {#ionos}
[IONOS S3 Object Storage](https://cloud.ionos.com/storage/object-storage) is a service offered by IONOS for storing and accessing unstructured data.
To connect to the service, you will need an access key and a secret key. These can be found in the [Data Center Designer](https://dcd.ionos.com/), by selecting **Manager resources** > **Object Storage Key Manager**.
Here is an example of a configuration. First, run `rclone config`. This will walk you through an interactive setup process. Type `n` to add the new remote, and then enter a name:
```
Enter name for new remote.
name> ionos-fra
```
Type `s3` to choose the connection type:
```
Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, Digital Ocean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Tencent COS and Wasabi
\ (s3)
[snip]
Storage> s3
```
Type `IONOS`:
```
Option provider.
Choose your S3 provider.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
[snip]
XX / IONOS Cloud
\ (IONOS)
[snip]
provider> IONOS
```
Press Enter to choose the default option `Enter AWS credentials in the next step`:
```
Option env_auth.
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
Only applies if access_key_id and secret_access_key is blank.
Choose a number from below, or type in your own boolean value (true or false).
Press Enter for the default (false).
1 / Enter AWS credentials in the next step.
\ (false)
2 / Get AWS credentials from the environment (env vars or IAM).
\ (true)
env_auth>
```
Enter your Access Key and Secret key. These can be retrieved in the [Data Center Designer](https://dcd.ionos.com/), click on the menu “Manager resources” / "Object Storage Key Manager".
```
Option access_key_id.
AWS Access Key ID.
Leave blank for anonymous access or runtime credentials.
Enter a value. Press Enter to leave empty.
access_key_id> YOUR_ACCESS_KEY
Option secret_access_key.
AWS Secret Access Key (password).
Leave blank for anonymous access or runtime credentials.
Enter a value. Press Enter to leave empty.
secret_access_key> YOUR_SECRET_KEY
```
Choose the region where your bucket is located:
```
Option region.
Region where your bucket will be created and your data stored.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
1 / Frankfurt, Germany
\ (de)
2 / Berlin, Germany
\ (eu-central-2)
3 / Logrono, Spain
\ (eu-south-2)
region> 2
```
Choose the endpoint from the same region:
```
Option endpoint.
Endpoint for IONOS S3 Object Storage.
Specify the endpoint from the same region.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
1 / Frankfurt, Germany
\ (s3-eu-central-1.ionoscloud.com)
2 / Berlin, Germany
\ (s3-eu-central-2.ionoscloud.com)
3 / Logrono, Spain
\ (s3-eu-south-2.ionoscloud.com)
endpoint> 1
```
Press Enter to choose the default option or choose the desired ACL setting:
```
Option acl.
Canned ACL used when creating buckets and storing or copying objects.
This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Note that this ACL is applied when server-side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.
Choose a number from below, or type in your own value.
Press Enter to leave empty.
/ Owner gets FULL_CONTROL.
1 | No one else has access rights (default).
\ (private)
/ Owner gets FULL_CONTROL.
[snip]
acl>
```
Press Enter to skip the advanced config:
```
Edit advanced config?
y) Yes
n) No (default)
y/n>
```
Press Enter to save the configuration, and then `q` to quit the configuration process:
```
Configuration complete.
Options:
- type: s3
- provider: IONOS
- access_key_id: YOUR_ACCESS_KEY
- secret_access_key: YOUR_SECRET_KEY
- endpoint: s3-eu-central-1.ionoscloud.com
Keep this "ionos-fra" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
```
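For reference, the saved remote corresponds to a section like the following in your `rclone.conf` (shown with the placeholder values used above); you can display it at any time with `rclone config show ionos-fra`:
```
[ionos-fra]
type = s3
provider = IONOS
access_key_id = YOUR_ACCESS_KEY
secret_access_key = YOUR_SECRET_KEY
endpoint = s3-eu-central-1.ionoscloud.com
```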
Done! Now you can try some commands (for macOS, use `./rclone` instead of `rclone`).
1) Create a bucket (the name must be unique within the whole IONOS S3)
```
rclone mkdir ionos-fra:my-bucket
```
2) List available buckets
```
rclone lsd ionos-fra:
```
3) Copy a file from local to remote
```
rclone copy /Users/file.txt ionos-fra:my-bucket
```
4) List the contents of the bucket
```
rclone ls ionos-fra:my-bucket
```
5) Copy a file from remote to local
```
rclone copy ionos-fra:my-bucket/file.txt .
```
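As a slightly bigger example, you could sync a whole local directory to a path inside the bucket and then check what arrived (the local path here is hypothetical):
```
# sync a local folder (hypothetical path) to a folder in the bucket
rclone sync /Users/documents ionos-fra:my-bucket/documents --progress
# list what is now stored under that prefix
rclone ls ionos-fra:my-bucket/documents
```
Note that `sync` makes the destination match the source, deleting files on the destination that are not present in the source, so use `copy` instead if that is not what you want.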
### Minio
[Minio](https://minio.io/) is an object storage server built for cloud application developers and devops.

View File

@@ -17,9 +17,9 @@
<div class="card-body"> <div class="card-body">
<p class="menu"> <p class="menu">
<!-- Non tracking sharing links from: https://sharingbuttons.io/ --> <!-- Non tracking sharing links from: https://sharingbuttons.io/ -->
<i class="fab fa-twitter fa-fw" aria-hidden="true"></i> <a href="https://twitter.com/intent/tweet/?text=rclone%20-%20rsync%20for%20cloud%20storage%20from%20%40njcw&amp;url=https%3A%2F%2Frclone.org" target="_blank" rel="noopener" aria-label="Share on Twitter">Twitter</a><br /> <i class="fab fa-twitter" aria-hidden="true"></i> <a href="https://twitter.com/intent/tweet/?text=rclone%20-%20rsync%20for%20cloud%20storage%20from%20%40njcw&amp;url=https%3A%2F%2Frclone.org" target="_blank" rel="noopener" aria-label="Share on Twitter">Twitter</a><br />
<i class="fab fa-facebook fa-fw" aria-hidden="true"></i> <a href="https://facebook.com/sharer/sharer.php?u=https%3A%2F%2Frclone.org" target="_blank" rel="noopener" aria-label="Share on Facebook">Facebook</a><br /> <i class="fab fa-facebook" aria-hidden="true"></i> <a href="https://facebook.com/sharer/sharer.php?u=https%3A%2F%2Frclone.org" target="_blank" rel="noopener" aria-label="Share on Facebook">Facebook</a><br />
<i class="fab fa-reddit fa-fw" aria-hidden="true"></i> <a href="https://reddit.com/submit/?url=https%3A%2F%2Frclone.org&amp;resubmit=true&amp;title=rclone%20-%20rsync%20for%20cloud%20storage" target="_blank" rel="noopener" aria-label="Share on Reddit">Reddit</a><br /> <i class="fab fa-reddit" aria-hidden="true"></i> <a href="https://reddit.com/submit/?url=https%3A%2F%2Frclone.org&amp;resubmit=true&amp;title=rclone%20-%20rsync%20for%20cloud%20storage" target="_blank" rel="noopener" aria-label="Share on Reddit">Reddit</a><br />
<iframe src="//ghbtns.com/github-btn.html?user=rclone&amp;repo=rclone&amp;type=star&amp;count=true" allowtransparency="true" frameborder="0" scrolling="no" width="120" height="20"></iframe> <iframe src="//ghbtns.com/github-btn.html?user=rclone&amp;repo=rclone&amp;type=star&amp;count=true" allowtransparency="true" frameborder="0" scrolling="no" width="120" height="20"></iframe>
</p> </p>
</div> </div>
@@ -31,12 +31,12 @@
</div> </div>
<div class="card-body"> <div class="card-body">
<p class="menu"> <p class="menu">
<i class="fa fa-comments fa-fw" aria-hidden="true"></i> <a href="https://forum.rclone.org">Rclone forum</a><br /> <i class="fa fa-comments" aria-hidden="true"></i> <a href="https://forum.rclone.org">Rclone forum</a><br />
<i class="fab fa-github fa-fw" aria-hidden="true"></i> <a href="https://github.com/rclone/rclone">GitHub project</a><br /> <i class="fab fa-github" aria-hidden="true"></i> <a href="https://github.com/rclone/rclone">GitHub project</a><br />
<i class="fab fa-slack fa-fw" aria-hidden="true"></i> <a href="https://slack-invite.rclone.org/">Rclone slack</a><br /> <i class="fab fa-slack" aria-hidden="true"></i> <a href="https://slack-invite.rclone.org/">Rclone slack</a><br />
<i class="fa fa-book fa-fw" aria-hidden="true"></i> <a href="https://github.com/rclone/rclone/wiki">Rclone Wiki</a><br /> <i class="fa fa-book" aria-hidden="true"></i> <a href="https://github.com/rclone/rclone/wiki">Rclone Wiki</a><br />
<i class="fa fa-heart heart fa-fw" aria-hidden="true"></i> <a href="/donate/">Donate</a><br /> <i class="fa fa-heart heart" aria-hidden="true"></i> <a href="/donate/">Donate</a><br />
<i class="fab fa-twitter fa-fw" aria-hidden="true"></i> <a href="https://twitter.com/njcw">@njcw</a> <i class="fab fa-twitter" aria-hidden="true"></i> <a href="https://twitter.com/njcw">@njcw</a>
</p> </p>
</div> </div>
</div> </div>

View File

@@ -14,18 +14,18 @@
Docs Docs
</a> </a>
<div class="dropdown-menu" aria-labelledby="navbarDropdown"> <div class="dropdown-menu" aria-labelledby="navbarDropdown">
<a class="dropdown-item" href="/install/"><i class="fa fa-book fa-fw"></i> Installation</a> <a class="dropdown-item" href="/install/"><i class="fa fa-book"></i> Installation</a>
<a class="dropdown-item" href="/docs/"><i class="fa fa-book fa-fw"></i> Usage</a> <a class="dropdown-item" href="/docs/"><i class="fa fa-book"></i> Usage</a>
<a class="dropdown-item" href="/filtering/"><i class="fa fa-book fa-fw"></i> Filtering</a> <a class="dropdown-item" href="/filtering/"><i class="fa fa-book"></i> Filtering</a>
<a class="dropdown-item" href="/gui/"><i class="fa fa-book fa-fw"></i> GUI</a> <a class="dropdown-item" href="/gui/"><i class="fa fa-book"></i> GUI</a>
<a class="dropdown-item" href="/rc/"><i class="fa fa-book fa-fw"></i> Remote Control</a> <a class="dropdown-item" href="/rc/"><i class="fa fa-book"></i> Remote Control</a>
<a class="dropdown-item" href="/changelog/"><i class="fa fa-book fa-fw"></i> Changelog</a> <a class="dropdown-item" href="/changelog/"><i class="fa fa-book"></i> Changelog</a>
<a class="dropdown-item" href="/bugs/"><i class="fa fa-book fa-fw"></i> Bugs</a> <a class="dropdown-item" href="/bugs/"><i class="fa fa-book"></i> Bugs</a>
<a class="dropdown-item" href="/faq/"><i class="fa fa-book fa-fw"></i> FAQ</a> <a class="dropdown-item" href="/faq/"><i class="fa fa-book"></i> FAQ</a>
<a class="dropdown-item" href="/flags/"><i class="fa fa-book fa-fw"></i> Flags</a> <a class="dropdown-item" href="/flags/"><i class="fa fa-book"></i> Flags</a>
<a class="dropdown-item" href="/licence/"><i class="fa fa-book fa-fw"></i> Licence</a> <a class="dropdown-item" href="/licence/"><i class="fa fa-book"></i> Licence</a>
<a class="dropdown-item" href="/authors/"><i class="fa fa-book fa-fw"></i> Authors</a> <a class="dropdown-item" href="/authors/"><i class="fa fa-book"></i> Authors</a>
<a class="dropdown-item" href="/privacy/"><i class="fa fa-book fa-fw"></i> Privacy Policy</a> <a class="dropdown-item" href="/privacy/"><i class="fa fa-book"></i> Privacy Policy</a>
</div> </div>
</li> </li>
<li class="nav-item active dropdown"> <li class="nav-item active dropdown">
@@ -33,12 +33,12 @@
Commands Commands
</a> </a>
<div class="dropdown-menu pre-scrollable" aria-labelledby="navbarDropdown"> <div class="dropdown-menu pre-scrollable" aria-labelledby="navbarDropdown">
<a class="dropdown-item" href="/commands/"><i class="fas fa-map fa-fw"></i> Overview</a> <a class="dropdown-item" href="/commands/"><i class="fas fa-map"></i> Overview</a>
<div class="dropdown-divider"></div> <div class="dropdown-divider"></div>
{{ with .Site.GetPage "/commands" }} {{ with .Site.GetPage "/commands" }}
{{ range .Data.Pages }} {{ range .Data.Pages }}
{{ if lt (countwords .Title) 3 }} {{ if lt (countwords .Title) 3 }}
<a class="dropdown-item" href="{{ .RelPermalink }}"><i class="fa fa-book fa-fw"></i> {{ .Title | markdownify }}</a> <a class="dropdown-item" href="{{ .RelPermalink }}"><i class="fa fa-book"></i> {{ .Title | markdownify }}</a>
{{ end }} {{ end }}
{{ end }} {{ end }}
{{ end }} {{ end }}
@@ -49,65 +49,65 @@
Storage Systems Storage Systems
</a> </a>
<div class="dropdown-menu pre-scrollable" aria-labelledby="navbarDropdown"> <div class="dropdown-menu pre-scrollable" aria-labelledby="navbarDropdown">
<a class="dropdown-item" href="/overview/"><i class="fas fa-map fa-fw"></i> Overview</a> <a class="dropdown-item" href="/overview/"><i class="fas fa-map"></i> Overview</a>
<div class="dropdown-divider"></div> <div class="dropdown-divider"></div>
<a class="dropdown-item" href="/fichier/"><i class="fa fa-archive fa-fw"></i> 1Fichier</a> <a class="dropdown-item" href="/fichier/"><i class="fa fa-archive"></i> 1Fichier</a>
<a class="dropdown-item" href="/netstorage/"><i class="fas fa-database fa-fw"></i> Akamai NetStorage</a> <a class="dropdown-item" href="/netstorage/"><i class="fas fa-database"></i> Akamai NetStorage</a>
<a class="dropdown-item" href="/alias/"><i class="fa fa-link fa-fw"></i> Alias</a> <a class="dropdown-item" href="/alias/"><i class="fa fa-link"></i> Alias</a>
<a class="dropdown-item" href="/amazonclouddrive/"><i class="fab fa-amazon fa-fw"></i> Amazon Drive</a> <a class="dropdown-item" href="/amazonclouddrive/"><i class="fab fa-amazon"></i> Amazon Drive</a>
<a class="dropdown-item" href="/s3/"><i class="fab fa-amazon fa-fw"></i> Amazon S3</a> <a class="dropdown-item" href="/s3/"><i class="fab fa-amazon"></i> Amazon S3</a>
<a class="dropdown-item" href="/b2/"><i class="fa fa-fire fa-fw"></i> Backblaze B2</a> <a class="dropdown-item" href="/b2/"><i class="fa fa-fire"></i> Backblaze B2</a>
<a class="dropdown-item" href="/box/"><i class="fa fa-archive fa-fw"></i> Box</a> <a class="dropdown-item" href="/box/"><i class="fa fa-archive"></i> Box</a>
<a class="dropdown-item" href="/chunker/"><i class="fa fa-cut fa-fw"></i> Chunker (splits large files)</a> <a class="dropdown-item" href="/chunker/"><i class="fa fa-cut"></i> Chunker (splits large files)</a>
<a class="dropdown-item" href="/compress/"><i class="fas fa-compress fa-fw"></i> Compress (transparent gzip compression)</a> <a class="dropdown-item" href="/compress/"><i class="fas fa-compress"></i> Compress (transparent gzip compression)</a>
<a class="dropdown-item" href="/combine/"><i class="fa fa-folder-plus fa-fw"></i> Combine (remotes into a directory tree)</a> <a class="dropdown-item" href="/combine/"><i class="fa fa-folder-plus"></i> Combine (remotes into a directory tree)</a>
<a class="dropdown-item" href="/sharefile/"><i class="fas fa-share-square fa-fw"></i> Citrix ShareFile</a> <a class="dropdown-item" href="/sharefile/"><i class="fas fa-share-square"></i> Citrix ShareFile</a>
<a class="dropdown-item" href="/crypt/"><i class="fa fa-lock fa-fw"></i> Crypt (encrypts the others)</a> <a class="dropdown-item" href="/crypt/"><i class="fa fa-lock"></i> Crypt (encrypts the others)</a>
<a class="dropdown-item" href="/koofr/#digi-storage"><i class="fa fa-cloud fa-fw"></i> Digi Storage</a> <a class="dropdown-item" href="/koofr/#digi-storage"><i class="fa fa-cloud"></i> Digi Storage</a>
<a class="dropdown-item" href="/dropbox/"><i class="fab fa-dropbox fa-fw"></i> Dropbox</a> <a class="dropdown-item" href="/dropbox/"><i class="fab fa-dropbox"></i> Dropbox</a>
<a class="dropdown-item" href="/filefabric/"><i class="fa fa-cloud fa-fw"></i> Enterprise File Fabric</a> <a class="dropdown-item" href="/filefabric/"><i class="fa fa-cloud"></i> Enterprise File Fabric</a>
<a class="dropdown-item" href="/ftp/"><i class="fa fa-file fa-fw"></i> FTP</a> <a class="dropdown-item" href="/ftp/"><i class="fa fa-file"></i> FTP</a>
<a class="dropdown-item" href="/googlecloudstorage/"><i class="fab fa-google fa-fw"></i> Google Cloud Storage</a> <a class="dropdown-item" href="/googlecloudstorage/"><i class="fab fa-google"></i> Google Cloud Storage</a>
<a class="dropdown-item" href="/drive/"><i class="fab fa-google fa-fw"></i> Google Drive</a> <a class="dropdown-item" href="/drive/"><i class="fab fa-google"></i> Google Drive</a>
<a class="dropdown-item" href="/googlephotos/"><i class="fas fa-images fa-fw"></i> Google Photos</a> <a class="dropdown-item" href="/googlephotos/"><i class="fas fa-images"></i> Google Photos</a>
<a class="dropdown-item" href="/hasher/"><i class="fa fa-check-double fa-fw"></i> Hasher (better checksums for others)</a> <a class="dropdown-item" href="/hasher/"><i class="fa fa-check-double"></i> Hasher (better checksums for others)</a>
<a class="dropdown-item" href="/hdfs/"><i class="fa fa-globe fa-fw"></i> HDFS (Hadoop Distributed Filesystem)</a> <a class="dropdown-item" href="/hdfs/"><i class="fa fa-globe"></i> HDFS (Hadoop Distributed Filesystem)</a>
<a class="dropdown-item" href="/hidrive/"><i class="fa fa-cloud fa-fw"></i> HiDrive</a> <a class="dropdown-item" href="/hidrive/"><i class="fa fa-cloud"></i> HiDrive</a>
<a class="dropdown-item" href="/http/"><i class="fa fa-globe fa-fw"></i> HTTP</a> <a class="dropdown-item" href="/http/"><i class="fa fa-globe"></i> HTTP</a>
<a class="dropdown-item" href="/hubic/"><i class="fa fa-space-shuttle fa-fw"></i> Hubic</a> <a class="dropdown-item" href="/hubic/"><i class="fa fa-space-shuttle"></i> Hubic</a>
<a class="dropdown-item" href="/internetarchive/"><i class="fa fa-archive fa-fw"></i> Internet Archive</a> <a class="dropdown-item" href="/internetarchive/"><i class="fa fa-archive"></i> Internet Archive</a>
<a class="dropdown-item" href="/jottacloud/"><i class="fa fa-cloud fa-fw"></i> Jottacloud</a> <a class="dropdown-item" href="/jottacloud/"><i class="fa fa-cloud"></i> Jottacloud</a>
<a class="dropdown-item" href="/koofr/"><i class="fa fa-suitcase fa-fw"></i> Koofr</a> <a class="dropdown-item" href="/koofr/"><i class="fa fa-suitcase"></i> Koofr</a>
<a class="dropdown-item" href="/mailru/"><i class="fa fa-at fa-fw"></i> Mail.ru Cloud</a> <a class="dropdown-item" href="/mailru/"><i class="fa fa-at"></i> Mail.ru Cloud</a>
<a class="dropdown-item" href="/mega/"><i class="fa fa-archive fa-fw"></i> Mega</a> <a class="dropdown-item" href="/mega/"><i class="fa fa-archive"></i> Mega</a>
<a class="dropdown-item" href="/memory/"><i class="fas fa-memory fa-fw"></i> Memory</a> <a class="dropdown-item" href="/memory/"><i class="fas fa-memory"></i> Memory</a>
<a class="dropdown-item" href="/azureblob/"><i class="fab fa-windows fa-fw"></i> Microsoft Azure Blob Storage</a> <a class="dropdown-item" href="/azureblob/"><i class="fab fa-windows"></i> Microsoft Azure Blob Storage</a>
<a class="dropdown-item" href="/onedrive/"><i class="fab fa-windows fa-fw"></i> Microsoft OneDrive</a> <a class="dropdown-item" href="/onedrive/"><i class="fab fa-windows"></i> Microsoft OneDrive</a>
<a class="dropdown-item" href="/opendrive/"><i class="fa fa-space-shuttle fa-fw"></i> OpenDrive</a> <a class="dropdown-item" href="/opendrive/"><i class="fa fa-space-shuttle"></i> OpenDrive</a>
<a class="dropdown-item" href="/qingstor/"><i class="fas fa-hdd fa-fw"></i> QingStor</a> <a class="dropdown-item" href="/qingstor/"><i class="fas fa-hdd"></i> QingStor</a>
<a class="dropdown-item" href="/swift/"><i class="fa fa-space-shuttle fa-fw"></i> Openstack Swift</a> <a class="dropdown-item" href="/swift/"><i class="fa fa-space-shuttle"></i> Openstack Swift</a>
<a class="dropdown-item" href="/pcloud/"><i class="fa fa-cloud fa-fw"></i> pCloud</a> <a class="dropdown-item" href="/pcloud/"><i class="fa fa-cloud"></i> pCloud</a>
<a class="dropdown-item" href="/premiumizeme/"><i class="fa fa-user fa-fw"></i> premiumize.me</a> <a class="dropdown-item" href="/premiumizeme/"><i class="fa fa-user"></i> premiumize.me</a>
<a class="dropdown-item" href="/putio/"><i class="fas fa-parking fa-fw"></i> put.io</a> <a class="dropdown-item" href="/putio/"><i class="fas fa-parking"></i> put.io</a>
<a class="dropdown-item" href="/seafile/"><i class="fa fa-server fa-fw"></i> Seafile</a> <a class="dropdown-item" href="/seafile/"><i class="fa fa-server"></i> Seafile</a>
<a class="dropdown-item" href="/sftp/"><i class="fa fa-server fa-fw"></i> SFTP</a> <a class="dropdown-item" href="/sftp/"><i class="fa fa-server"></i> SFTP</a>
<a class="dropdown-item" href="/sia/"><i class="fa fa-globe fa-fw"></i> Sia</a> <a class="dropdown-item" href="/sia/"><i class="fa fa-globe"></i> Sia</a>
<a class="dropdown-item" href="/storj/"><i class="fas fa-dove fa-fw"></i> Storj</a> <a class="dropdown-item" href="/storj/"><i class="fas fa-dove"></i> Storj</a>
<a class="dropdown-item" href="/sugarsync/"><i class="fas fa-dove fa-fw"></i> SugarSync</a> <a class="dropdown-item" href="/sugarsync/"><i class="fas fa-dove"></i> SugarSync</a>
<a class="dropdown-item" href="/uptobox/"><i class="fa fa-archive fa-fw"></i> Uptobox</a> <a class="dropdown-item" href="/uptobox/"><i class="fa fa-archive"></i> Uptobox</a>
<a class="dropdown-item" href="/union/"><i class="fa fa-link fa-fw"></i> Union (merge backends)</a> <a class="dropdown-item" href="/union/"><i class="fa fa-link"></i> Union (merge backends)</a>
<a class="dropdown-item" href="/webdav/"><i class="fa fa-server fa-fw"></i> WebDAV</a> <a class="dropdown-item" href="/webdav/"><i class="fa fa-server"></i> WebDAV</a>
<a class="dropdown-item" href="/yandex/"><i class="fa fa-space-shuttle fa-fw"></i> Yandex Disk</a> <a class="dropdown-item" href="/yandex/"><i class="fa fa-space-shuttle"></i> Yandex Disk</a>
<a class="dropdown-item" href="/zoho/"><i class="fas fa-folder fa-fw"></i> Zoho WorkDrive</a> <a class="dropdown-item" href="/zoho/"><i class="fas fa-folder"></i> Zoho WorkDrive</a>
<div class="dropdown-divider"></div> <div class="dropdown-divider"></div>
<a class="dropdown-item" href="/local/"><i class="fas fa-hdd fa-fw"></i> The local filesystem</a> <a class="dropdown-item" href="/local/"><i class="fas fa-hdd"></i> The local filesystem</a>
</div> </div>
</li> </li>
<li class="nav-item active"> <li class="nav-item active">
<a class="nav-link" href="/contact/"><i class="fa fa-envelope fa-fw"></i> Contact</a> <a class="nav-link" href="/contact/"><i class="fa fa-envelope"></i> Contact</a>
</li> </li>
<li class="nav-item active"> <li class="nav-item active">
<a class="nav-link" href="/donate/"><i class="fa fa-heart heart fa-fw"></i> Donate</a> <a class="nav-link" href="/donate/"><i class="fa fa-heart heart"></i> Donate</a>
</li> </li>
</ul> </ul>
<form class="form-inline" name="search_form" action="https://google.com/search" target="_blank" onsubmit="on_search();"> <form class="form-inline" name="search_form" action="https://google.com/search" target="_blank" onsubmit="on_search();">

View File

@@ -142,6 +142,18 @@ h5 {
font-size: 95%; font-size: 95%;
} }
/* Align dropdown items when icons have different sizes */
.dropdown-item .fa, .fab, .fad, .fal, .far, .fas {
width: 20px;
text-align: center;
}
/* Align menu items when icons have different sizes */
.menu .fa, .fab, .fad, .fal, .far, .fas {
width: 18px;
text-align: center;
}
/* Make primary buttons rclone colours. Should learn sass and do this the proper way! */ /* Make primary buttons rclone colours. Should learn sass and do this the proper way! */
.btn-primary { .btn-primary {
background-color: #3f79ad; background-color: #3f79ad;

View File

@@ -29,12 +29,6 @@ var (
return errors.New("no config file set handler") return errors.New("no config file set handler")
} }
// Check if the config file has the named section
//
// This is a function pointer to decouple the config
// implementation from the fs
ConfigFileHasSection = func(section string) bool { return false }
// CountError counts an error. If any errors have been // CountError counts an error. If any errors have been
// counted then rclone will exit with a non zero error code. // counted then rclone will exit with a non zero error code.
// //

View File

@@ -117,9 +117,6 @@ func init() {
// Set the function pointers up in fs // Set the function pointers up in fs
fs.ConfigFileGet = FileGetFlag fs.ConfigFileGet = FileGetFlag
fs.ConfigFileSet = SetValueAndSave fs.ConfigFileSet = SetValueAndSave
fs.ConfigFileHasSection = func(section string) bool {
return LoadedData().HasSection(section)
}
configPath = makeConfigPath() configPath = makeConfigPath()
cacheDir = makeCacheDir() // Has fallback to tempDir, so set that first cacheDir = makeCacheDir() // Has fallback to tempDir, so set that first
data = newDefaultStorage() data = newDefaultStorage()

View File

@@ -26,6 +26,9 @@ var (
// When nil, no encryption will be used for saving. // When nil, no encryption will be used for saving.
configKey []byte configKey []byte
// PasswordPromptOutput is output of prompt for password
PasswordPromptOutput = os.Stderr
// PassConfigKeyForDaemonization if set to true, the configKey // PassConfigKeyForDaemonization if set to true, the configKey
// is obscured with obscure.Obscure and saved to a temp file // is obscured with obscure.Obscure and saved to a temp file
// when it is calculated from the password. The path of that // when it is calculated from the password. The path of that

View File

@@ -716,9 +716,9 @@ func checkPassword(password string) (string, error) {
// GetPassword asks the user for a password with the prompt given. // GetPassword asks the user for a password with the prompt given.
func GetPassword(prompt string) string { func GetPassword(prompt string) string {
_, _ = fmt.Fprintln(terminal.Out, prompt) _, _ = fmt.Fprintln(PasswordPromptOutput, prompt)
for { for {
_, _ = fmt.Fprint(terminal.Out, "password:") _, _ = fmt.Fprint(PasswordPromptOutput, "password:")
password := ReadPassword() password := ReadPassword()
password, err := checkPassword(password) password, err := checkPassword(password)
if err == nil { if err == nil {

View File

@@ -29,7 +29,6 @@ type Features struct {
ReadMetadata bool // can read metadata from objects ReadMetadata bool // can read metadata from objects
WriteMetadata bool // can write metadata to objects WriteMetadata bool // can write metadata to objects
UserMetadata bool // can read/write general purpose metadata UserMetadata bool // can read/write general purpose metadata
FilterAware bool // can make use of filters if provided for listing
// Purge all files in the directory specified // Purge all files in the directory specified
// //
@@ -321,7 +320,6 @@ func (ft *Features) Mask(ctx context.Context, f Fs) *Features {
// ft.IsLocal = ft.IsLocal && mask.IsLocal Don't propagate IsLocal // ft.IsLocal = ft.IsLocal && mask.IsLocal Don't propagate IsLocal
ft.SlowModTime = ft.SlowModTime && mask.SlowModTime ft.SlowModTime = ft.SlowModTime && mask.SlowModTime
ft.SlowHash = ft.SlowHash && mask.SlowHash ft.SlowHash = ft.SlowHash && mask.SlowHash
ft.FilterAware = ft.FilterAware && mask.FilterAware
if mask.Purge == nil { if mask.Purge == nil {
ft.Purge = nil ft.Purge = nil

View File

@@ -9,17 +9,17 @@ import (
"log" "log"
"os" "os"
"github.com/rclone/rclone/lib/terminal" "github.com/rclone/rclone/fs/config"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
// redirectStderr to the file passed in // redirectStderr to the file passed in
func redirectStderr(f *os.File) { func redirectStderr(f *os.File) {
termFd, err := unix.Dup(int(os.Stderr.Fd())) passPromptFd, err := unix.Dup(int(os.Stderr.Fd()))
if err != nil { if err != nil {
log.Fatalf("Failed to duplicate stderr: %v", err) log.Fatalf("Failed to duplicate stderr: %v", err)
} }
terminal.RawOut = os.NewFile(uintptr(termFd), "termOut") config.PasswordPromptOutput = os.NewFile(uintptr(passPromptFd), "passPrompt")
err = unix.Dup2(int(f.Fd()), int(os.Stderr.Fd())) err = unix.Dup2(int(f.Fd()), int(os.Stderr.Fd()))
if err != nil { if err != nil {
log.Fatalf("Failed to redirect stderr to file: %v", err) log.Fatalf("Failed to redirect stderr to file: %v", err)

View File

@@ -12,43 +12,29 @@ package log
import ( import (
"log" "log"
"os" "os"
"syscall"
"github.com/rclone/rclone/lib/terminal"
"golang.org/x/sys/windows"
) )
// dup oldfd creating a functional copy as newfd var (
// conceptually the same as the unix `dup()` function kernel32 = syscall.MustLoadDLL("kernel32.dll")
func dup(oldfd uintptr) (newfd uintptr, err error) { procSetStdHandle = kernel32.MustFindProc("SetStdHandle")
var ( )
newfdHandle windows.Handle
processHandle = windows.CurrentProcess() func setStdHandle(stdhandle int32, handle syscall.Handle) error {
) r0, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0)
err = windows.DuplicateHandle( if r0 == 0 {
processHandle, // hSourceProcessHandle if e1 != 0 {
windows.Handle(oldfd), // hSourceHandle return error(e1)
processHandle, // hTargetProcessHandle }
&newfdHandle, // lpTargetHandle return syscall.EINVAL
0, // dwDesiredAccess
true, // bInheritHandle
windows.DUPLICATE_SAME_ACCESS, // dwOptions
)
if err != nil {
return 0, err
} }
return uintptr(newfdHandle), nil return nil
} }
// redirectStderr to the file passed in // redirectStderr to the file passed in
func redirectStderr(f *os.File) { func redirectStderr(f *os.File) {
termFd, err := dup(os.Stderr.Fd()) err := setStdHandle(syscall.STD_ERROR_HANDLE, syscall.Handle(f.Fd()))
if err != nil {
log.Fatalf("Failed to duplicate stderr: %v", err)
}
terminal.RawOut = os.NewFile(termFd, "termOut")
err = windows.SetStdHandle(windows.STD_ERROR_HANDLE, windows.Handle(f.Fd()))
if err != nil { if err != nil {
log.Fatalf("Failed to redirect stderr to file: %v", err) log.Fatalf("Failed to redirect stderr to file: %v", err)
} }
os.Stderr = f
} }

View File

@@ -83,7 +83,7 @@ func (m *March) makeListDir(ctx context.Context, f fs.Fs, includeAll bool) listD
if !(ci.UseListR && f.Features().ListR != nil) && // !--fast-list active and if !(ci.UseListR && f.Features().ListR != nil) && // !--fast-list active and
!(ci.NoTraverse && fi.HaveFilesFrom()) { // !(--files-from and --no-traverse) !(ci.NoTraverse && fi.HaveFilesFrom()) { // !(--files-from and --no-traverse)
return func(dir string) (entries fs.DirEntries, err error) { return func(dir string) (entries fs.DirEntries, err error) {
dirCtx := filter.SetUseFilter(m.Ctx, f.Features().FilterAware && !includeAll) // make filter-aware backends constrain List dirCtx := filter.SetUseFilter(m.Ctx, !includeAll) // make filter-aware backends constrain List
return list.DirSorted(dirCtx, f, includeAll, dir) return list.DirSorted(dirCtx, f, includeAll, dir)
} }
} }
@@ -100,7 +100,7 @@ func (m *March) makeListDir(ctx context.Context, f fs.Fs, includeAll bool) listD
mu.Lock() mu.Lock()
defer mu.Unlock() defer mu.Unlock()
if !started { if !started {
dirCtx := filter.SetUseFilter(m.Ctx, f.Features().FilterAware && !includeAll) // make filter-aware backends constrain List dirCtx := filter.SetUseFilter(m.Ctx, !includeAll) // make filter-aware backends constrain List
dirs, dirsErr = walk.NewDirTree(dirCtx, f, m.Dir, includeAll, ci.MaxDepth) dirs, dirsErr = walk.NewDirTree(dirCtx, f, m.Dir, includeAll, ci.MaxDepth)
started = true started = true
} }

View File

@@ -26,9 +26,6 @@ import (
// up with drive letters. // up with drive letters.
func NewFs(ctx context.Context, path string) (Fs, error) { func NewFs(ctx context.Context, path string) (Fs, error) {
Debugf(nil, "Creating backend with remote %q", path) Debugf(nil, "Creating backend with remote %q", path)
if ConfigFileHasSection(path) {
Logf(nil, "%q refers to a local folder, use %q to refer to your remote or %q to hide this warning", path, path+":", "./"+path)
}
fsInfo, configName, fsPath, config, err := ConfigFs(path) fsInfo, configName, fsPath, config, err := ConfigFs(path)
if err != nil { if err != nil {
return nil, err return nil, err

View File

@@ -64,7 +64,7 @@ type Func func(path string, entries fs.DirEntries, err error) error
func Walk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error { func Walk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
ci := fs.GetConfig(ctx) ci := fs.GetConfig(ctx)
fi := filter.GetConfig(ctx) fi := filter.GetConfig(ctx)
ctx = filter.SetUseFilter(ctx, f.Features().FilterAware && !includeAll) // make filter-aware backends constrain List ctx = filter.SetUseFilter(ctx, !includeAll) // make filter-aware backends constrain List
if ci.NoTraverse && fi.HaveFilesFrom() { if ci.NoTraverse && fi.HaveFilesFrom() {
return walkR(ctx, f, path, includeAll, maxLevel, fn, fi.MakeListR(ctx, f.NewObject)) return walkR(ctx, f, path, includeAll, maxLevel, fn, fi.MakeListR(ctx, f.NewObject))
} }
@@ -158,7 +158,7 @@ func ListR(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel
fi.UsesDirectoryFilters() { // ...using any directory filters fi.UsesDirectoryFilters() { // ...using any directory filters
return listRwalk(ctx, f, path, includeAll, maxLevel, listType, fn) return listRwalk(ctx, f, path, includeAll, maxLevel, listType, fn)
} }
ctx = filter.SetUseFilter(ctx, f.Features().FilterAware && !includeAll) // make filter-aware backends constrain List ctx = filter.SetUseFilter(ctx, !includeAll) // make filter-aware backends constrain List
return listR(ctx, f, path, includeAll, listType, fn, doListR, listType.Dirs() && f.Features().BucketBased) return listR(ctx, f, path, includeAll, listType, fn, doListR, listType.Dirs() && f.Features().BucketBased)
} }

2
go.mod
View File

@@ -135,7 +135,7 @@ require (
github.com/golang-jwt/jwt/v4 v4.1.0 // indirect github.com/golang-jwt/jwt/v4 v4.1.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/google/go-querystring v1.1.0 // indirect github.com/google/go-querystring v1.1.0 // indirect
github.com/jlaffaye/ftp v0.0.0-20220904184306-99be0634ab9a github.com/jlaffaye/ftp v0.0.0-20220630165035-11536801d1ff
github.com/pkg/xattr v0.4.7 github.com/pkg/xattr v0.4.7
golang.org/x/mobile v0.0.0-20220722155234-aaac322e2105 golang.org/x/mobile v0.0.0-20220722155234-aaac322e2105
golang.org/x/term v0.0.0-20220722155259-a9ba230a4035 golang.org/x/term v0.0.0-20220722155259-a9ba230a4035

2
go.sum
View File

@@ -380,8 +380,6 @@ github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0
github.com/jlaffaye/ftp v0.0.0-20190624084859-c1312a7102bf/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY= github.com/jlaffaye/ftp v0.0.0-20190624084859-c1312a7102bf/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
github.com/jlaffaye/ftp v0.0.0-20220630165035-11536801d1ff h1:tN6UCYCBFNrPwvKf4RP9cIhGo6GcZ/IQTN8nqD7eCok= github.com/jlaffaye/ftp v0.0.0-20220630165035-11536801d1ff h1:tN6UCYCBFNrPwvKf4RP9cIhGo6GcZ/IQTN8nqD7eCok=
github.com/jlaffaye/ftp v0.0.0-20220630165035-11536801d1ff/go.mod h1:hhq4G4crv+nW2qXtNYcuzLeOudG92Ps37HEKeg2e3lE= github.com/jlaffaye/ftp v0.0.0-20220630165035-11536801d1ff/go.mod h1:hhq4G4crv+nW2qXtNYcuzLeOudG92Ps37HEKeg2e3lE=
github.com/jlaffaye/ftp v0.0.0-20220904184306-99be0634ab9a h1:s4ryRQyC5HKZh6qkjNAFcvmD7gImK5bZuj/YZkXy1vw=
github.com/jlaffaye/ftp v0.0.0-20220904184306-99be0634ab9a/go.mod h1:hhq4G4crv+nW2qXtNYcuzLeOudG92Ps37HEKeg2e3lE=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=

View File

@@ -68,23 +68,17 @@ const (
var ( var (
// make sure that start is only called once // make sure that start is only called once
once sync.Once once sync.Once
// RawOut is the underlying *os.File intended for terminal output
RawOut = os.Stderr
) )
// Start the terminal - must be called before use // Start the terminal - must be called before use
func Start() { func Start() {
once.Do(func() { once.Do(func() {
f := RawOut f := os.Stdout
if !IsTerminal(int(f.Fd())) { if !IsTerminal(int(f.Fd())) {
// If output is not a tty then remove escape codes // If stdout not a tty then remove escape codes
Out = colorable.NewNonColorable(f) Out = colorable.NewNonColorable(f)
} else if runtime.GOOS == "windows" && os.Getenv("TERM") != "" { } else if runtime.GOOS == "windows" && os.Getenv("TERM") != "" {
// If TERM is set on Windows then we should just send output // If TERM is set just use stdout
// straight to the terminal for cygwin/git bash environments.
// We don't want to use NewColorable here because it will
// use Windows console calls which cygwin/git bash don't support.
Out = f Out = f
} else { } else {
Out = colorable.NewColorable(f) Out = colorable.NewColorable(f)

View File

@@ -34,5 +34,5 @@ func ReadPassword(fd int) ([]byte, error) {
// WriteTerminalTitle writes a string to the terminal title // WriteTerminalTitle writes a string to the terminal title
func WriteTerminalTitle(title string) { func WriteTerminalTitle(title string) {
fmt.Fprintf(Out, ChangeTitle+title+BEL) fmt.Printf(ChangeTitle + title + BEL)
} }