Mirror of https://github.com/rclone/rclone.git, synced 2025-12-06 00:03:32 +00:00

Compare commits: 4d19afdbbf ... be2c44f5af

18 Commits:

- be2c44f5af
- 1db0f51be4
- 6440052fbd
- 4afb59bc93
- 0343670375
- 5b2b372ba9
- 08c35ae741
- ecea0cd6f9
- 80e6389a50
- a3ccf4d8a0
- 31df39d356
- 03d3811f7f
- 83b83f7768
- 71138082ea
- cf94824426
- 16971ab6b9
- 9f75af38e3
- b5e4d39b05
@@ -51,7 +51,7 @@ type LifecycleRule struct {
 
 // ServerSideEncryption is a configuration object for B2 Server-Side Encryption
 type ServerSideEncryption struct {
 	Mode           string `json:"mode"`
-	Algorithm      string `json:"algorithm"` // Encryption algorith to use
+	Algorithm      string `json:"algorithm"` // Encryption algorithm to use
 	CustomerKey    string `json:"customerKey"`    // User provided Base64 encoded key that is used by the server to encrypt files
 	CustomerKeyMd5 string `json:"customerKeyMd5"` // An MD5 hash of the decoded key
 }
@@ -72,7 +72,7 @@ const (
 
 // Globals
 var (
-	errNotWithVersions  = errors.New("can't modify or delete files in --b2-versions mode")
+	errNotWithVersions  = errors.New("can't modify files in --b2-versions mode")
 	errNotWithVersionAt = errors.New("can't modify or delete files in --b2-version-at mode")
 )
@@ -2334,7 +2334,10 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
 func (o *Object) Remove(ctx context.Context) error {
 	bucket, bucketPath := o.split()
 	if o.fs.opt.Versions {
-		return errNotWithVersions
+		t, path := api.RemoveVersion(bucketPath)
+		if !t.IsZero() {
+			return o.fs.deleteByID(ctx, o.id, path)
+		}
 	}
 	if o.fs.opt.VersionAt.IsSet() {
 		return errNotWithVersionAt
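Note: the fix above relies on `api.RemoveVersion`, which strips rclone's b2 version suffix from a remote name and reports the timestamp it encoded; a zero time means the name carries no version suffix, so `Remove` falls through to the normal delete path instead of erroring. A stand-alone sketch of the idea (the regex and helper are illustrative only, not rclone's actual implementation):

```go
package main

import (
	"fmt"
	"regexp"
	"time"
)

// versionRe matches rclone's b2 version suffix, e.g. "file-v2025-01-02-030405-000.txt".
var versionRe = regexp.MustCompile(`^(.*)-v(\d{4}-\d{2}-\d{2}-\d{6})-\d{3}(\.[^.]*)?$`)

// removeVersion is an illustrative stand-in for api.RemoveVersion: it returns
// the encoded time and the de-versioned name, or a zero time if there is no suffix.
func removeVersion(remote string) (time.Time, string) {
	m := versionRe.FindStringSubmatch(remote)
	if m == nil {
		return time.Time{}, remote
	}
	t, err := time.Parse("2006-01-02-150405", m[2])
	if err != nil {
		return time.Time{}, remote
	}
	return t, m[1] + m[3]
}

func main() {
	t, name := removeVersion("report-v2025-01-02-030405-000.txt")
	fmt.Println(t.IsZero(), name) // false report.txt

	t, name = removeVersion("report.txt")
	fmt.Println(t.IsZero(), name) // true report.txt
}
```

With this in place, a delete in `--b2-versions` mode can remove a specific version by ID instead of failing outright, which is what the docs change further down refers to.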
@@ -87,13 +87,11 @@ func init() {
 		Description: "Box",
 		NewFs:       NewFs,
 		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
-			jsonFile, ok := m.Get("box_config_file")
-			boxSubType, boxSubTypeOk := m.Get("box_sub_type")
 			boxAccessToken, boxAccessTokenOk := m.Get("access_token")
 			var err error
 			// If using box config.json, use JWT auth
-			if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
-				err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
+			if usesJWTAuth(m) {
+				err = refreshJWTToken(ctx, name, m)
 				if err != nil {
 					return nil, fmt.Errorf("failed to configure token with jwt authentication: %w", err)
 				}
@@ -114,6 +112,11 @@ func init() {
 		}, {
 			Name: "box_config_file",
 			Help: "Box App config.json location\n\nLeave blank normally." + env.ShellExpandHelp,
 		}, {
+			Name:      "config_credentials",
+			Help:      "Box App config.json contents.\n\nLeave blank normally.",
+			Hide:      fs.OptionHideBoth,
+			Sensitive: true,
+		}, {
 			Name: "access_token",
 			Help: "Box App Primary Access Token\n\nLeave blank normally.",
@@ -184,9 +187,17 @@ See: https://developer.box.com/guides/authentication/jwt/as-user/
 	})
 }
 
-func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
-	jsonFile = env.ShellExpand(jsonFile)
-	boxConfig, err := getBoxConfig(jsonFile)
+func usesJWTAuth(m configmap.Mapper) bool {
+	jsonFile, okFile := m.Get("box_config_file")
+	jsonFileCredentials, okCredentials := m.Get("config_credentials")
+	boxSubType, boxSubTypeOk := m.Get("box_sub_type")
+	return (okFile || okCredentials) && boxSubTypeOk && (jsonFile != "" || jsonFileCredentials != "") && boxSubType != ""
+}
+
+func refreshJWTToken(ctx context.Context, name string, m configmap.Mapper) error {
+	boxSubType, _ := m.Get("box_sub_type")
+
+	boxConfig, err := getBoxConfig(m)
 	if err != nil {
 		return fmt.Errorf("get box config: %w", err)
 	}
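Note: the new helper collapses what were two copies of the same condition (in `Config` and in `NewFs` below). A small sketch of its behaviour using `configmap.Simple` as the `Mapper`; the helper body is copied from the diff since the real one is unexported:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/config/configmap"
)

// usesJWTAuth mirrors the unexported helper above: JWT is selected when
// box_sub_type is set together with either a config file path or the new
// inline config_credentials contents.
func usesJWTAuth(m configmap.Mapper) bool {
	jsonFile, okFile := m.Get("box_config_file")
	jsonFileCredentials, okCredentials := m.Get("config_credentials")
	boxSubType, boxSubTypeOk := m.Get("box_sub_type")
	return (okFile || okCredentials) && boxSubTypeOk && (jsonFile != "" || jsonFileCredentials != "") && boxSubType != ""
}

func main() {
	m := configmap.Simple{
		"box_sub_type":       "enterprise",
		"config_credentials": `{"boxAppSettings":{}}`, // inline config.json contents
	}
	fmt.Println(usesJWTAuth(m)) // true: JWT auth without a config file on disk
}
```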
@@ -205,12 +216,19 @@ func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, na
 	return err
 }
 
-func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
-	file, err := os.ReadFile(configFile)
-	if err != nil {
-		return nil, fmt.Errorf("box: failed to read Box config: %w", err)
+func getBoxConfig(m configmap.Mapper) (boxConfig *api.ConfigJSON, err error) {
+	configFileCredentials, _ := m.Get("config_credentials")
+	configFileBytes := []byte(configFileCredentials)
+
+	if configFileCredentials == "" {
+		configFile, _ := m.Get("box_config_file")
+		configFileBytes, err = os.ReadFile(configFile)
+		if err != nil {
+			return nil, fmt.Errorf("box: failed to read Box config: %w", err)
+		}
 	}
-	err = json.Unmarshal(file, &boxConfig)
+
+	err = json.Unmarshal(configFileBytes, &boxConfig)
 	if err != nil {
 		return nil, fmt.Errorf("box: failed to parse Box config: %w", err)
 	}
@@ -485,15 +503,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		f.srv.SetHeader("as-user", f.opt.Impersonate)
 	}
 
-	jsonFile, ok := m.Get("box_config_file")
-	boxSubType, boxSubTypeOk := m.Get("box_sub_type")
-
 	if ts != nil {
 		// If using box config.json and JWT, renewing should just refresh the token and
 		// should do so whether there are uploads pending or not.
-		if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
+		if usesJWTAuth(m) {
 			f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
-				err := refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
+				err := refreshJWTToken(ctx, name, m)
 				return err
 			})
 			f.tokenRenewer.Start()
@@ -1330,6 +1330,16 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	var result *files.RelocationResult
 	err = f.pacer.Call(func() (bool, error) {
 		result, err = f.srv.MoveV2(&arg)
+		switch e := err.(type) {
+		case files.MoveV2APIError:
+			// There seems to be a bit of eventual consistency here which causes this to
+			// fail on just created objects
+			// See: https://github.com/rclone/rclone/issues/8881
+			if e.EndpointError != nil && e.EndpointError.FromLookup != nil && e.EndpointError.FromLookup.Tag == files.LookupErrorNotFound {
+				fs.Debugf(srcObj, "Retrying move on %v error", err)
+				return true, err
+			}
+		}
 		return shouldRetry(ctx, err)
 	})
 	if err != nil {
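Note: the retry works through the pacer contract: a paced callback returns `(retry bool, err error)`, and returning `true` re-runs it after a back-off. A stand-alone sketch of that pattern, assuming the pacer's default retry count allows a few attempts (the error value is a stand-in for Dropbox's `from_lookup/not_found`):

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/rclone/rclone/lib/pacer"
)

var errNotFound = errors.New("from_lookup/not_found") // stand-in for the Dropbox API error

func main() {
	p := pacer.New(pacer.CalculatorOption(pacer.NewDefault(
		pacer.MinSleep(time.Millisecond), pacer.MaxSleep(2*time.Millisecond))))
	tries := 0
	err := p.Call(func() (bool, error) {
		tries++
		if tries < 3 {
			return true, errNotFound // eventual consistency: ask the pacer to retry
		}
		return false, nil // the move succeeded
	})
	fmt.Println(tries, err) // 3 <nil>
}
```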
@@ -1292,7 +1292,7 @@ func (f *ftpReadCloser) Close() error {
 	// See: https://github.com/rclone/rclone/issues/3445#issuecomment-521654257
 	if errX := textprotoError(err); errX != nil {
 		switch errX.Code {
-		case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend:
+		case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable, ftp.StatusAboutToSend, ftp.StatusRequestedFileActionOK:
 			err = nil
 		}
 	}
@@ -11,6 +11,7 @@ import (
 	"io"
 	"mime"
 	"net/http"
+	"net/textproto"
 	"net/url"
 	"path"
 	"strings"
@@ -37,6 +38,10 @@ func init() {
 		Description: "HTTP",
 		NewFs:       NewFs,
 		CommandHelp: commandHelp,
+		MetadataInfo: &fs.MetadataInfo{
+			System: systemMetadataInfo,
+			Help:   `HTTP metadata keys are case insensitive and are always returned in lower case.`,
+		},
 		Options: []fs.Option{{
 			Name: "url",
 			Help: "URL of HTTP host to connect to.\n\nE.g. \"https://example.com\", or \"https://user:pass@example.com\" to use a username and password.",
@@ -98,6 +103,40 @@ sizes of any files, and some files that don't exist may be in the listing.`,
 	fs.Register(fsi)
 }
 
+// system metadata keys which this backend owns
+var systemMetadataInfo = map[string]fs.MetadataHelp{
+	"cache-control": {
+		Help:    "Cache-Control header",
+		Type:    "string",
+		Example: "no-cache",
+	},
+	"content-disposition": {
+		Help:    "Content-Disposition header",
+		Type:    "string",
+		Example: "inline",
+	},
+	"content-disposition-filename": {
+		Help:    "Filename retrieved from Content-Disposition header",
+		Type:    "string",
+		Example: "file.txt",
+	},
+	"content-encoding": {
+		Help:    "Content-Encoding header",
+		Type:    "string",
+		Example: "gzip",
+	},
+	"content-language": {
+		Help:    "Content-Language header",
+		Type:    "string",
+		Example: "en-US",
+	},
+	"content-type": {
+		Help:    "Content-Type header",
+		Type:    "string",
+		Example: "text/plain",
+	},
+}
+
 // Options defines the configuration for this backend
 type Options struct {
 	Endpoint string `config:"url"`
@@ -126,6 +165,13 @@ type Object struct {
 	size        int64
 	modTime     time.Time
 	contentType string
+
+	// Metadata as pointers to strings as they often won't be present
+	contentDisposition         *string // Content-Disposition: header
+	contentDispositionFilename *string // Filename retrieved from Content-Disposition: header
+	cacheControl               *string // Cache-Control: header
+	contentEncoding            *string // Content-Encoding: header
+	contentLanguage            *string // Content-Language: header
 }
 
 // statusError returns an error if the res contained an error
@@ -277,6 +323,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		ci: ci,
 	}
 	f.features = (&fs.Features{
+		ReadMetadata:            true,
 		CanHaveEmptyDirectories: true,
 	}).Fill(ctx, f)
@@ -429,6 +476,29 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
 	return names, nil
 }
 
+// parseFilename extracts the filename from a Content-Disposition header
+func parseFilename(contentDisposition string) (string, error) {
+	// Normalize the contentDisposition to canonical MIME format
+	mediaType, params, err := mime.ParseMediaType(contentDisposition)
+	if err != nil {
+		return "", fmt.Errorf("failed to parse contentDisposition: %v", err)
+	}
+
+	// Check if the contentDisposition is an attachment
+	if strings.ToLower(mediaType) != "attachment" {
+		return "", fmt.Errorf("not an attachment: %s", mediaType)
+	}
+
+	// Extract the filename from the parameters
+	filename, ok := params["filename"]
+	if !ok {
+		return "", fmt.Errorf("filename not found in contentDisposition")
+	}
+
+	// Decode filename if it contains special encoding
+	return textproto.TrimString(filename), nil
+}
+
 // Adds the configured headers to the request if any
 func addHeaders(req *http.Request, opt *Options) {
 	for i := 0; i < len(opt.Headers); i += 2 {
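Note: `parseFilename` leans on `mime.ParseMediaType` from the standard library, which handles parameter quoting. What it returns for the header value used in the tests below:

```go
package main

import (
	"fmt"
	"mime"
)

func main() {
	// ParseMediaType splits a header value into its media type and parameters.
	mediaType, params, err := mime.ParseMediaType(`attachment; filename="five.txt.gz"`)
	if err != nil {
		panic(err)
	}
	fmt.Println(mediaType)          // attachment
	fmt.Println(params["filename"]) // five.txt.gz
}
```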
@@ -577,6 +647,9 @@ func (o *Object) String() string {
 
 // Remote the name of the remote HTTP file, relative to the fs root
 func (o *Object) Remote() string {
+	if o.contentDispositionFilename != nil {
+		return *o.contentDispositionFilename
+	}
 	return o.remote
 }
@@ -634,6 +707,29 @@ func (o *Object) decodeMetadata(ctx context.Context, res *http.Response) error {
 	o.modTime = t
 	o.contentType = res.Header.Get("Content-Type")
 	o.size = rest.ParseSizeFromHeaders(res.Header)
+	contentDisposition := res.Header.Get("Content-Disposition")
+	if contentDisposition != "" {
+		o.contentDisposition = &contentDisposition
+	}
+	if o.contentDisposition != nil {
+		var filename string
+		filename, err = parseFilename(*o.contentDisposition)
+		if err == nil && filename != "" {
+			o.contentDispositionFilename = &filename
+		}
+	}
+	cacheControl := res.Header.Get("Cache-Control")
+	if cacheControl != "" {
+		o.cacheControl = &cacheControl
+	}
+	contentEncoding := res.Header.Get("Content-Encoding")
+	if contentEncoding != "" {
+		o.contentEncoding = &contentEncoding
+	}
+	contentLanguage := res.Header.Get("Content-Language")
+	if contentLanguage != "" {
+		o.contentLanguage = &contentLanguage
+	}
 
 	// If NoSlash is set then check ContentType to see if it is a directory
 	if o.fs.opt.NoSlash {
@@ -772,6 +868,30 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 	}
 }
 
+// Metadata returns metadata for an object
+//
+// It should return nil if there is no Metadata
+func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
+	metadata = make(fs.Metadata, 6)
+	if o.contentType != "" {
+		metadata["content-type"] = o.contentType
+	}
+
+	// Set system metadata
+	setMetadata := func(k string, v *string) {
+		if v == nil || *v == "" {
+			return
+		}
+		metadata[k] = *v
+	}
+	setMetadata("content-disposition", o.contentDisposition)
+	setMetadata("content-disposition-filename", o.contentDispositionFilename)
+	setMetadata("cache-control", o.cacheControl)
+	setMetadata("content-language", o.contentLanguage)
+	setMetadata("content-encoding", o.contentEncoding)
+	return metadata, nil
+}
+
 // Check the interfaces are satisfied
 var (
 	_ fs.Fs = &Fs{}
@@ -779,4 +899,5 @@ var (
 	_ fs.Object     = &Object{}
 	_ fs.MimeTyper  = &Object{}
 	_ fs.Commander  = &Fs{}
+	_ fs.Metadataer = &Object{}
 )
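Note: with `fs.Metadataer` satisfied, generic callers can read these keys through `fs.GetMetadata`, the same helper the serve code later in this set uses. A small sketch (`printDisposition` is a hypothetical helper for illustration; `mockobject` is rclone's test object, which carries no metadata):

```go
package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest/mockobject"
)

// printDisposition shows the generic consumption path: fs.GetMetadata works
// for any object and returns a nil map when the backend has none to offer.
func printDisposition(ctx context.Context, o fs.Object) {
	meta, err := fs.GetMetadata(ctx, o)
	if err != nil || meta == nil {
		return
	}
	fmt.Println(meta["content-disposition"], meta["content-disposition-filename"])
}

func main() {
	printDisposition(context.Background(), mockobject.New("file.txt")) // prints nothing: the mock has no metadata
}
```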
@@ -60,6 +60,17 @@ func prepareServer(t *testing.T) configmap.Simple {
 		what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
 		assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
 		assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
+
+		// Set the content disposition header for the fifth file
+		// later we will check if it is set using the metadata method
+		if r.URL.Path == "/five.txt.gz" {
+			w.Header().Set("Content-Disposition", "attachment; filename=\"five.txt.gz\"")
+			w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+			w.Header().Set("Cache-Control", "no-cache")
+			w.Header().Set("Content-Language", "en-US")
+			w.Header().Set("Content-Encoding", "gzip")
+		}
+
 		fileServer.ServeHTTP(w, r)
 	})
@@ -102,27 +113,33 @@ func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
 
 	sort.Sort(entries)
 
-	require.Equal(t, 4, len(entries))
+	require.Equal(t, 5, len(entries))
 
 	e := entries[0]
-	assert.Equal(t, "four", e.Remote())
+	assert.Equal(t, "five.txt.gz", e.Remote())
 	assert.Equal(t, int64(-1), e.Size())
-	_, ok := e.(fs.Directory)
+	_, ok := e.(fs.Object)
 	assert.True(t, ok)
 
 	e = entries[1]
+	assert.Equal(t, "four", e.Remote())
+	assert.Equal(t, int64(-1), e.Size())
+	_, ok = e.(fs.Directory)
+	assert.True(t, ok)
+
+	e = entries[2]
 	assert.Equal(t, "one%.txt", e.Remote())
 	assert.Equal(t, int64(5+lineEndSize), e.Size())
 	_, ok = e.(*Object)
 	assert.True(t, ok)
 
-	e = entries[2]
+	e = entries[3]
 	assert.Equal(t, "three", e.Remote())
 	assert.Equal(t, int64(-1), e.Size())
 	_, ok = e.(fs.Directory)
 	assert.True(t, ok)
 
-	e = entries[3]
+	e = entries[4]
 	assert.Equal(t, "two.html", e.Remote())
 	if noSlash {
 		assert.Equal(t, int64(-1), e.Size())
@@ -218,6 +235,23 @@ func TestNewObjectWithLeadingSlash(t *testing.T) {
 	assert.Equal(t, fs.ErrorObjectNotFound, err)
 }
 
+func TestNewObjectWithMetadata(t *testing.T) {
+	f := prepare(t)
+	o, err := f.NewObject(context.Background(), "/five.txt.gz")
+	require.NoError(t, err)
+	assert.Equal(t, "five.txt.gz", o.Remote())
+	ho, ok := o.(*Object)
+	assert.True(t, ok)
+	metadata, err := ho.Metadata(context.Background())
+	require.NoError(t, err)
+	assert.Equal(t, "text/plain; charset=utf-8", metadata["content-type"])
+	assert.Equal(t, "attachment; filename=\"five.txt.gz\"", metadata["content-disposition"])
+	assert.Equal(t, "five.txt.gz", metadata["content-disposition-filename"])
+	assert.Equal(t, "no-cache", metadata["cache-control"])
+	assert.Equal(t, "en-US", metadata["content-language"])
+	assert.Equal(t, "gzip", metadata["content-encoding"])
+}
+
 func TestOpen(t *testing.T) {
 	m := prepareServer(t)
BIN backend/http/test/files/five.txt.gz (new binary file, not shown)
@@ -75,7 +75,7 @@ func TestLinkValid(t *testing.T) {
 			Expire: Time(time.Now().Add(time.Hour)),
 		},
 		expected: true,
-		desc:     "should fallback to Expire field when URL expire parameter is unparseable",
+		desc:     "should fallback to Expire field when URL expire parameter is unparsable",
 	},
 	{
 		name: "invalid when both URL expire and Expire field are expired",
@@ -137,3 +137,4 @@ use_accelerate_endpoint: true
 quirks:
   might_gzip: false # Never auto gzips objects
   use_unsigned_payload: false # AWS has trailer support which means it adds checksums in the trailer without seeking
+  use_data_integrity_protections: true
@@ -20,20 +20,21 @@ var NewYamlMap = orderedmap.New[string, string]
 
 // Quirks defines all the S3 provider quirks
 type Quirks struct {
-	ListVersion           *int   `yaml:"list_version,omitempty"`     // 1 or 2
-	ForcePathStyle        *bool  `yaml:"force_path_style,omitempty"` // true = path-style
-	ListURLEncode         *bool  `yaml:"list_url_encode,omitempty"`
-	UseMultipartEtag      *bool  `yaml:"use_multipart_etag,omitempty"`
-	UseAlreadyExists      *bool  `yaml:"use_already_exists,omitempty"`
-	UseAcceptEncodingGzip *bool  `yaml:"use_accept_encoding_gzip,omitempty"`
-	MightGzip             *bool  `yaml:"might_gzip,omitempty"`
-	UseMultipartUploads   *bool  `yaml:"use_multipart_uploads,omitempty"`
-	UseUnsignedPayload    *bool  `yaml:"use_unsigned_payload,omitempty"`
-	UseXID                *bool  `yaml:"use_x_id,omitempty"`
-	SignAcceptEncoding    *bool  `yaml:"sign_accept_encoding,omitempty"`
-	CopyCutoff            *int64 `yaml:"copy_cutoff,omitempty"`
-	MaxUploadParts        *int   `yaml:"max_upload_parts,omitempty"`
-	MinChunkSize          *int64 `yaml:"min_chunk_size,omitempty"`
+	ListVersion                 *int   `yaml:"list_version,omitempty"`     // 1 or 2
+	ForcePathStyle              *bool  `yaml:"force_path_style,omitempty"` // true = path-style
+	ListURLEncode               *bool  `yaml:"list_url_encode,omitempty"`
+	UseMultipartEtag            *bool  `yaml:"use_multipart_etag,omitempty"`
+	UseAlreadyExists            *bool  `yaml:"use_already_exists,omitempty"`
+	UseAcceptEncodingGzip       *bool  `yaml:"use_accept_encoding_gzip,omitempty"`
+	UseDataIntegrityProtections *bool  `yaml:"use_data_integrity_protections,omitempty"`
+	MightGzip                   *bool  `yaml:"might_gzip,omitempty"`
+	UseMultipartUploads         *bool  `yaml:"use_multipart_uploads,omitempty"`
+	UseUnsignedPayload          *bool  `yaml:"use_unsigned_payload,omitempty"`
+	UseXID                      *bool  `yaml:"use_x_id,omitempty"`
+	SignAcceptEncoding          *bool  `yaml:"sign_accept_encoding,omitempty"`
+	CopyCutoff                  *int64 `yaml:"copy_cutoff,omitempty"`
+	MaxUploadParts              *int   `yaml:"max_upload_parts,omitempty"`
+	MinChunkSize                *int64 `yaml:"min_chunk_size,omitempty"`
 }
 
 // Provider defines the configurable data in each provider.yaml
backend/s3/s3.go (149 changed lines)
@@ -39,6 +39,9 @@ import (
 	smithyhttp "github.com/aws/smithy-go/transport/http"
 	"github.com/ncw/swift/v2"
 
+	"golang.org/x/net/http/httpguts"
+	"golang.org/x/sync/errgroup"
+
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/chunksize"
@@ -59,8 +62,6 @@ import (
 	"github.com/rclone/rclone/lib/readers"
 	"github.com/rclone/rclone/lib/rest"
 	"github.com/rclone/rclone/lib/version"
-	"golang.org/x/net/http/httpguts"
-	"golang.org/x/sync/errgroup"
 )
 
 // Register with Fs
@@ -574,6 +575,13 @@ circumstances or for testing.
 `,
 			Default:  false,
 			Advanced: true,
+		}, {
+			Name: "use_data_integrity_protections",
+			Help: `If true use AWS S3 data integrity protections.
+
+See [AWS Docs on Data Integrity Protections](https://docs.aws.amazon.com/sdkref/latest/guide/feature-dataintegrity.html)`,
+			Default:  fs.Tristate{},
+			Advanced: true,
 		}, {
 			Name: "versions",
 			Help: "Include old versions in directory listings.",
@@ -892,67 +900,68 @@ var systemMetadataInfo = map[string]fs.MetadataHelp{
 
 // Options defines the configuration for this backend
 type Options struct {
-	Provider              string               `config:"provider"`
-	EnvAuth               bool                 `config:"env_auth"`
-	AccessKeyID           string               `config:"access_key_id"`
-	SecretAccessKey       string               `config:"secret_access_key"`
-	Region                string               `config:"region"`
-	Endpoint              string               `config:"endpoint"`
-	STSEndpoint           string               `config:"sts_endpoint"`
-	UseDualStack          bool                 `config:"use_dual_stack"`
-	LocationConstraint    string               `config:"location_constraint"`
-	ACL                   string               `config:"acl"`
-	BucketACL             string               `config:"bucket_acl"`
-	RequesterPays         bool                 `config:"requester_pays"`
-	ServerSideEncryption  string               `config:"server_side_encryption"`
-	SSEKMSKeyID           string               `config:"sse_kms_key_id"`
-	SSECustomerAlgorithm  string               `config:"sse_customer_algorithm"`
-	SSECustomerKey        string               `config:"sse_customer_key"`
-	SSECustomerKeyBase64  string               `config:"sse_customer_key_base64"`
-	SSECustomerKeyMD5     string               `config:"sse_customer_key_md5"`
-	StorageClass          string               `config:"storage_class"`
-	UploadCutoff          fs.SizeSuffix        `config:"upload_cutoff"`
-	CopyCutoff            fs.SizeSuffix        `config:"copy_cutoff"`
-	ChunkSize             fs.SizeSuffix        `config:"chunk_size"`
-	MaxUploadParts        int                  `config:"max_upload_parts"`
-	DisableChecksum       bool                 `config:"disable_checksum"`
-	SharedCredentialsFile string               `config:"shared_credentials_file"`
-	Profile               string               `config:"profile"`
-	SessionToken          string               `config:"session_token"`
-	UploadConcurrency     int                  `config:"upload_concurrency"`
-	ForcePathStyle        bool                 `config:"force_path_style"`
-	V2Auth                bool                 `config:"v2_auth"`
-	UseAccelerateEndpoint bool                 `config:"use_accelerate_endpoint"`
-	UseARNRegion          bool                 `config:"use_arn_region"`
-	LeavePartsOnError     bool                 `config:"leave_parts_on_error"`
-	ListChunk             int32                `config:"list_chunk"`
-	ListVersion           int                  `config:"list_version"`
-	ListURLEncode         fs.Tristate          `config:"list_url_encode"`
-	NoCheckBucket         bool                 `config:"no_check_bucket"`
-	NoHead                bool                 `config:"no_head"`
-	NoHeadObject          bool                 `config:"no_head_object"`
-	Enc                   encoder.MultiEncoder `config:"encoding"`
-	DisableHTTP2          bool                 `config:"disable_http2"`
-	DownloadURL           string               `config:"download_url"`
-	DirectoryMarkers      bool                 `config:"directory_markers"`
-	UseMultipartEtag      fs.Tristate          `config:"use_multipart_etag"`
-	UsePresignedRequest   bool                 `config:"use_presigned_request"`
-	Versions              bool                 `config:"versions"`
-	VersionAt             fs.Time              `config:"version_at"`
-	VersionDeleted        bool                 `config:"version_deleted"`
-	Decompress            bool                 `config:"decompress"`
-	MightGzip             fs.Tristate          `config:"might_gzip"`
-	UseAcceptEncodingGzip fs.Tristate          `config:"use_accept_encoding_gzip"`
-	NoSystemMetadata      bool                 `config:"no_system_metadata"`
-	UseAlreadyExists      fs.Tristate          `config:"use_already_exists"`
-	UseMultipartUploads   fs.Tristate          `config:"use_multipart_uploads"`
-	UseUnsignedPayload    fs.Tristate          `config:"use_unsigned_payload"`
-	SDKLogMode            sdkLogMode           `config:"sdk_log_mode"`
-	DirectoryBucket       bool                 `config:"directory_bucket"`
-	IBMAPIKey             string               `config:"ibm_api_key"`
-	IBMInstanceID         string               `config:"ibm_resource_instance_id"`
-	UseXID                fs.Tristate          `config:"use_x_id"`
-	SignAcceptEncoding    fs.Tristate          `config:"sign_accept_encoding"`
+	Provider                    string               `config:"provider"`
+	EnvAuth                     bool                 `config:"env_auth"`
+	AccessKeyID                 string               `config:"access_key_id"`
+	SecretAccessKey             string               `config:"secret_access_key"`
+	Region                      string               `config:"region"`
+	Endpoint                    string               `config:"endpoint"`
+	STSEndpoint                 string               `config:"sts_endpoint"`
+	UseDualStack                bool                 `config:"use_dual_stack"`
+	LocationConstraint          string               `config:"location_constraint"`
+	ACL                         string               `config:"acl"`
+	BucketACL                   string               `config:"bucket_acl"`
+	RequesterPays               bool                 `config:"requester_pays"`
+	ServerSideEncryption        string               `config:"server_side_encryption"`
+	SSEKMSKeyID                 string               `config:"sse_kms_key_id"`
+	SSECustomerAlgorithm        string               `config:"sse_customer_algorithm"`
+	SSECustomerKey              string               `config:"sse_customer_key"`
+	SSECustomerKeyBase64        string               `config:"sse_customer_key_base64"`
+	SSECustomerKeyMD5           string               `config:"sse_customer_key_md5"`
+	StorageClass                string               `config:"storage_class"`
+	UploadCutoff                fs.SizeSuffix        `config:"upload_cutoff"`
+	CopyCutoff                  fs.SizeSuffix        `config:"copy_cutoff"`
+	ChunkSize                   fs.SizeSuffix        `config:"chunk_size"`
+	MaxUploadParts              int                  `config:"max_upload_parts"`
+	DisableChecksum             bool                 `config:"disable_checksum"`
+	SharedCredentialsFile       string               `config:"shared_credentials_file"`
+	Profile                     string               `config:"profile"`
+	SessionToken                string               `config:"session_token"`
+	UploadConcurrency           int                  `config:"upload_concurrency"`
+	ForcePathStyle              bool                 `config:"force_path_style"`
+	V2Auth                      bool                 `config:"v2_auth"`
+	UseAccelerateEndpoint       bool                 `config:"use_accelerate_endpoint"`
+	UseARNRegion                bool                 `config:"use_arn_region"`
+	LeavePartsOnError           bool                 `config:"leave_parts_on_error"`
+	ListChunk                   int32                `config:"list_chunk"`
+	ListVersion                 int                  `config:"list_version"`
+	ListURLEncode               fs.Tristate          `config:"list_url_encode"`
+	NoCheckBucket               bool                 `config:"no_check_bucket"`
+	NoHead                      bool                 `config:"no_head"`
+	NoHeadObject                bool                 `config:"no_head_object"`
+	Enc                         encoder.MultiEncoder `config:"encoding"`
+	DisableHTTP2                bool                 `config:"disable_http2"`
+	DownloadURL                 string               `config:"download_url"`
+	DirectoryMarkers            bool                 `config:"directory_markers"`
+	UseMultipartEtag            fs.Tristate          `config:"use_multipart_etag"`
+	UsePresignedRequest         bool                 `config:"use_presigned_request"`
+	UseDataIntegrityProtections fs.Tristate          `config:"use_data_integrity_protections"`
+	Versions                    bool                 `config:"versions"`
+	VersionAt                   fs.Time              `config:"version_at"`
+	VersionDeleted              bool                 `config:"version_deleted"`
+	Decompress                  bool                 `config:"decompress"`
+	MightGzip                   fs.Tristate          `config:"might_gzip"`
+	UseAcceptEncodingGzip       fs.Tristate          `config:"use_accept_encoding_gzip"`
+	NoSystemMetadata            bool                 `config:"no_system_metadata"`
+	UseAlreadyExists            fs.Tristate          `config:"use_already_exists"`
+	UseMultipartUploads         fs.Tristate          `config:"use_multipart_uploads"`
+	UseUnsignedPayload          fs.Tristate          `config:"use_unsigned_payload"`
+	SDKLogMode                  sdkLogMode           `config:"sdk_log_mode"`
+	DirectoryBucket             bool                 `config:"directory_bucket"`
+	IBMAPIKey                   string               `config:"ibm_api_key"`
+	IBMInstanceID               string               `config:"ibm_resource_instance_id"`
+	UseXID                      fs.Tristate          `config:"use_x_id"`
+	SignAcceptEncoding          fs.Tristate          `config:"sign_accept_encoding"`
 }
 
 // Fs represents a remote s3 server
// Fs represents a remote s3 server
|
||||
@@ -1302,6 +1311,10 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli
|
||||
} else {
|
||||
s3Opt.EndpointOptions.UseDualStackEndpoint = aws.DualStackEndpointStateDisabled
|
||||
}
|
||||
if !opt.UseDataIntegrityProtections.Value {
|
||||
s3Opt.RequestChecksumCalculation = aws.RequestChecksumCalculationWhenRequired
|
||||
s3Opt.ResponseChecksumValidation = aws.ResponseChecksumValidationWhenRequired
|
||||
}
|
||||
// FIXME not ported from SDK v1 - not sure what this does
|
||||
// s3Opt.UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint
|
||||
})
|
||||
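Note: the two options being relaxed here are the AWS SDK for Go v2 checksum defaults introduced with the data-integrity-protections rollout. A sketch of the same configuration outside rclone, under the assumption of default credential loading:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// With the flag unset or false, rclone relaxes the SDK defaults so that
	// checksums are only computed/validated when an operation requires them,
	// which keeps third-party S3 providers that reject the new CRC headers working.
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.RequestChecksumCalculation = aws.RequestChecksumCalculationWhenRequired
		o.ResponseChecksumValidation = aws.ResponseChecksumValidationWhenRequired
	})
	_ = client
}
```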
@@ -1497,6 +1510,7 @@ func setQuirks(opt *Options, provider *Provider) {
 	set(&opt.ListURLEncode, true, provider.Quirks.ListURLEncode)
 	set(&opt.UseMultipartEtag, true, provider.Quirks.UseMultipartEtag)
 	set(&opt.UseAcceptEncodingGzip, true, provider.Quirks.UseAcceptEncodingGzip)
+	set(&opt.UseDataIntegrityProtections, false, provider.Quirks.UseDataIntegrityProtections)
 	set(&opt.MightGzip, true, provider.Quirks.MightGzip)
 	set(&opt.UseAlreadyExists, true, provider.Quirks.UseAlreadyExists)
 	set(&opt.UseMultipartUploads, true, provider.Quirks.UseMultipartUploads)
@@ -1634,11 +1648,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		newRoot, leaf := path.Split(oldRoot)
 		f.setRoot(newRoot)
 		_, err := f.NewObject(ctx, leaf)
-		if err != nil {
+		if errors.Is(err, fs.ErrorObjectNotFound) {
 			// File doesn't exist or is a directory so return old f
 			f.setRoot(oldRoot)
 			return f, nil
 		}
+		if err != nil {
+			return nil, err
+		}
 		// return an error with an fs which points to the parent
 		return f, fs.ErrorIsFile
 	}
@@ -4538,6 +4555,10 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
 		ui.req.ContentLanguage = aws.String(value)
 	case "content-type":
 		ui.req.ContentType = aws.String(value)
+	case "if-match":
+		ui.req.IfMatch = aws.String(value)
+	case "if-none-match":
+		ui.req.IfNoneMatch = aws.String(value)
 	case "x-amz-tagging":
 		ui.req.Tagging = aws.String(value)
 	default:
@@ -136,7 +136,7 @@ func (u *Uploader) UploadChunk(ctx context.Context, cnt int, options ...fs.OpenO
 	size, err := u.upload.stream.Read(data)
 
 	if err != nil {
-		fs.Errorf(u.fs, "Chunk %d: Error: Can not read from data strem: %v", cnt, err)
+		fs.Errorf(u.fs, "Chunk %d: Error: Can not read from data stream: %v", cnt, err)
 		return err
 	}
@@ -961,7 +961,7 @@ func (o *Object) setMetaData(info *api.ResourceInfoResponse) (err error) {
 	return nil
 }
 
-// readMetaData reads ands sets the new metadata for a storage.Object
+// readMetaData reads and sets the new metadata for a storage.Object
 func (o *Object) readMetaData(ctx context.Context) (err error) {
 	if o.hasMetaData {
 		return nil
@@ -1,6 +1,6 @@
 //go:build !plan9
 
-// Package list inplements 'rclone archive list'
+// Package list implements 'rclone archive list'
 package list
 
 import (
@@ -23,7 +23,7 @@ func init() {
 var commandDefinition = &cobra.Command{
 	Use:   "cryptcheck remote:path cryptedremote:path",
 	Short: `Cryptcheck checks the integrity of an encrypted remote.`,
-	Long: `Checks a remote against a [crypted](/crypt/) remote. This is the equivalent
+	Long: `Checks a remote against an [encrypted](/crypt/) remote. This is the equivalent
 of running rclone [check](/commands/rclone_check/), but able to check the
 checksums of the encrypted remote.
@@ -23,7 +23,15 @@ subcommand to specify the protocol, e.g.
 
     rclone serve http remote:
 ` + "```" + `
 
-Each subcommand has its own options which you can see in their help.`,
+When the "--metadata" flag is enabled, the following metadata fields will be provided as headers:
+- "content-disposition"
+- "cache-control"
+- "content-language"
+- "content-encoding"
+Note: The availability of these fields depends on whether the remote supports metadata.
+
+Each subcommand has its own options which you can see in their help.
+`,
 	Annotations: map[string]string{
 		"versionIntroduced": "v1.39",
 	},
@@ -1044,3 +1044,6 @@ put them back in again. -->
 - Alex <64072843+A1ex3@users.noreply.github.com>
 - n4n5 <its.just.n4n5@gmail.com>
 - aliaj1 <ali19961@gmail.com>
+- Sean Turner <30396892+seanturner026@users.noreply.github.com>
+- jijamik <30904953+jijamik@users.noreply.github.com>
+- Dominik Sander <git@dsander.de>
@@ -192,7 +192,7 @@ flag which permanently removes files on deletion instead of hiding
 them.
 
 Old versions of files, where available, are visible using the
-`--b2-versions` flag.
+`--b2-versions` flag. These can be deleted as required with `delete`.
 
 It is also possible to view a bucket as it was at a certain point in time,
 using the `--b2-version-at` flag. This will show the file versions as they
@@ -221,6 +221,18 @@ client credentials flow. In particular the "onedrive" option does not
 work. You can use the "sharepoint" option or if that does not find the
 correct drive ID type it in manually with the "driveid" option.
 
+To back up any user's data using this flow, grant your Azure AD
+application the necessary Microsoft Graph *Application permissions*
+(such as `Files.Read.All`, `Sites.Read.All` and/or `Sites.Selected`).
+With these permissions, rclone can access drives across the tenant,
+but it needs to know *which user or drive* you want. Supply a specific
+`drive_id` corresponding to that user's OneDrive, or a SharePoint site
+ID for SharePoint libraries. You can obtain a user's drive ID using
+Microsoft Graph (e.g. `/users/{userUPN}/drive`) and then configure it
+in rclone. Once the correct drive ID is provided, rclone will back up
+that user's data using the app-only token without requiring their
+credentials.
+
 **NOTE** Assigning permissions directly to the application means that
 anyone with the *Client ID* and *Client Secret* can access your
 OneDrive files. Take care to safeguard these credentials.
@@ -34,7 +34,7 @@ Here is an overview of the major features of each cloud storage system.
 | Google Photos    | -                | -      | No  | Yes | R | -   |
 | HDFS             | -                | R/W    | No  | No  | - | -   |
 | HiDrive          | HiDrive ¹²       | R/W    | No  | No  | - | -   |
-| HTTP             | -                | R      | No  | No  | R | -   |
+| HTTP             | -                | R      | No  | No  | R | R   |
 | iCloud Drive     | -                | R      | No  | No  | - | -   |
 | Internet Archive | MD5, SHA1, CRC32 | R/W ¹¹ | No  | No  | - | RWU |
 | Jottacloud       | MD5              | R/W    | Yes | No  | R | RW  |
@@ -20,7 +20,7 @@ Unlocks the config file if it is locked.
 
 Parameters:
 
-- 'config_password' - password to unlock the config file
+- 'configPassword' - password to unlock the config file
 
 A good idea is to disable AskPassword before making this call
 `,
@@ -30,9 +30,13 @@ A good idea is to disable AskPassword before making this call
 // Unlock the config file
 // A good idea is to disable AskPassword before making this call
 func rcConfigPassword(ctx context.Context, in rc.Params) (out rc.Params, err error) {
-	configPass, err := in.GetString("config_password")
+	configPass, err := in.GetString("configPassword")
 	if err != nil {
-		return nil, err
+		var err2 error
+		configPass, err2 = in.GetString("config_password") // backwards compat
+		if err2 != nil {
+			return nil, err
+		}
 	}
 	if SetConfigPassword(configPass) != nil {
 		return nil, errors.New("failed to set config password")
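Note: the lookup order matters: the camelCase key is tried first and its error is the one reported, so messages name the documented parameter while old snake_case callers keep working. The same pattern in isolation (`getPassword` is an illustrative helper, not rclone API):

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/rc"
)

// getPassword mirrors the fallback above: prefer the new camelCase key, fall
// back to the old one, but report the error for the documented name.
func getPassword(in rc.Params) (string, error) {
	pass, err := in.GetString("configPassword")
	if err != nil {
		var err2 error
		pass, err2 = in.GetString("config_password") // backwards compat
		if err2 != nil {
			return "", err // report the new, documented key
		}
	}
	return pass, nil
}

func main() {
	pass, err := getPassword(rc.Params{"config_password": "secret"})
	fmt.Println(pass, err) // secret <nil>
}
```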
@@ -215,13 +215,26 @@ func TestRcPaths(t *testing.T) {
 func TestRcConfigUnlock(t *testing.T) {
 	call := rc.Calls.Get("config/unlock")
 	assert.NotNil(t, call)
 
 	in := rc.Params{
-		"config_password": "test",
+		"configPassword": "test",
 	}
 	out, err := call.Fn(context.Background(), in)
-
-	assert.Nil(t, err)
+	require.NoError(t, err)
 	assert.Nil(t, out)
+
+	in = rc.Params{
+		"config_password": "test",
+	}
+	out, err = call.Fn(context.Background(), in)
+	require.NoError(t, err)
+	assert.Nil(t, out)
+
+	in = rc.Params{
+		"bad_config_password": "test",
+	}
+	out, err = call.Fn(context.Background(), in)
+	require.Error(t, err)
+	assert.ErrorContains(t, err, `Didn't find key "configPassword" in input`)
+	assert.Nil(t, out)
 }
fs/log.go (34 changed lines)
@@ -7,6 +7,9 @@ import (
 	"log/slog"
 	"os"
 	"slices"
+	"strings"
+
+	"github.com/rclone/rclone/lib/caller"
 )
 
 // LogLevel describes rclone's logs. These are a subset of the syslog log levels.
@@ -196,12 +199,42 @@ func Panicf(o any, text string, args ...any) {
 	panic(fmt.Sprintf(text, args...))
 }
 
+// Panic if this called from an rc job.
+//
+// This means fatal errors get turned into panics which get caught by
+// the rc job handler so they don't crash rclone.
+//
+// This detects if we are being called from an rc Job by looking for
+// Job.run in the call stack.
+//
+// Ideally we would do this by passing a context about but we don't
+// have one with the logging calls yet.
+//
+// This is tested in fs/rc/internal_job_test.go in TestInternalFatal.
+func panicIfRcJob(o any, text string, args []any) {
+	if !caller.Present("(*Job).run") {
+		return
+	}
+	var errTxt strings.Builder
+	_, _ = errTxt.WriteString("fatal error: ")
+	if o != nil {
+		_, _ = fmt.Fprintf(&errTxt, "%v: ", o)
+	}
+	if args != nil {
+		_, _ = fmt.Fprintf(&errTxt, text, args...)
+	} else {
+		_, _ = errTxt.WriteString(text)
+	}
+	panic(errTxt.String())
+}
+
 // Fatal writes critical log output for this Object or Fs and calls os.Exit(1).
 // It should always be seen by the user.
 func Fatal(o any, text string) {
 	if GetConfig(context.TODO()).LogLevel >= LogLevelCritical {
 		LogPrint(LogLevelCritical, o, text)
 	}
+	panicIfRcJob(o, text, nil)
 	os.Exit(1)
 }
@@ -211,6 +244,7 @@ func Fatalf(o any, text string, args ...any) {
 	if GetConfig(context.TODO()).LogLevel >= LogLevelCritical {
 		LogPrintf(LogLevelCritical, o, text, args...)
 	}
+	panicIfRcJob(o, text, args)
 	os.Exit(1)
 }
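Note: turning a fatal into a panic is safe here only because the rc job runner recovers panics and surfaces them as job errors. A stand-alone sketch of that mechanism (a hypothetical runner, not rclone's actual `Job.run`):

```go
package main

import "fmt"

// runJob recovers a panic from fn and returns it as an error, the way the
// rc job handler does; "panic received:" matches the tests further down.
func runJob(fn func()) (err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("panic received: %v", r)
		}
	}()
	fn()
	return nil
}

func main() {
	err := runJob(func() { panic("fatal error: arbitrary error on input map[]") })
	fmt.Println(err) // panic received: fatal error: arbitrary error on input map[]
}
```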
@@ -191,11 +191,12 @@ var _ fs.Fs = MemoryFs
 
 // MemoryObject is an in memory object
 type MemoryObject struct {
-	remote  string
-	modTime time.Time
-	content []byte
-	meta    fs.Metadata
-	fs      fs.Fs
+	remote   string
+	modTime  time.Time
+	content  []byte
+	meta     fs.Metadata
+	fs       fs.Fs
+	mimeType string
 }
 
 // NewMemoryObject returns an in memory Object with the modTime and content passed in
@@ -214,6 +215,12 @@ func (o *MemoryObject) WithMetadata(meta fs.Metadata) *MemoryObject {
 	return o
 }
 
+// WithMimeType adds mimeType to the MemoryObject
+func (o *MemoryObject) WithMimeType(mimeType string) *MemoryObject {
+	o.mimeType = mimeType
+	return o
+}
+
 // Content returns the underlying buffer
 func (o *MemoryObject) Content() []byte {
 	return o.content
@@ -329,8 +336,14 @@ func (o *MemoryObject) Metadata(ctx context.Context) (fs.Metadata, error) {
 	return o.meta, nil
 }
 
+// MimeType on the object
+func (o *MemoryObject) MimeType(ctx context.Context) string {
+	return o.mimeType
+}
+
 // Check interfaces
 var (
 	_ fs.Object     = (*MemoryObject)(nil)
+	_ fs.MimeTyper  = (*MemoryObject)(nil)
 	_ fs.Metadataer = (*MemoryObject)(nil)
 )
@@ -87,6 +87,7 @@ func TestMemoryObject(t *testing.T) {
 	content = content[:6] // make some extra cap
 
 	o := object.NewMemoryObject(remote, now, content)
+	o.WithMimeType("text/plain; charset=utf-8")
 
 	assert.Equal(t, content, o.Content())
 	assert.Equal(t, object.MemoryFs, o.Fs())
@@ -95,6 +96,7 @@ func TestMemoryObject(t *testing.T) {
 	assert.Equal(t, now, o.ModTime(context.Background()))
 	assert.Equal(t, int64(len(content)), o.Size())
 	assert.Equal(t, true, o.Storable())
+	assert.Equal(t, "text/plain; charset=utf-8", o.MimeType(context.Background()))
 
 	Hash, err := o.Hash(context.Background(), hash.MD5)
 	assert.NoError(t, err)
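Note: the new setter chains with the existing ones, which is how the serve test later in this set builds its fixture. A minimal usage sketch:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/object"
)

func main() {
	// Build an in-memory object carrying both metadata and a MIME type.
	o := object.NewMemoryObject("file.txt", time.Now(), []byte("hello")).
		WithMetadata(fs.Metadata{"cache-control": "no-cache"}).
		WithMimeType("text/plain; charset=utf-8")
	fmt.Println(o.MimeType(context.Background())) // text/plain; charset=utf-8
	meta, _ := o.Metadata(context.Background())
	fmt.Println(meta["cache-control"]) // no-cache
}
```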
@@ -64,6 +64,39 @@ func rcError(ctx context.Context, in Params) (out Params, err error) {
 	return nil, fmt.Errorf("arbitrary error on input %+v", in)
 }
 
+func init() {
+	Add(Call{
+		Path:  "rc/panic",
+		Fn:    rcPanic,
+		Title: "This returns an error by panicking",
+		Help: `
+This returns an error with the input as part of its error string.
+Useful for testing error handling.`,
+	})
+}
+
+// Return an error regardless
+func rcPanic(ctx context.Context, in Params) (out Params, err error) {
+	panic(fmt.Sprintf("arbitrary error on input %+v", in))
+}
+
+func init() {
+	Add(Call{
+		Path:  "rc/fatal",
+		Fn:    rcFatal,
+		Title: "This returns an fatal error",
+		Help: `
+This returns an error with the input as part of its error string.
+Useful for testing error handling.`,
+	})
+}
+
+// Return an error regardless
+func rcFatal(ctx context.Context, in Params) (out Params, err error) {
+	fs.Fatalf(nil, "arbitrary error on input %+v", in)
+	return nil, nil
+}
+
 func init() {
 	Add(Call{
 		Path: "rc/list",
fs/rc/internal_job_test.go (new file, 38 lines)
@@ -0,0 +1,38 @@
+// These tests use the job framework so must be external to the module
+
+package rc_test
+
+import (
+	"context"
+	"testing"
+
+	"github.com/rclone/rclone/fs/rc"
+	"github.com/rclone/rclone/fs/rc/jobs"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestInternalPanic(t *testing.T) {
+	ctx := context.Background()
+	call := rc.Calls.Get("rc/panic")
+	assert.NotNil(t, call)
+	in := rc.Params{}
+	_, out, err := jobs.NewJob(ctx, call.Fn, in)
+	require.Error(t, err)
+	assert.ErrorContains(t, err, "arbitrary error on input map[]")
+	assert.ErrorContains(t, err, "panic received:")
+	assert.Equal(t, rc.Params{}, out)
+}
+
+func TestInternalFatal(t *testing.T) {
+	ctx := context.Background()
+	call := rc.Calls.Get("rc/fatal")
+	assert.NotNil(t, call)
+	in := rc.Params{}
+	_, out, err := jobs.NewJob(ctx, call.Fn, in)
+	require.Error(t, err)
+	assert.ErrorContains(t, err, "arbitrary error on input map[]")
+	assert.ErrorContains(t, err, "panic received:")
+	assert.ErrorContains(t, err, "fatal error:")
+	assert.Equal(t, rc.Params{}, out)
+}
@@ -425,8 +425,8 @@ Results:
 
 - executeId - string id of rclone executing (change after restart)
 - jobids - array of integer job ids (starting at 1 on each restart)
-- running_ids - array of integer job ids that are running
-- finished_ids - array of integer job ids that are finished
+- runningIds - array of integer job ids that are running
+- finishedIds - array of integer job ids that are finished
 `,
 	})
 }
@@ -436,8 +436,8 @@ func rcJobList(ctx context.Context, in rc.Params) (out rc.Params, err error) {
 	out = make(rc.Params)
 	out["jobids"] = running.IDs()
 	runningIDs, finishedIDs := running.Stats()
-	out["running_ids"] = runningIDs
-	out["finished_ids"] = finishedIDs
+	out["runningIds"] = runningIDs
+	out["finishedIds"] = finishedIDs
 	out["executeId"] = executeID
 	return out, nil
 }
@@ -378,8 +378,8 @@ func TestRcJobList(t *testing.T) {
 	require.NoError(t, err)
 	require.NotNil(t, out1)
 	assert.Equal(t, []int64{1}, out1["jobids"], "should have job listed")
-	assert.Equal(t, []int64{1}, out1["running_ids"], "should have running job")
-	assert.Equal(t, []int64{}, out1["finished_ids"], "should not have finished job")
+	assert.Equal(t, []int64{1}, out1["runningIds"], "should have running job")
+	assert.Equal(t, []int64{}, out1["finishedIds"], "should not have finished job")
 
 	_, _, err = NewJob(ctx, longFn, rc.Params{"_async": true})
 	assert.NoError(t, err)
@@ -420,14 +420,14 @@ backends:
     fastlist: true
     ignore:
       # It just isn't possible to preserve the existing file with azure blob
-      # and make sure we don't leak uncomitted blocks.
+      # and make sure we don't leak uncommitted blocks.
      - TestMultithreadCopyAbort
   - backend: "azureblob"
     remote: "TestAzureBlob,directory_markers:"
     fastlist: true
    ignore:
       # It just isn't possible to preserve the existing file with azure blob
-      # and make sure we don't leak uncomitted blocks.
+      # and make sure we don't leak uncommitted blocks.
      - TestMultithreadCopyAbort
   - backend: "azurefiles"
     remote: "TestAzureFiles:"
go.mod (4 changed lines)
@@ -2,6 +2,10 @@ module github.com/rclone/rclone
 
 go 1.24.4
 
+godebug (
+	x509negativeserial=1
+)
+
 require (
 	bazil.org/fuse v0.0.0-20230120002735-62a210ff1fd5
 	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0
lib/caller/caller.go (new file, 26 lines)
@@ -0,0 +1,26 @@
+// Package caller contains functions to examine the call stack.
+package caller
+
+import (
+	"runtime"
+	"strings"
+)
+
+// Present looks for functionName in the call stack and return true if found
+//
+// Note that this ignores the caller.
+func Present(functionName string) bool {
+	var pcs [48]uintptr
+	n := runtime.Callers(3, pcs[:]) // skip runtime.Callers, Present and caller
+	frames := runtime.CallersFrames(pcs[:n])
+	for {
+		f, more := frames.Next()
+		if strings.HasSuffix(f.Function, functionName) {
+			return true
+		}
+		if !more {
+			break
+		}
+	}
+	return false
+}
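Note: a small usage sketch of the new package; `Present` matches by suffix (so qualified names like `(*Job).run` work) and deliberately skips its immediate caller:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/caller"
)

func inner() {
	fmt.Println(caller.Present("main.outer")) // true: outer is above us on the stack
	fmt.Println(caller.Present("main.inner")) // false: Present ignores its immediate caller
}

func outer() { inner() }

func main() { outer() }
```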
lib/caller/caller_test.go (new file, 37 lines)
@@ -0,0 +1,37 @@
+package caller
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestPresent(t *testing.T) {
+	assert.False(t, Present("NotFound"))
+	assert.False(t, Present("TestPresent"))
+	f := func() {
+		assert.True(t, Present("TestPresent"))
+	}
+	f()
+}
+
+func BenchmarkPresent(b *testing.B) {
+	for b.Loop() {
+		_ = Present("NotFound")
+	}
+}
+
+func BenchmarkPresent100(b *testing.B) {
+	var fn func(level int)
+	fn = func(level int) {
+		if level > 0 {
+			fn(level - 1)
+			return
+		}
+		for b.Loop() {
+			_ = Present("NotFound")
+		}
+
+	}
+	fn(100)
+}
@@ -39,6 +39,26 @@ func Object(w http.ResponseWriter, r *http.Request, o fs.Object) {
 	modTime := o.ModTime(r.Context())
 	w.Header().Set("Last-Modified", modTime.UTC().Format(http.TimeFormat))
 
+	// Set metadata headers if present
+	metadata, err := fs.GetMetadata(r.Context(), o)
+	if err != nil {
+		fs.Debugf(o, "Request get metadata error: %v", err)
+	}
+	if metadata != nil {
+		if metadata["content-disposition"] != "" {
+			w.Header().Set("Content-Disposition", metadata["content-disposition"])
+		}
+		if metadata["cache-control"] != "" {
+			w.Header().Set("Cache-Control", metadata["cache-control"])
+		}
+		if metadata["content-language"] != "" {
+			w.Header().Set("Content-Language", metadata["content-language"])
+		}
+		if metadata["content-encoding"] != "" {
+			w.Header().Set("Content-Encoding", metadata["content-encoding"])
+		}
+	}
+
 	if r.Method == "HEAD" {
 		return
 	}
@@ -8,6 +8,8 @@ import (
 	"testing"
 	"time"
 
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/object"
 	"github.com/rclone/rclone/fstest/mockobject"
 	"github.com/stretchr/testify/assert"
 )
@@ -82,3 +84,23 @@ func TestObjectBadRange(t *testing.T) {
 	body, _ := io.ReadAll(resp.Body)
 	assert.Equal(t, "Bad Request\n", string(body))
 }
+
+func TestObjectHEADMetadata(t *testing.T) {
+	w := httptest.NewRecorder()
+	r := httptest.NewRequest("HEAD", "http://example.com/aFile", nil)
+	m := fs.Metadata{
+		"content-disposition": "inline",
+		"cache-control":       "no-cache",
+		"content-language":    "en",
+		"content-encoding":    "gzip",
+	}
+	o := object.NewMemoryObject("aFile", time.Now(), []byte("")).
+		WithMetadata(m).WithMimeType("text/plain; charset=utf-8")
+	Object(w, r, o)
+	resp := w.Result()
+	assert.Equal(t, "text/plain; charset=utf-8", resp.Header.Get("Content-Type"))
+	assert.Equal(t, "inline", resp.Header.Get("Content-Disposition"))
+	assert.Equal(t, "no-cache", resp.Header.Get("Cache-Control"))
+	assert.Equal(t, "en", resp.Header.Get("Content-Language"))
+	assert.Equal(t, "gzip", resp.Header.Get("Content-Encoding"))
+}
@@ -4,11 +4,10 @@ package pacer
 import (
 	"errors"
 	"fmt"
-	"runtime"
-	"strings"
 	"sync"
 	"time"
 
+	"github.com/rclone/rclone/lib/caller"
 	liberrors "github.com/rclone/rclone/lib/errors"
 )
@@ -193,7 +192,9 @@ func (p *Pacer) endCall(retry bool, err error, limitConnections bool) {
 	p.mu.Unlock()
 }
 
-// Detect the pacer being called reentrantly.
+// call implements Call but with settable retries
+//
+// This detects the pacer being called reentrantly.
 //
 // This looks for Pacer.call in the call stack and returns true if it
 // is found.
@@ -204,27 +205,10 @@ func (p *Pacer) endCall(retry bool, err error, limitConnections bool) {
 // This is only needed when p.maxConnections > 0 which isn't a common
 // configuration so adding a bit of extra slowdown here is not a
 // problem.
-func pacerReentered() bool {
-	var pcs [48]uintptr
-	n := runtime.Callers(3, pcs[:]) // skip runtime.Callers, pacerReentered and call
-	frames := runtime.CallersFrames(pcs[:n])
-	for {
-		f, more := frames.Next()
-		if strings.HasSuffix(f.Function, "(*Pacer).call") {
-			return true
-		}
-		if !more {
-			break
-		}
-	}
-	return false
-}
-
-// call implements Call but with settable retries
 func (p *Pacer) call(fn Paced, retries int) (err error) {
 	var retry bool
 	limitConnections := false
-	if p.maxConnections > 0 && !pacerReentered() {
+	if p.maxConnections > 0 && !caller.Present("(*Pacer).call") {
 		limitConnections = true
 	}
 	for i := 1; i <= retries; i++ {
@@ -353,27 +353,6 @@ func TestCallParallel(t *testing.T) {
 	wait.Broadcast()
 }
 
-func BenchmarkPacerReentered(b *testing.B) {
-	for b.Loop() {
-		_ = pacerReentered()
-	}
-}
-
-func BenchmarkPacerReentered100(b *testing.B) {
-	var fn func(level int)
-	fn = func(level int) {
-		if level > 0 {
-			fn(level - 1)
-			return
-		}
-		for b.Loop() {
-			_ = pacerReentered()
-		}
-
-	}
-	fn(100)
-}
-
 func TestCallMaxConnectionsRecursiveDeadlock(t *testing.T) {
 	p := New(CalculatorOption(NewDefault(MinSleep(1*time.Millisecond), MaxSleep(2*time.Millisecond))))
 	p.SetMaxConnections(1)