mirror of https://github.com/rclone/rclone.git synced 2026-02-03 10:13:22 +00:00

Compare commits


1 Commit

Author SHA1 Message Date
Nick Craig-Wood
ee28856f1d gcs: Allow compressed files to be downloaded - fixes #2658
Before this change, the Go runtime would automatically decompress
compressed objects leading to length mismatches.

After this change rclone will download the compressed object which
will match in length and checksum.
2020-06-25 22:13:33 +01:00
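
For context, here is a minimal sketch (not part of this commit) of the net/http behaviour the message describes; the URL is a placeholder for any object stored with Content-Encoding: gzip.

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	url := "https://storage.googleapis.com/bucket/object" // placeholder gzip-encoded object

	// Default behaviour: the Transport adds "Accept-Encoding: gzip" itself and
	// transparently decompresses the body, so the bytes read do not match the
	// stored object's length or MD5 (Response.Uncompressed is true and
	// ContentLength is -1).
	if res, err := http.Get(url); err == nil {
		n, _ := io.Copy(io.Discard, res.Body)
		res.Body.Close()
		fmt.Println("decompressed bytes:", n, "uncompressed:", res.Uncompressed)
	}

	// Setting the header explicitly, as the change below does, disables the
	// automatic decompression, so the raw compressed bytes come back and the
	// length and checksum match the stored object.
	req, _ := http.NewRequest("GET", url, nil)
	req.Header.Set("Accept-Encoding", "gzip")
	if res, err := http.DefaultClient.Do(req); err == nil {
		n, _ := io.Copy(io.Discard, res.Body)
		res.Body.Close()
		fmt.Println("raw bytes:", n)
	}
}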
8 changed files with 21 additions and 80 deletions

View File

@@ -61,10 +61,6 @@ vars:
@echo GO_VERSION="'$(GO_VERSION)'"
@echo BETA_URL="'$(BETA_URL)'"
btest:
@echo "[$(TAG)]($(BETA_URL)) on branch [$(BRANCH)](https://github.com/rclone/rclone/tree/$(BRANCH)) (uploaded in 15-30 mins)" | xclip -r -sel clip
@echo "Copied markdown of beta release to clip board"
version:
@echo '$(TAG)'

View File

@@ -72,20 +72,6 @@ NB If filename_encryption is "off" then this option will do nothing.`,
Name: "password2",
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
IsPassword: true,
}, {
Name: "server_side_across_configs",
Default: false,
Help: `Allow server side operations (eg copy) to work across different crypt configs.
Normally this option is not what you want, but if you have two crypts
pointing to the same backend you can use it.
This can be used, for example, to change file name encryption type
without re-uploading all the data. Just make two crypt backends
pointing to two different directories with the single changed
parameter and use rclone move to move the files between the crypt
remotes.`,
Advanced: true,
}, {
Name: "show_mapping",
Help: `For all files listed show how the names encrypt.
@@ -195,7 +181,6 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
CanHaveEmptyDirectories: true,
SetTier: true,
GetTier: true,
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
return f, err
@@ -208,7 +193,6 @@ type Options struct {
DirectoryNameEncryption bool `config:"directory_name_encryption"`
Password string `config:"password"`
Password2 string `config:"password2"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ShowMapping bool `config:"show_mapping"`
}
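
As an illustration of the workflow described in the server_side_across_configs help text above (remote names, bucket and paths here are invented, and the obscured passwords are omitted), two crypt remotes can point at different directories of the same underlying remote, differing only in the filename encryption setting:

# rclone.conf sketch, not taken from this commit
[oldcrypt]
type = crypt
remote = s3:bucket/old
filename_encryption = standard
server_side_across_configs = true
# password / password2 must be the same in both remotes

[newcrypt]
type = crypt
remote = s3:bucket/new
filename_encryption = obfuscate
server_side_across_configs = true
# password / password2 must be the same in both remotes

Running rclone move oldcrypt: newcrypt: then re-encrypts the file names under the new scheme while the data itself moves server side rather than being re-uploaded.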

View File

@@ -298,6 +298,7 @@ type Object struct {
bytes int64 // Bytes in the object
modTime time.Time // Modified time of the object
mimeType string
gzipped bool // set if object has Content-Encoding: gzip
}
// ------------------------------------------------------------
@@ -899,6 +900,7 @@ func (o *Object) setMetaData(info *storage.Object) {
o.url = info.MediaLink
o.bytes = int64(info.Size)
o.mimeType = info.ContentType
o.gzipped = info.ContentEncoding == "gzip"
// Read md5sum
md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)
@@ -1026,6 +1028,15 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
fs.FixRangeOption(options, o.bytes)
if o.gzipped {
// Allow files which are stored on the cloud storage system
// compressed to be downloaded without being decompressed. Note
// that setting this here overrides the automatic decompression
// in the Transport.
//
// See: https://cloud.google.com/storage/docs/transcoding
req.Header.Set("Accept-Encoding", "gzip")
}
fs.OpenOptionAddHTTPHeaders(req.Header, options)
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {

View File

@@ -90,24 +90,7 @@ are being uploaded and aborts with a message which starts "can't copy
However on some file systems this modification time check may fail (eg
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
check can be disabled with this flag.
If this flag is set, rclone will use its best efforts to transfer a
file which is being updated. If the file is only having things
appended to it (eg a log) then rclone will transfer the log file with
the size it had the first time rclone saw it.
If the file is being modified throughout (not just appended to) then
the transfer may fail with a hash check failure.
In detail, once the file has had stat() called on it for the first
time we:
- Only transfer the size that stat gave
- Only checksum the size that stat gave
- Don't update the stat info for the file
`,
check can be disabled with this flag.`,
Default: false,
Advanced: true,
}, {
@@ -826,10 +809,6 @@ func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
} else {
in, err = o.openTranslatedLink(0, -1)
}
// If not checking for updates, only read size given
if o.fs.opt.NoCheckUpdated {
in = readers.NewLimitedReadCloser(in, o.size)
}
if err != nil {
return "", errors.Wrap(err, "hash: failed to open")
}
@@ -976,13 +955,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
}
}
// If not checking updated then limit to current size. This means if
// file is being extended, readers will read a o.Size() bytes rather
// than the new size making for a consistent upload.
if limit < 0 && o.fs.opt.NoCheckUpdated {
limit = o.size
}
// Handle a translated link
if o.translatedLink {
return o.openTranslatedLink(offset, limit)
@@ -1179,10 +1151,6 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
// setMetadata sets the file info from the os.FileInfo passed in
func (o *Object) setMetadata(info os.FileInfo) {
// if not checking updated then don't update the stat
if o.fs.opt.NoCheckUpdated && !o.modTime.IsZero() {
return
}
// Don't overwrite the info if we don't need to
// this avoids upsetting the race detector
if o.size != info.Size() {
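
A standalone sketch of the "only transfer and checksum the size that stat gave" idea from the NoCheckUpdated help text above (the path is a placeholder, and the standard library is used here instead of rclone's lib/readers):

package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.Open("/var/log/app.log") // placeholder: a file that may grow while being read
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		fmt.Println(err)
		return
	}
	size := fi.Size() // remember the size from the first stat()

	// Read and checksum only that many bytes even if the file is appended to,
	// so the transferred length and the hash stay consistent with each other.
	h := md5.New()
	n, err := io.Copy(h, io.LimitReader(f, size))
	fmt.Printf("read %d bytes, md5 %x, err %v\n", n, h.Sum(nil), err)
}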

View File

@@ -40,7 +40,6 @@ import (
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
@@ -1205,9 +1204,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
WithCredentials(cred).
WithHTTPClient(fshttp.NewClient(fs.Config)).
WithS3ForcePathStyle(opt.ForcePathStyle).
WithS3UseAccelerate(opt.UseAccelerateEndpoint).
WithS3UsEast1RegionalEndpoint(endpoints.RegionalS3UsEast1Endpoint)
WithS3UseAccelerate(opt.UseAccelerateEndpoint)
if opt.Region != "" {
awsConfig.WithRegion(opt.Region)
}

View File

@@ -172,7 +172,7 @@ func makeConfigPath() string {
// Check to see if user supplied a --config variable or environment
// variable. We can't use pflag for this because it isn't initialised
// yet so we search the command line manually.
_, configSupplied := fs.LookupEnv("RCLONE_CONFIG")
_, configSupplied := os.LookupEnv("RCLONE_CONFIG")
if !configSupplied {
for _, item := range os.Args {
if item == "--config" || strings.HasPrefix(item, "--config=") {
@@ -311,7 +311,7 @@ func loadConfigFile() (*goconfig.ConfigFile, error) {
} else {
usingPasswordCommand = false
envpw := fs.Getenv("RCLONE_CONFIG_PASS")
envpw := os.Getenv("RCLONE_CONFIG_PASS")
if envpw != "" {
err := setConfigPassword(envpw)
@@ -1432,7 +1432,7 @@ func FileGetFlag(section, key string) (string, bool) {
// It looks up defaults in the environment if they are present
func FileGet(section, key string, defaultVal ...string) string {
envKey := fs.ConfigToEnv(section, key)
newValue, found := fs.LookupEnv(envKey)
newValue, found := os.LookupEnv(envKey)
if found {
defaultVal = []string{newValue}
}
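
A brief aside on the standard library calls used in this file: os.LookupEnv reports whether the variable is present at all, which is what the configSupplied check above relies on, while os.Getenv cannot distinguish "unset" from "set to an empty string". A minimal sketch:

package main

import (
	"fmt"
	"os"
)

func main() {
	os.Setenv("RCLONE_CONFIG", "") // set, but empty

	fmt.Println(os.Getenv("RCLONE_CONFIG") == "") // true: indistinguishable from unset

	_, ok := os.LookupEnv("RCLONE_CONFIG")
	fmt.Println(ok) // true: the variable is present even though its value is empty
}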

View File

@@ -4,6 +4,7 @@ package flags
import (
"log"
"os"
"time"
"github.com/rclone/rclone/fs"
@@ -14,7 +15,7 @@ import (
// sets the default from the environment if possible.
func setDefaultFromEnv(flags *pflag.FlagSet, name string) {
key := fs.OptionToEnv(name)
newValue, found := fs.LookupEnv(key)
newValue, found := os.LookupEnv(key)
if found {
flag := flags.Lookup(name)
if flag == nil {
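
The hunk above cuts off before the body of setDefaultFromEnv; as a hedged sketch of the general technique (not rclone's exact implementation), a pflag default can be taken from an environment variable like this, with the flag and variable names chosen for illustration:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"
)

// setDefaultFromEnv makes the value of envKey, if set, the default for the
// named flag; an explicit command line flag still wins because Parse runs later.
func setDefaultFromEnv(flags *pflag.FlagSet, name, envKey string) {
	newValue, found := os.LookupEnv(envKey)
	if !found {
		return
	}
	flag := flags.Lookup(name)
	if flag == nil {
		return
	}
	flag.DefValue = newValue     // shown as the default in --help output
	_ = flag.Value.Set(newValue) // used when the flag is not given on the command line
}

func main() {
	flags := pflag.NewFlagSet("example", pflag.ContinueOnError)
	flags.String("transfers", "4", "number of file transfers to run in parallel")
	setDefaultFromEnv(flags, "transfers", "RCLONE_TRANSFERS")
	_ = flags.Parse(os.Args[1:])
	value, _ := flags.GetString("transfers")
	fmt.Println("transfers =", value)
}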

View File

@@ -1193,25 +1193,9 @@ func ParseRemote(path string) (fsInfo *RegInfo, configName, fsPath string, err e
// A configmap.Getter to read from the environment RCLONE_CONFIG_backend_option_name
type configEnvVars string
// LookupEnv calls os.LookupEnv and Debugf's a message if the env var was found
func LookupEnv(key string) (value string, ok bool) {
value, ok = os.LookupEnv(key)
if ok {
Debugf(nil, "Read env var %s", key)
}
return value, ok
}
// Getenv calls os.LookupEnv and Debugf's a message if the env var was
// found. If the var wasn't found it returns an empty string.
func Getenv(key string) (value string) {
value, _ = LookupEnv(key)
return value
}
// Get a config item from the environment variables if possible
func (configName configEnvVars) Get(key string) (value string, ok bool) {
return LookupEnv(ConfigToEnv(string(configName), key))
return os.LookupEnv(ConfigToEnv(string(configName), key))
}
// A configmap.Getter to read from the environment RCLONE_option_name
@@ -1227,12 +1211,12 @@ func (oev optionEnvVars) Get(key string) (value string, ok bool) {
}
// For options with NoPrefix set, check without prefix too
if opt.NoPrefix {
value, ok = LookupEnv(OptionToEnv(key))
value, ok = os.LookupEnv(OptionToEnv(key))
if ok {
return value, ok
}
}
return LookupEnv(OptionToEnv(oev.fsInfo.Prefix + "-" + key))
return os.LookupEnv(OptionToEnv(oev.fsInfo.Prefix + "-" + key))
}
// A configmap.Getter to read either the default value or the set
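
Stepping back from the hunks above, the configEnvVars and optionEnvVars getters rely on a fixed naming convention for environment variables. The mapping function below is an illustrative stand-in for ConfigToEnv, not rclone's own implementation:

package main

import (
	"fmt"
	"os"
	"strings"
)

// configToEnv mimics the RCLONE_CONFIG_<REMOTE>_<OPTION> convention:
// upper-case the remote name and option, turning '-' into '_'.
func configToEnv(section, key string) string {
	return "RCLONE_CONFIG_" + strings.ToUpper(strings.ReplaceAll(section+"_"+key, "-", "_"))
}

func main() {
	os.Setenv("RCLONE_CONFIG_MYREMOTE_TYPE", "s3")
	if value, ok := os.LookupEnv(configToEnv("myremote", "type")); ok {
		fmt.Println("myremote type =", value) // prints: myremote type = s3
	}
}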