mirror of https://github.com/rclone/rclone.git
synced 2025-12-21 02:33:49 +00:00

vendor: update all dependencies

250 vendor/github.com/aws/aws-sdk-go/service/s3/api.go (generated, vendored)
File diff suppressed because it is too large.

58 vendor/github.com/aws/aws-sdk-go/service/s3/bench_test.go (generated, vendored, Normal file)
@@ -0,0 +1,58 @@
package s3

import (
	"bytes"
	"testing"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/awstesting/unit"
)

func BenchmarkPresign_GetObject(b *testing.B) {
	sess := unit.Session
	svc := New(sess)

	for i := 0; i < b.N; i++ {
		req, _ := svc.GetObjectRequest(&GetObjectInput{
			Bucket: aws.String("mock-bucket"),
			Key:    aws.String("mock-key"),
		})

		u, h, err := req.PresignRequest(15 * time.Minute)
		if err != nil {
			b.Fatalf("expect no error, got %v", err)
		}
		if len(u) == 0 {
			b.Fatalf("expect url, got none")
		}
		if len(h) != 0 {
			b.Fatalf("no signed headers, got %v", h)
		}
	}
}

func BenchmarkPresign_PutObject(b *testing.B) {
	sess := unit.Session
	svc := New(sess)

	body := make([]byte, 1024*1024*20)
	for i := 0; i < b.N; i++ {
		req, _ := svc.PutObjectRequest(&PutObjectInput{
			Bucket: aws.String("mock-bucket"),
			Key:    aws.String("mock-key"),
			Body:   bytes.NewReader(body),
		})

		u, h, err := req.PresignRequest(15 * time.Minute)
		if err != nil {
			b.Fatalf("expect no error, got %v", err)
		}
		if len(u) == 0 {
			b.Fatalf("expect url, got none")
		}
		if len(h) == 0 {
			b.Fatalf("expect signed header, got none")
		}
	}
}

249 vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go (generated, vendored, Normal file)
@@ -0,0 +1,249 @@
package s3

import (
	"bytes"
	"crypto/md5"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"hash"
	"io"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/internal/sdkio"
)

const (
	contentMD5Header    = "Content-Md5"
	contentSha256Header = "X-Amz-Content-Sha256"
	amzTeHeader         = "X-Amz-Te"
	amzTxEncodingHeader = "X-Amz-Transfer-Encoding"

	appendMD5TxEncoding = "append-md5"
)

// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
// require it.
func contentMD5(r *request.Request) {
	h := md5.New()

	if !aws.IsReaderSeekable(r.Body) {
		if r.Config.Logger != nil {
			r.Config.Logger.Log(fmt.Sprintf(
				"Unable to compute Content-MD5 for unseekable body, S3.%s",
				r.Operation.Name))
		}
		return
	}

	if _, err := copySeekableBody(h, r.Body); err != nil {
		r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err)
		return
	}

	// encode the md5 checksum in base64 and set the request header.
	v := base64.StdEncoding.EncodeToString(h.Sum(nil))
	r.HTTPRequest.Header.Set(contentMD5Header, v)
}

// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the
// request. If the body is not seekable, or S3DisableContentMD5Validation is
// set, this handler will be ignored.
func computeBodyHashes(r *request.Request) {
	if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
		return
	}
	if r.IsPresigned() {
		return
	}
	if r.Error != nil || !aws.IsReaderSeekable(r.Body) {
		return
	}

	var md5Hash, sha256Hash hash.Hash
	hashers := make([]io.Writer, 0, 2)

	// Determine upfront which hashes can be set without overriding user
	// provided header data.
	if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 {
		md5Hash = md5.New()
		hashers = append(hashers, md5Hash)
	}

	if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 {
		sha256Hash = sha256.New()
		hashers = append(hashers, sha256Hash)
	}

	// Create the destination writer based on the hashes that are not already
	// provided by the user.
	var dst io.Writer
	switch len(hashers) {
	case 0:
		return
	case 1:
		dst = hashers[0]
	default:
		dst = io.MultiWriter(hashers...)
	}

	if _, err := copySeekableBody(dst, r.Body); err != nil {
		r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err)
		return
	}

	// For the hashes created, set the associated headers that the user did not
	// already provide.
	if md5Hash != nil {
		sum := make([]byte, md5.Size)
		encoded := make([]byte, md5Base64EncLen)

		base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0]))
		r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)}
	}

	if sha256Hash != nil {
		encoded := make([]byte, sha256HexEncLen)
		sum := make([]byte, sha256.Size)

		hex.Encode(encoded, sha256Hash.Sum(sum[0:0]))
		r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)}
	}
}

const (
	md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen
	sha256HexEncLen = sha256.Size * 2        // hex.EncodedLen
)

func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) {
	curPos, err := src.Seek(0, sdkio.SeekCurrent)
	if err != nil {
		return 0, err
	}

	// hash the body. seek back to the first position after reading to reset
	// the body for transmission. copy errors may be assumed to be from the
	// body.
	n, err := io.Copy(dst, src)
	if err != nil {
		return n, err
	}

	_, err = src.Seek(curPos, sdkio.SeekStart)
	if err != nil {
		return n, err
	}

	return n, nil
}

// Adds the x-amz-te: append-md5 header to the request. This requests that the
// service respond with a trailing MD5 checksum.
//
// Will not ask for append MD5 if disabled, the request is presigned, or the
// API operation does not support content MD5 validation.
func askForTxEncodingAppendMD5(r *request.Request) {
	if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
		return
	}
	if r.IsPresigned() {
		return
	}
	r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding)
}

func useMD5ValidationReader(r *request.Request) {
	if r.Error != nil {
		return
	}

	if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding {
		return
	}

	var bodyReader *io.ReadCloser
	var contentLen int64
	switch tv := r.Data.(type) {
	case *GetObjectOutput:
		bodyReader = &tv.Body
		contentLen = aws.Int64Value(tv.ContentLength)
		// Update ContentLength, hiding the trailing MD5 checksum.
		tv.ContentLength = aws.Int64(contentLen - md5.Size)
		tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range"))
	default:
		r.Error = awserr.New("ChecksumValidationError",
			fmt.Sprintf("%s: %s header received on unsupported API, %s",
				amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name,
			), nil)
		return
	}

	if contentLen < md5.Size {
		r.Error = awserr.New("ChecksumValidationError",
			fmt.Sprintf("invalid Content-Length %d for %s %s",
				contentLen, appendMD5TxEncoding, amzTxEncodingHeader,
			), nil)
		return
	}

	// Wrap and swap the response body reader with the validation reader.
	*bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size)
}

type md5ValidationReader struct {
	rawReader io.ReadCloser
	payload   io.Reader
	hash      hash.Hash

	payloadLen int64
	read       int64
}

func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader {
	h := md5.New()
	return &md5ValidationReader{
		rawReader:  reader,
		payload:    io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h),
		hash:       h,
		payloadLen: payloadLen,
	}
}

func (v *md5ValidationReader) Read(p []byte) (n int, err error) {
	n, err = v.payload.Read(p)
	if err != nil && err != io.EOF {
		return n, err
	}

	v.read += int64(n)

	if err == io.EOF {
		if v.read != v.payloadLen {
			return n, io.ErrUnexpectedEOF
		}
		expectSum := make([]byte, md5.Size)
		actualSum := make([]byte, md5.Size)
		if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil {
			return n, sumReadErr
		}
		actualSum = v.hash.Sum(actualSum[0:0])
		if !bytes.Equal(expectSum, actualSum) {
			return n, awserr.New("InvalidChecksum",
				fmt.Sprintf("expected MD5 checksum %s, got %s",
					hex.EncodeToString(expectSum),
					hex.EncodeToString(actualSum),
				),
				nil)
		}
	}

	return n, err
}

func (v *md5ValidationReader) Close() error {
	return v.rawReader.Close()
}
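
The computeBodyHashes handler above is registered on the Build handler list for PutObject and UploadPart (see customizations.go below), so the checksum headers appear as soon as a request is built. A minimal sketch, outside the vendored diff, of how that can be observed locally with the updated SDK; the bucket and key names are placeholders and nothing is sent to S3:

package main

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Only builds the request locally; no credentials are needed for Build.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	req, _ := svc.PutObjectRequest(&s3.PutObjectInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket/key
		Key:    aws.String("example-key"),
		Body:   bytes.NewReader([]byte("hello body hashes")),
	})

	// Build runs the Build handler list, which for PutObject/UploadPart now
	// includes computeBodyHashes for seekable, non-presigned bodies.
	if err := req.Build(); err != nil {
		fmt.Println("build error:", err)
		return
	}
	fmt.Println("Content-Md5:", req.HTTPRequest.Header.Get("Content-Md5"))
	fmt.Println("X-Amz-Content-Sha256:", req.HTTPRequest.Header.Get("X-Amz-Content-Sha256"))
}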

523 vendor/github.com/aws/aws-sdk-go/service/s3/body_hash_test.go (generated, vendored, Normal file)
@@ -0,0 +1,523 @@
package s3

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"strings"
	"testing"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/internal/sdkio"
)

type errorReader struct{}

func (errorReader) Read([]byte) (int, error) {
	return 0, fmt.Errorf("errorReader error")
}
func (errorReader) Seek(int64, int) (int64, error) {
	return 0, nil
}

func TestComputeBodyHases(t *testing.T) {
	bodyContent := []byte("bodyContent goes here")

	cases := []struct {
		Req               *request.Request
		ExpectMD5         string
		ExpectSHA256      string
		Error             string
		DisableContentMD5 bool
		Presigned         bool
	}{
		{
			Req: &request.Request{
				HTTPRequest: &http.Request{
					Header: http.Header{},
				},
				Body: bytes.NewReader(bodyContent),
			},
			ExpectMD5:    "CqD6NNPvoNOBT/5pkjtzOw==",
			ExpectSHA256: "3ff09c8b42a58a905e27835919ede45b61722e7cd400f30101bd9ed1a69a1825",
		},
		{
			Req: &request.Request{
				HTTPRequest: &http.Request{
					Header: func() http.Header {
						h := http.Header{}
						h.Set(contentMD5Header, "MD5AlreadySet")
						return h
					}(),
				},
				Body: bytes.NewReader(bodyContent),
			},
			ExpectMD5:    "MD5AlreadySet",
			ExpectSHA256: "3ff09c8b42a58a905e27835919ede45b61722e7cd400f30101bd9ed1a69a1825",
		},
		{
			Req: &request.Request{
				HTTPRequest: &http.Request{
					Header: func() http.Header {
						h := http.Header{}
						h.Set(contentSha256Header, "SHA256AlreadySet")
						return h
					}(),
				},
				Body: bytes.NewReader(bodyContent),
			},
			ExpectMD5:    "CqD6NNPvoNOBT/5pkjtzOw==",
			ExpectSHA256: "SHA256AlreadySet",
		},
		{
			Req: &request.Request{
				HTTPRequest: &http.Request{
					Header: func() http.Header {
						h := http.Header{}
						h.Set(contentMD5Header, "MD5AlreadySet")
						h.Set(contentSha256Header, "SHA256AlreadySet")
						return h
					}(),
				},
				Body: bytes.NewReader(bodyContent),
			},
			ExpectMD5:    "MD5AlreadySet",
			ExpectSHA256: "SHA256AlreadySet",
		},
		{
			Req: &request.Request{
				HTTPRequest: &http.Request{
					Header: http.Header{},
				},
				// Non-seekable reader
				Body: aws.ReadSeekCloser(bytes.NewBuffer(bodyContent)),
			},
			ExpectMD5:    "",
			ExpectSHA256: "",
		},
		{
			Req: &request.Request{
				HTTPRequest: &http.Request{
					Header: http.Header{},
				},
				// Empty seekable body
				Body: aws.ReadSeekCloser(bytes.NewReader(nil)),
			},
			ExpectMD5:    "1B2M2Y8AsgTpgAmY7PhCfg==",
			ExpectSHA256: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
		},
		{
			Req: &request.Request{
				HTTPRequest: &http.Request{
					Header: http.Header{},
				},
				// failure while reading reader
				Body: errorReader{},
			},
			ExpectMD5:    "",
			ExpectSHA256: "",
			Error:        "errorReader error",
		},
		{
			// Disabled ContentMD5 validation
			Req: &request.Request{
				HTTPRequest: &http.Request{
					Header: http.Header{},
				},
				Body: bytes.NewReader(bodyContent),
			},
			ExpectMD5:         "",
			ExpectSHA256:      "",
			DisableContentMD5: true,
		},
		{
			// Presigned request
			Req: &request.Request{
				HTTPRequest: &http.Request{
					Header: http.Header{},
				},
				Body: bytes.NewReader(bodyContent),
			},
			ExpectMD5:    "",
			ExpectSHA256: "",
			Presigned:    true,
		},
	}

	for i, c := range cases {
		c.Req.Config.S3DisableContentMD5Validation = aws.Bool(c.DisableContentMD5)

		if c.Presigned {
			c.Req.ExpireTime = 10 * time.Minute
		}
		computeBodyHashes(c.Req)

		if e, a := c.ExpectMD5, c.Req.HTTPRequest.Header.Get(contentMD5Header); e != a {
			t.Errorf("%d, expect %v md5, got %v", i, e, a)
		}

		if e, a := c.ExpectSHA256, c.Req.HTTPRequest.Header.Get(contentSha256Header); e != a {
			t.Errorf("%d, expect %v sha256, got %v", i, e, a)
		}

		if len(c.Error) != 0 {
			if c.Req.Error == nil {
				t.Fatalf("%d, expect error, got none", i)
			}
			if e, a := c.Error, c.Req.Error.Error(); !strings.Contains(a, e) {
				t.Errorf("%d, expect %v error to be in %v", i, e, a)
			}

		} else if c.Req.Error != nil {
			t.Errorf("%d, expect no error, got %v", i, c.Req.Error)
		}
	}
}

func BenchmarkComputeBodyHashes(b *testing.B) {
	body := bytes.NewReader(make([]byte, 2*1024))
	req := &request.Request{
		HTTPRequest: &http.Request{
			Header: http.Header{},
		},
		Body: body,
	}
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		computeBodyHashes(req)
		if req.Error != nil {
			b.Fatalf("expect no error, got %v", req.Error)
		}

		req.HTTPRequest.Header = http.Header{}
		body.Seek(0, sdkio.SeekStart)
	}
}

func TestAskForTxEncodingAppendMD5(t *testing.T) {
	cases := []struct {
		DisableContentMD5 bool
		Presigned         bool
	}{
		{DisableContentMD5: true},
		{DisableContentMD5: false},
		{Presigned: true},
	}

	for i, c := range cases {
		req := &request.Request{
			HTTPRequest: &http.Request{
				Header: http.Header{},
			},
			Config: aws.Config{
				S3DisableContentMD5Validation: aws.Bool(c.DisableContentMD5),
			},
		}
		if c.Presigned {
			req.ExpireTime = 10 * time.Minute
		}

		askForTxEncodingAppendMD5(req)

		v := req.HTTPRequest.Header.Get(amzTeHeader)

		expectHeader := !(c.DisableContentMD5 || c.Presigned)

		if e, a := expectHeader, len(v) != 0; e != a {
			t.Errorf("%d, expect %t disable content MD5, got %t, %s", i, e, a, v)
		}
	}
}

func TestUseMD5ValidationReader(t *testing.T) {
	body := []byte("create a really cool md5 checksum of me")
	bodySum := md5.Sum(body)
	bodyWithSum := append(body, bodySum[:]...)

	emptyBodySum := md5.Sum([]byte{})

	cases := []struct {
		Req      *request.Request
		Error    string
		Validate func(output interface{}) error
	}{
		{
			// Positive: Use Validation reader
			Req: &request.Request{
				HTTPResponse: &http.Response{
					Header: func() http.Header {
						h := http.Header{}
						h.Set(amzTxEncodingHeader, appendMD5TxEncoding)
						return h
					}(),
				},
				Data: &GetObjectOutput{
					Body:          ioutil.NopCloser(bytes.NewReader(bodyWithSum)),
					ContentLength: aws.Int64(int64(len(bodyWithSum))),
				},
			},
			Validate: func(output interface{}) error {
				getObjOut := output.(*GetObjectOutput)
				reader, ok := getObjOut.Body.(*md5ValidationReader)
				if !ok {
					return fmt.Errorf("expect %T updated body reader, got %T",
						(*md5ValidationReader)(nil), getObjOut.Body)
				}

				if reader.rawReader == nil {
					return fmt.Errorf("expect rawReader not to be nil")
				}
				if reader.payload == nil {
					return fmt.Errorf("expect payload not to be nil")
				}
				if e, a := int64(len(bodyWithSum)-md5.Size), reader.payloadLen; e != a {
					return fmt.Errorf("expect %v payload len, got %v", e, a)
				}
				if reader.hash == nil {
					return fmt.Errorf("expect hash not to be nil")
				}

				return nil
			},
		},
		{
			// Positive: Use Validation reader, empty object
			Req: &request.Request{
				HTTPResponse: &http.Response{
					Header: func() http.Header {
						h := http.Header{}
						h.Set(amzTxEncodingHeader, appendMD5TxEncoding)
						return h
					}(),
				},
				Data: &GetObjectOutput{
					Body:          ioutil.NopCloser(bytes.NewReader(emptyBodySum[:])),
					ContentLength: aws.Int64(int64(len(emptyBodySum[:]))),
				},
			},
			Validate: func(output interface{}) error {
				getObjOut := output.(*GetObjectOutput)
				reader, ok := getObjOut.Body.(*md5ValidationReader)
				if !ok {
					return fmt.Errorf("expect %T updated body reader, got %T",
						(*md5ValidationReader)(nil), getObjOut.Body)
				}

				if reader.rawReader == nil {
					return fmt.Errorf("expect rawReader not to be nil")
				}
				if reader.payload == nil {
					return fmt.Errorf("expect payload not to be nil")
				}
				if e, a := int64(len(emptyBodySum)-md5.Size), reader.payloadLen; e != a {
					return fmt.Errorf("expect %v payload len, got %v", e, a)
				}
				if reader.hash == nil {
					return fmt.Errorf("expect hash not to be nil")
				}

				return nil
			},
		},
		{
			// Negative: amzTxEncoding header not set
			Req: &request.Request{
				HTTPResponse: &http.Response{
					Header: http.Header{},
				},
				Data: &GetObjectOutput{
					Body:          ioutil.NopCloser(bytes.NewReader(body)),
					ContentLength: aws.Int64(int64(len(body))),
				},
			},
			Validate: func(output interface{}) error {
				getObjOut := output.(*GetObjectOutput)
				reader, ok := getObjOut.Body.(*md5ValidationReader)
				if ok {
					return fmt.Errorf("expect body reader not to be %T",
						reader)
				}

				return nil
			},
		},
		{
			// Negative: Not GetObjectOutput type.
			Req: &request.Request{
				Operation: &request.Operation{
					Name: "PutObject",
				},
				HTTPResponse: &http.Response{
					Header: func() http.Header {
						h := http.Header{}
						h.Set(amzTxEncodingHeader, appendMD5TxEncoding)
						return h
					}(),
				},
				Data: &PutObjectOutput{},
			},
			Error: "header received on unsupported API",
			Validate: func(output interface{}) error {
				_, ok := output.(*PutObjectOutput)
				if !ok {
					return fmt.Errorf("expect %T output not to change, got %T",
						(*PutObjectOutput)(nil), output)
				}

				return nil
			},
		},
		{
			// Negative: invalid content length.
			Req: &request.Request{
				HTTPResponse: &http.Response{
					Header: func() http.Header {
						h := http.Header{}
						h.Set(amzTxEncodingHeader, appendMD5TxEncoding)
						return h
					}(),
				},
				Data: &GetObjectOutput{
					Body:          ioutil.NopCloser(bytes.NewReader(bodyWithSum)),
					ContentLength: aws.Int64(-1),
				},
			},
			Error: "invalid Content-Length -1",
			Validate: func(output interface{}) error {
				getObjOut := output.(*GetObjectOutput)
				reader, ok := getObjOut.Body.(*md5ValidationReader)
				if ok {
					return fmt.Errorf("expect body reader not to be %T",
						reader)
				}
				return nil
			},
		},
		{
			// Negative: invalid content length, < md5.Size.
			Req: &request.Request{
				HTTPResponse: &http.Response{
					Header: func() http.Header {
						h := http.Header{}
						h.Set(amzTxEncodingHeader, appendMD5TxEncoding)
						return h
					}(),
				},
				Data: &GetObjectOutput{
					Body:          ioutil.NopCloser(bytes.NewReader(make([]byte, 5))),
					ContentLength: aws.Int64(5),
				},
			},
			Error: "invalid Content-Length 5",
			Validate: func(output interface{}) error {
				getObjOut := output.(*GetObjectOutput)
				reader, ok := getObjOut.Body.(*md5ValidationReader)
				if ok {
					return fmt.Errorf("expect body reader not to be %T",
						reader)
				}
				return nil
			},
		},
	}

	for i, c := range cases {
		useMD5ValidationReader(c.Req)
		if len(c.Error) != 0 {
			if c.Req.Error == nil {
				t.Fatalf("%d, expect error, got none", i)
			}
			if e, a := c.Error, c.Req.Error.Error(); !strings.Contains(a, e) {
				t.Errorf("%d, expect %v error to be in %v", i, e, a)
			}
		} else if c.Req.Error != nil {
			t.Errorf("%d, expect no error, got %v", i, c.Req.Error)
		}

		if c.Validate != nil {
			if err := c.Validate(c.Req.Data); err != nil {
				t.Errorf("%d, expect Data to validate, got %v", i, err)
			}
		}
	}
}

func TestReaderMD5Validation(t *testing.T) {
	body := []byte("create a really cool md5 checksum of me")
	bodySum := md5.Sum(body)
	bodyWithSum := append(body, bodySum[:]...)
	emptyBodySum := md5.Sum([]byte{})
	badBodySum := append(body, emptyBodySum[:]...)

	cases := []struct {
		Content       []byte
		ContentReader io.ReadCloser
		PayloadLen    int64
		Error         string
	}{
		{
			Content:    bodyWithSum,
			PayloadLen: int64(len(body)),
		},
		{
			Content:    emptyBodySum[:],
			PayloadLen: 0,
		},
		{
			Content:    badBodySum,
			PayloadLen: int64(len(body)),
			Error:      "expected MD5 checksum",
		},
		{
			Content:    emptyBodySum[:len(emptyBodySum)-2],
			PayloadLen: 0,
			Error:      "unexpected EOF",
		},
		{
			Content:    body,
			PayloadLen: int64(len(body) * 2),
			Error:      "unexpected EOF",
		},
		{
			ContentReader: ioutil.NopCloser(errorReader{}),
			PayloadLen:    int64(len(body)),
			Error:         "errorReader error",
		},
	}

	for i, c := range cases {
		reader := c.ContentReader
		if reader == nil {
			reader = ioutil.NopCloser(bytes.NewReader(c.Content))
		}
		v := newMD5ValidationReader(reader, c.PayloadLen)

		var actual bytes.Buffer
		n, err := io.Copy(&actual, v)
		if len(c.Error) != 0 {
			if err == nil {
				t.Errorf("%d, expect error, got none", i)
			}
			if e, a := c.Error, err.Error(); !strings.Contains(a, e) {
				t.Errorf("%d, expect %v error to be in %v", i, e, a)
			}
			continue
		} else if err != nil {
			t.Errorf("%d, expect no error, got %v", i, err)
			continue
		}
		if e, a := c.PayloadLen, n; e != a {
			t.Errorf("%d, expect %v len, got %v", i, e, a)
		}

		if e, a := c.Content[:c.PayloadLen], actual.Bytes(); !bytes.Equal(e, a) {
			t.Errorf("%d, expect:\n%v\nactual:\n%v", i, e, a)
		}
	}
}

36 vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go (generated, vendored)
@@ -1,36 +0,0 @@
package s3

import (
	"crypto/md5"
	"encoding/base64"
	"io"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
)

// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
// require it.
func contentMD5(r *request.Request) {
	h := md5.New()

	// hash the body. seek back to the first position after reading to reset
	// the body for transmission. copy errors may be assumed to be from the
	// body.
	_, err := io.Copy(h, r.Body)
	if err != nil {
		r.Error = awserr.New("ContentMD5", "failed to read body", err)
		return
	}
	_, err = r.Body.Seek(0, 0)
	if err != nil {
		r.Error = awserr.New("ContentMD5", "failed to seek body", err)
		return
	}

	// encode the md5 checksum in base64 and set the request header.
	sum := h.Sum(nil)
	sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
	base64.StdEncoding.Encode(sum64, sum)
	r.HTTPRequest.Header.Set("Content-MD5", string(sum64))
}

6 vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go (generated, vendored)
@@ -42,6 +42,12 @@ func defaultInitRequestFn(r *request.Request) {
		r.Handlers.Validate.PushFront(populateLocationConstraint)
	case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
		r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
	case opPutObject, opUploadPart:
		r.Handlers.Build.PushBack(computeBodyHashes)
		// Disabled until #1837 root issue is resolved.
		//	case opGetObject:
		//		r.Handlers.Build.PushBack(askForTxEncodingAppendMD5)
		//		r.Handlers.Unmarshal.PushBack(useMD5ValidationReader)
	}
}

14 vendor/github.com/aws/aws-sdk-go/service/s3/examples_test.go (generated, vendored)
@@ -1480,6 +1480,12 @@ func ExampleS3_PutBucketLifecycleConfiguration_shared00() {
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{
				{
					Expiration: &s3.LifecycleExpiration{
						Days: aws.Int64(3650),
					},
					Filter: &s3.LifecycleRuleFilter{
						Prefix: aws.String("documents/"),
					},
					ID:     aws.String("TestOnly"),
					Status: aws.String("Enabled"),
					Transitions: []*s3.Transition{
@@ -1525,6 +1531,10 @@ func ExampleS3_PutBucketLogging_shared00() {
			TargetBucket: aws.String("targetbucket"),
			TargetGrants: []*s3.TargetGrant{
				{
					Grantee: &s3.Grantee{
						Type: aws.String("Group"),
						URI:  aws.String("http://acs.amazonaws.com/groups/global/AllUsers"),
					},
					Permission: aws.String("READ"),
				},
			},
@@ -1628,6 +1638,10 @@ func ExampleS3_PutBucketReplication_shared00() {
		Role: aws.String("arn:aws:iam::123456789012:role/examplerole"),
		Rules: []*s3.ReplicationRule{
			{
				Destination: &s3.Destination{
					Bucket:       aws.String("arn:aws:s3:::destinationbucket"),
					StorageClass: aws.String("STANDARD"),
				},
				Prefix: aws.String(""),
				Status: aws.String("Enabled"),
			},

6 vendor/github.com/aws/aws-sdk-go/service/s3/s3crypto/encryption_client.go (generated, vendored)
@@ -7,6 +7,7 @@ import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/internal/sdkio"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
)
@@ -71,12 +72,11 @@ func (c *EncryptionClient) PutObjectRequest(input *s3.PutObjectInput) (*request.
	req, out := c.S3Client.PutObjectRequest(input)

	// Get Size of file
	n, err := input.Body.Seek(0, 2)
	n, err := aws.SeekerLen(input.Body)
	if err != nil {
		req.Error = err
		return req, out
	}
	input.Body.Seek(0, 0)

	dst, err := getWriterStore(req, c.TempFolderPath, n >= c.MinFileSize)
	if err != nil {
@@ -115,7 +115,7 @@ func (c *EncryptionClient) PutObjectRequest(input *s3.PutObjectInput) (*request.
	shaHex := hex.EncodeToString(sha.GetValue())
	req.HTTPRequest.Header.Set("X-Amz-Content-Sha256", shaHex)

	dst.Seek(0, 0)
	dst.Seek(0, sdkio.SeekStart)
	input.Body = dst

	err = c.SaveStrategy.Save(env, r)
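
The encryption client now sizes the body with aws.SeekerLen instead of seeking to the end and rewinding by hand. A small sketch of that helper on a plain seekable reader; the 1 KiB buffer is only illustrative:

package main

import (
	"bytes"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// SeekerLen reports the remaining bytes of a seekable body without the
	// caller having to Seek(0, io.SeekEnd) and then restore the position.
	body := bytes.NewReader(make([]byte, 1024))
	n, err := aws.SeekerLen(body)
	fmt.Println(n, err) // 1024 <nil>
}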

4 vendor/github.com/aws/aws-sdk-go/service/s3/s3crypto/helper_test.go (generated, vendored)
@@ -3,6 +3,8 @@ package s3crypto
import (
	"bytes"
	"testing"

	"github.com/aws/aws-sdk-go/internal/sdkio"
)

func TestBytesReadWriteSeeker_Read(t *testing.T) {
@@ -55,7 +57,7 @@ func TestBytesReadWriteSeeker_Write(t *testing.T) {
func TestBytesReadWriteSeeker_Seek(t *testing.T) {
	b := &bytesReadWriteSeeker{[]byte{1, 2, 3}, 0}
	expected := []byte{2, 3}
	m, err := b.Seek(1, 0)
	m, err := b.Seek(1, sdkio.SeekStart)

	if err != nil {
		t.Errorf("expected no error, but received %v", err)

5 vendor/github.com/aws/aws-sdk-go/service/s3/s3crypto/strategy.go (generated, vendored)
@@ -63,9 +63,12 @@ func (strat HeaderV2SaveStrategy) Save(env Envelope, req *request.Request) error
	input.Metadata[http.CanonicalHeaderKey(matDescHeader)] = &env.MatDesc
	input.Metadata[http.CanonicalHeaderKey(wrapAlgorithmHeader)] = &env.WrapAlg
	input.Metadata[http.CanonicalHeaderKey(cekAlgorithmHeader)] = &env.CEKAlg
	input.Metadata[http.CanonicalHeaderKey(tagLengthHeader)] = &env.TagLen
	input.Metadata[http.CanonicalHeaderKey(unencryptedMD5Header)] = &env.UnencryptedMD5
	input.Metadata[http.CanonicalHeaderKey(unencryptedContentLengthHeader)] = &env.UnencryptedContentLen

	if len(env.TagLen) > 0 {
		input.Metadata[http.CanonicalHeaderKey(tagLengthHeader)] = &env.TagLen
	}
	return nil
}

89 vendor/github.com/aws/aws-sdk-go/service/s3/s3crypto/strategy_test.go (generated, vendored)
@@ -11,38 +11,67 @@ import (
)

func TestHeaderV2SaveStrategy(t *testing.T) {
	env := s3crypto.Envelope{
		CipherKey:             "Foo",
		IV:                    "Bar",
		MatDesc:               "{}",
		WrapAlg:               s3crypto.KMSWrap,
		CEKAlg:                s3crypto.AESGCMNoPadding,
		TagLen:                "128",
		UnencryptedMD5:        "hello",
		UnencryptedContentLen: "0",
	}
	params := &s3.PutObjectInput{}
	req := &request.Request{
		Params: params,
	}
	strat := s3crypto.HeaderV2SaveStrategy{}
	err := strat.Save(env, req)
	if err != nil {
		t.Errorf("expected no error, but received %v", err)
	cases := []struct {
		env      s3crypto.Envelope
		expected map[string]*string
	}{
		{
			s3crypto.Envelope{
				CipherKey:             "Foo",
				IV:                    "Bar",
				MatDesc:               "{}",
				WrapAlg:               s3crypto.KMSWrap,
				CEKAlg:                s3crypto.AESGCMNoPadding,
				TagLen:                "128",
				UnencryptedMD5:        "hello",
				UnencryptedContentLen: "0",
			},
			map[string]*string{
				"X-Amz-Key-V2":                     aws.String("Foo"),
				"X-Amz-Iv":                         aws.String("Bar"),
				"X-Amz-Matdesc":                    aws.String("{}"),
				"X-Amz-Wrap-Alg":                   aws.String(s3crypto.KMSWrap),
				"X-Amz-Cek-Alg":                    aws.String(s3crypto.AESGCMNoPadding),
				"X-Amz-Tag-Len":                    aws.String("128"),
				"X-Amz-Unencrypted-Content-Md5":    aws.String("hello"),
				"X-Amz-Unencrypted-Content-Length": aws.String("0"),
			},
		},
		{
			s3crypto.Envelope{
				CipherKey:             "Foo",
				IV:                    "Bar",
				MatDesc:               "{}",
				WrapAlg:               s3crypto.KMSWrap,
				CEKAlg:                s3crypto.AESGCMNoPadding,
				UnencryptedMD5:        "hello",
				UnencryptedContentLen: "0",
			},
			map[string]*string{
				"X-Amz-Key-V2":                     aws.String("Foo"),
				"X-Amz-Iv":                         aws.String("Bar"),
				"X-Amz-Matdesc":                    aws.String("{}"),
				"X-Amz-Wrap-Alg":                   aws.String(s3crypto.KMSWrap),
				"X-Amz-Cek-Alg":                    aws.String(s3crypto.AESGCMNoPadding),
				"X-Amz-Unencrypted-Content-Md5":    aws.String("hello"),
				"X-Amz-Unencrypted-Content-Length": aws.String("0"),
			},
		},
	}

	expected := map[string]*string{
		"X-Amz-Key-V2":                     aws.String("Foo"),
		"X-Amz-Iv":                         aws.String("Bar"),
		"X-Amz-Matdesc":                    aws.String("{}"),
		"X-Amz-Wrap-Alg":                   aws.String(s3crypto.KMSWrap),
		"X-Amz-Cek-Alg":                    aws.String(s3crypto.AESGCMNoPadding),
		"X-Amz-Tag-Len":                    aws.String("128"),
		"X-Amz-Unencrypted-Content-Md5":    aws.String("hello"),
		"X-Amz-Unencrypted-Content-Length": aws.String("0"),
	}
	for _, c := range cases {
		params := &s3.PutObjectInput{}
		req := &request.Request{
			Params: params,
		}
		strat := s3crypto.HeaderV2SaveStrategy{}
		err := strat.Save(c.env, req)
		if err != nil {
			t.Errorf("expected no error, but received %v", err)
		}

		if !reflect.DeepEqual(expected, params.Metadata) {
			t.Errorf("expected %v, but received %v", expected, params.Metadata)
		if !reflect.DeepEqual(c.expected, params.Metadata) {
			t.Errorf("expected %v, but received %v", c.expected, params.Metadata)
		}
	}
}

18 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch.go (generated, vendored)
@@ -60,7 +60,15 @@ func newError(err error, bucket, key *string) Error {
}

func (err *Error) Error() string {
	return fmt.Sprintf("failed to upload %q to %q:\n%s", err.Key, err.Bucket, err.OrigErr.Error())
	origErr := ""
	if err.OrigErr != nil {
		origErr = ":\n" + err.OrigErr.Error()
	}
	return fmt.Sprintf("failed to upload %q to %q%s",
		aws.StringValue(err.Key),
		aws.StringValue(err.Bucket),
		origErr,
	)
}

// NewBatchError will return a BatchError that satisfies the awserr.Error interface.
@@ -312,7 +320,7 @@ func (d *BatchDelete) Delete(ctx aws.Context, iter BatchDeleteIterator) error {
	}

	if len(input.Delete.Objects) == d.BatchSize || !parity {
		if err := deleteBatch(d, input, objects); err != nil {
		if err := deleteBatch(ctx, d, input, objects); err != nil {
			errs = append(errs, err...)
		}

@@ -331,7 +339,7 @@ func (d *BatchDelete) Delete(ctx aws.Context, iter BatchDeleteIterator) error {
	}

	if input != nil && len(input.Delete.Objects) > 0 {
		if err := deleteBatch(d, input, objects); err != nil {
		if err := deleteBatch(ctx, d, input, objects); err != nil {
			errs = append(errs, err...)
		}
	}
@@ -352,10 +360,10 @@ func initDeleteObjectsInput(o *s3.DeleteObjectInput) *s3.DeleteObjectsInput {
}

// deleteBatch will delete a batch of items in the objects parameters.
func deleteBatch(d *BatchDelete, input *s3.DeleteObjectsInput, objects []BatchDeleteObject) []Error {
func deleteBatch(ctx aws.Context, d *BatchDelete, input *s3.DeleteObjectsInput, objects []BatchDeleteObject) []Error {
	errs := []Error{}

	if result, err := d.Client.DeleteObjects(input); err != nil {
	if result, err := d.Client.DeleteObjectsWithContext(ctx, input); err != nil {
		for i := 0; i < len(input.Delete.Objects); i++ {
			errs = append(errs, newError(err, input.Bucket, input.Delete.Objects[i].Key))
		}

116 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch_1_7_test.go (generated, vendored, Normal file)
@@ -0,0 +1,116 @@
// +build go1.7

package s3manager

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/service/s3"
)

// #1790 bug
func TestBatchDeleteContext(t *testing.T) {
	cases := []struct {
		objects  []BatchDeleteObject
		size     int
		expected int
		ctx      aws.Context
		closeAt  int
		errCheck func(error) (string, bool)
	}{
		{
			[]BatchDeleteObject{
				{
					Object: &s3.DeleteObjectInput{
						Key:    aws.String("1"),
						Bucket: aws.String("bucket1"),
					},
				},
				{
					Object: &s3.DeleteObjectInput{
						Key:    aws.String("2"),
						Bucket: aws.String("bucket2"),
					},
				},
				{
					Object: &s3.DeleteObjectInput{
						Key:    aws.String("3"),
						Bucket: aws.String("bucket3"),
					},
				},
				{
					Object: &s3.DeleteObjectInput{
						Key:    aws.String("4"),
						Bucket: aws.String("bucket4"),
					},
				},
			},
			1,
			0,
			aws.BackgroundContext(),
			0,
			func(err error) (string, bool) {
				batchErr, ok := err.(*BatchError)
				if !ok {
					return "not BatchError type", false
				}

				errs := batchErr.Errors
				if len(errs) != 4 {
					return fmt.Sprintf("expected 4, but received %d", len(errs)), false
				}

				for _, tempErr := range errs {
					aerr, ok := tempErr.OrigErr.(awserr.Error)
					if !ok {
						return "not awserr.Error type", false
					}

					if code := aerr.Code(); code != request.CanceledErrorCode {
						return fmt.Sprintf("expected %q, but received %q", request.CanceledErrorCode, code), false
					}
				}
				return "", true
			},
		},
	}

	count := 0
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNoContent)
		count++
	}))

	svc := &mockS3Client{S3: buildS3SvcClient(server.URL)}
	for i, c := range cases {
		ctx, done := context.WithCancel(c.ctx)
		defer done()
		if i == c.closeAt {
			done()
		}

		batcher := BatchDelete{
			Client:    svc,
			BatchSize: c.size,
		}

		err := batcher.Delete(ctx, &DeleteObjectsIterator{Objects: c.objects})

		if msg, ok := c.errCheck(err); !ok {
			t.Error(msg)
		}

		if count != c.expected {
			t.Errorf("Case %d: expected %d, but received %d", i, c.expected, count)
		}

		count = 0
	}
}

24 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/batch_test.go (generated, vendored)
@@ -321,6 +321,30 @@ func (client *mockS3Client) ListObjects(input *s3.ListObjectsInput) (*s3.ListObj
	return object, nil
}

func TestNilOrigError(t *testing.T) {
	err := Error{
		Bucket: aws.String("bucket"),
		Key:    aws.String("key"),
	}
	errStr := err.Error()
	const expected1 = `failed to upload "key" to "bucket"`
	if errStr != expected1 {
		t.Errorf("Expected %s, but received %s", expected1, errStr)
	}

	err = Error{
		OrigErr: errors.New("foo"),
		Bucket:  aws.String("bucket"),
		Key:     aws.String("key"),
	}
	errStr = err.Error()
	const expected2 = "failed to upload \"key\" to \"bucket\":\nfoo"
	if errStr != expected2 {
		t.Errorf("Expected %s, but received %s", expected2, errStr)
	}

}

func TestBatchDeleteList(t *testing.T) {
	count := 0

15 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region.go (generated, vendored)
@@ -14,8 +14,11 @@ import (
//
// The request will not be signed, and will not use your AWS credentials.
//
// A "NotFound" error code will be returned if the bucket does not exist in
// the AWS partition the regionHint belongs to.
// A "NotFound" error code will be returned if the bucket does not exist in the
// AWS partition the regionHint belongs to. If the regionHint parameter is an
// empty string GetBucketRegion will fall back to the ConfigProvider's region
// config. If the regionHint is empty, and the ConfigProvider does not have a
// region value, an error will be returned.
//
// For example to get the region of a bucket which exists in "eu-central-1"
// you could provide a region hint of "us-west-2".
@@ -33,9 +36,11 @@ import (
//	fmt.Printf("Bucket %s is in %s region\n", bucket, region)
//
func GetBucketRegion(ctx aws.Context, c client.ConfigProvider, bucket, regionHint string, opts ...request.Option) (string, error) {
	svc := s3.New(c, &aws.Config{
		Region: aws.String(regionHint),
	})
	var cfg aws.Config
	if len(regionHint) != 0 {
		cfg.Region = aws.String(regionHint)
	}
	svc := s3.New(c, &cfg)
	return GetBucketRegionWithClient(ctx, svc, bucket, opts...)
}
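
The expanded GetBucketRegion documentation above describes the new fallback: an empty regionHint defers to the ConfigProvider's own region. A hedged usage sketch of that path; "example-bucket" is a placeholder and the call performs a real, unsigned HTTP request:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	// The session's region acts as the hint when the regionHint argument is "".
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))

	// Unsigned lookup; no credentials required, but it does hit the network.
	region, err := s3manager.GetBucketRegion(aws.BackgroundContext(), sess, "example-bucket", "")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("bucket lives in", region)
}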

20 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/bucket_region_test.go (generated, vendored)
@@ -21,12 +21,15 @@ func testSetupGetBucketRegionServer(region string, statusCode int, incHeader boo
}

var testGetBucketRegionCases = []struct {
	RespRegion string
	StatusCode int
	RespRegion      string
	StatusCode      int
	HintRegion      string
	ExpectReqRegion string
}{
	{"bucket-region", 301},
	{"bucket-region", 403},
	{"bucket-region", 200},
	{"bucket-region", 301, "hint-region", ""},
	{"bucket-region", 403, "hint-region", ""},
	{"bucket-region", 200, "hint-region", ""},
	{"bucket-region", 200, "", "default-region"},
}

func TestGetBucketRegion_Exists(t *testing.T) {
@@ -34,11 +37,12 @@ func TestGetBucketRegion_Exists(t *testing.T) {
	server := testSetupGetBucketRegionServer(c.RespRegion, c.StatusCode, true)

	sess := unit.Session.Copy()
	sess.Config.Region = aws.String("default-region")
	sess.Config.Endpoint = aws.String(server.URL)
	sess.Config.DisableSSL = aws.Bool(true)

	ctx := aws.BackgroundContext()
	region, err := GetBucketRegion(ctx, sess, "bucket", "region")
	region, err := GetBucketRegion(ctx, sess, "bucket", c.HintRegion)
	if err != nil {
		t.Fatalf("%d, expect no error, got %v", i, err)
	}
@@ -56,7 +60,7 @@ func TestGetBucketRegion_NotExists(t *testing.T) {
	sess.Config.DisableSSL = aws.Bool(true)

	ctx := aws.BackgroundContext()
	region, err := GetBucketRegion(ctx, sess, "bucket", "region")
	region, err := GetBucketRegion(ctx, sess, "bucket", "hint-region")
	if err == nil {
		t.Fatalf("expect error, but did not get one")
	}
@@ -74,7 +78,7 @@ func TestGetBucketRegionWithClient(t *testing.T) {
	server := testSetupGetBucketRegionServer(c.RespRegion, c.StatusCode, true)

	svc := s3.New(unit.Session, &aws.Config{
		Region: aws.String("region"),
		Region:     aws.String("hint-region"),
		Endpoint:   aws.String(server.URL),
		DisableSSL: aws.Bool(true),
	})

39 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go (generated, vendored)
@@ -443,6 +443,8 @@ type uploader struct {

	readerPos int64 // current reader position
	totalSize int64 // set to -1 if the size is not known

	bufferPool sync.Pool
}

// internal logic for deciding whether to upload a single part or use a
@@ -456,7 +458,7 @@ func (u *uploader) upload() (*UploadOutput, error) {
	}

	// Do one read to determine if we have more than one part
	reader, _, err := u.nextReader()
	reader, _, part, err := u.nextReader()
	if err == io.EOF { // single part
		return u.singlePart(reader)
	} else if err != nil {
@@ -464,7 +466,7 @@ func (u *uploader) upload() (*UploadOutput, error) {
	}

	mu := multiuploader{uploader: u}
	return mu.upload(reader)
	return mu.upload(reader, part)
}

// init will initialize all default options.
@@ -476,6 +478,10 @@ func (u *uploader) init() {
		u.cfg.PartSize = DefaultUploadPartSize
	}

	u.bufferPool = sync.Pool{
		New: func() interface{} { return make([]byte, u.cfg.PartSize) },
	}

	// Try to get the total size for some optimizations
	u.initSize()
}
@@ -487,10 +493,7 @@ func (u *uploader) initSize() {

	switch r := u.in.Body.(type) {
	case io.Seeker:
		pos, _ := r.Seek(0, 1)
		defer r.Seek(pos, 0)

		n, err := r.Seek(0, 2)
		n, err := aws.SeekerLen(r)
		if err != nil {
			return
		}
@@ -510,7 +513,7 @@ func (u *uploader) initSize() {
// This operation increases the shared u.readerPos counter, but note that it
// does not need to be wrapped in a mutex because nextReader is only called
// from the main thread.
func (u *uploader) nextReader() (io.ReadSeeker, int, error) {
func (u *uploader) nextReader() (io.ReadSeeker, int, []byte, error) {
	type readerAtSeeker interface {
		io.ReaderAt
		io.ReadSeeker
@@ -532,14 +535,14 @@ func (u *uploader) nextReader() (io.ReadSeeker, int, error) {
		reader := io.NewSectionReader(r, u.readerPos, n)
		u.readerPos += n

		return reader, int(n), err
		return reader, int(n), nil, err

	default:
		part := make([]byte, u.cfg.PartSize)
		part := u.bufferPool.Get().([]byte)
		n, err := readFillBuf(r, part)
		u.readerPos += int64(n)

		return bytes.NewReader(part[0:n]), n, err
		return bytes.NewReader(part[0:n]), n, part, err
	}
}

@@ -589,8 +592,9 @@ type multiuploader struct {

// keeps track of a single chunk of data being sent to S3.
type chunk struct {
	buf io.ReadSeeker
	num int64
	buf  io.ReadSeeker
	part []byte
	num  int64
}

// completedParts is a wrapper to make parts sortable by their part number,
@@ -603,7 +607,7 @@ func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].Pa

// upload will perform a multipart upload using the firstBuf buffer containing
// the first chunk of data.
func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) {
func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*UploadOutput, error) {
	params := &s3.CreateMultipartUploadInput{}
	awsutil.Copy(params, u.in)

@@ -623,7 +627,7 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) {

	// Send part 1 to the workers
	var num int64 = 1
	ch <- chunk{buf: firstBuf, num: num}
	ch <- chunk{buf: firstBuf, part: firstPart, num: num}

	// Read and queue the rest of the parts
	for u.geterr() == nil && err == nil {
@@ -644,7 +648,8 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) {

		var reader io.ReadSeeker
		var nextChunkLen int
		reader, nextChunkLen, err = u.nextReader()
		var part []byte
		reader, nextChunkLen, part, err = u.nextReader()

		if err != nil && err != io.EOF {
			u.seterr(awserr.New(
@@ -661,7 +666,7 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) {
			break
		}

		ch <- chunk{buf: reader, num: num}
		ch <- chunk{buf: reader, part: part, num: num}
	}

	// Close the channel, wait for workers, and complete upload
@@ -717,6 +722,8 @@ func (u *multiuploader) send(c chunk) error {
		PartNumber: &c.num,
	}
	resp, err := u.cfg.S3.UploadPartWithContext(u.ctx, params, u.cfg.RequestOptions...)
	// put the byte array back into the pool to conserve memory
	u.bufferPool.Put(c.part)
	if err != nil {
		return err
	}
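
The uploader changes above thread a raw part buffer through nextReader, chunk, and send so the slice can be returned to a sync.Pool once the part is sent, instead of allocating cfg.PartSize bytes for every part. A standalone sketch of that reuse pattern using only the standard library; the 8-byte part size is purely illustrative:

package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

func main() {
	const partSize = 8
	// Pool hands out part-sized buffers; Put returns them for the next part.
	pool := sync.Pool{New: func() interface{} { return make([]byte, partSize) }}

	src := strings.NewReader("0123456789abcdefXYZ")
	for part := 1; ; part++ {
		buf := pool.Get().([]byte)
		n, err := io.ReadFull(src, buf)
		if n > 0 {
			// "send" the chunk, then recycle the buffer.
			fmt.Printf("part %d: %q\n", part, buf[:n])
		}
		pool.Put(buf)
		if err != nil { // io.EOF or io.ErrUnexpectedEOF ends the loop
			break
		}
	}
}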

3 vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go (generated, vendored)
@@ -7,6 +7,7 @@ import (

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/internal/sdkio"
)

func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
@@ -17,7 +18,7 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
	}
	body := bytes.NewReader(b)
	r.HTTPResponse.Body = ioutil.NopCloser(body)
	defer body.Seek(0, 0)
	defer body.Seek(0, sdkio.SeekStart)

	if body.Len() == 0 {
		// If there is no body don't attempt to parse the body.