Update dependencies pre-release

Nick Craig-Wood 2016-11-05 18:35:34 +00:00
parent f7af730b50
commit b83f7ac06b
126 changed files with 5782 additions and 19795 deletions

Godeps/Godeps.json (generated): 206 changed lines

@ -20,13 +20,13 @@
},
{
"ImportPath": "cloud.google.com/go/compute/metadata",
"Comment": "v0.1.0-185-gfe3d41e",
"Rev": "fe3d41e1ecb2ce36ad3a979037c9b9a2b726226f"
"Comment": "v0.3.0-49-gbd8a5e8",
"Rev": "bd8a5e8b5d233a78bb4ef057f38eae438f8b6f55"
},
{
"ImportPath": "cloud.google.com/go/internal",
"Comment": "v0.1.0-185-gfe3d41e",
"Rev": "fe3d41e1ecb2ce36ad3a979037c9b9a2b726226f"
"Comment": "v0.3.0-49-gbd8a5e8",
"Rev": "bd8a5e8b5d233a78bb4ef057f38eae438f8b6f55"
},
{
"ImportPath": "github.com/Unknwon/goconfig",
@ -39,138 +39,123 @@
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/awserr",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/client",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/stscreds",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/defaults",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/request",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/session",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws/signer/v4",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/endpoints",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/signer/v4",
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/waiter",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/s3",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/s3/s3iface",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/s3/s3manager",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/sts",
"Comment": "v1.4.12-6-g388802d",
"Rev": "388802d0cf038f72020ee99b51781d181e894c1c"
"Comment": "v1.1.7",
"Rev": "13a12060f716145019378a10e2806c174356b857"
},
{
"ImportPath": "github.com/cpuguy83/go-md2man/md2man",
@ -188,7 +173,7 @@
},
{
"ImportPath": "github.com/golang/protobuf/proto",
"Rev": "98fa357170587e470c5f27d3c3ea0947b71eb455"
"Rev": "552c7b9542c194800fd493123b3798ef0a832032"
},
{
"ImportPath": "github.com/google/go-querystring/query",
@ -213,8 +198,8 @@
},
{
"ImportPath": "github.com/pkg/errors",
"Comment": "v0.8.0-1-g839d9e9",
"Rev": "839d9e913e063e28dfd0e6c7b7512793e0a48be9"
"Comment": "v0.8.0-2-g248dadf",
"Rev": "248dadf4e9068a0b3e79f02ed0a610d935de5302"
},
{
"ImportPath": "github.com/pmezard/go-difflib/difflib",
@ -240,48 +225,29 @@
},
{
"ImportPath": "github.com/spf13/cobra",
"Rev": "ec2fe7859914a5106dcab4e7901633d959bfc2f4"
"Rev": "6e91dded25d73176bf7f60b40dd7aa1f0bf9be8d"
},
{
"ImportPath": "github.com/spf13/cobra/doc",
"Rev": "ec2fe7859914a5106dcab4e7901633d959bfc2f4"
"Rev": "6e91dded25d73176bf7f60b40dd7aa1f0bf9be8d"
},
{
"ImportPath": "github.com/spf13/pflag",
"Rev": "bf8481a6aebc13a8aab52e699ffe2e79771f5a3f"
"Rev": "5ccb023bc27df288a957c5e994cd44fd19619465"
},
{
"ImportPath": "github.com/stacktic/dropbox",
"Rev": "58f839b21094d5e0af7caf613599830589233d20"
},
{
"ImportPath": "github.com/stretchr/objx",
"Rev": "1a9d0bb9f541897e62256577b352fdbc1fb4fd94"
},
{
"ImportPath": "github.com/stretchr/testify",
"Comment": "v1.1.4-4-g976c720",
"Rev": "976c720a22c8eb4eb6a0b4348ad85ad12491a506"
},
{
"ImportPath": "github.com/stretchr/testify/assert",
"Comment": "v1.1.4-4-g976c720",
"Rev": "976c720a22c8eb4eb6a0b4348ad85ad12491a506"
},
{
"ImportPath": "github.com/stretchr/testify/http",
"Comment": "v1.1.4-4-g976c720",
"Rev": "976c720a22c8eb4eb6a0b4348ad85ad12491a506"
},
{
"ImportPath": "github.com/stretchr/testify/mock",
"Comment": "v1.1.4-4-g976c720",
"Rev": "976c720a22c8eb4eb6a0b4348ad85ad12491a506"
"Comment": "v1.1.3-4-g1f4a164",
"Rev": "1f4a1643a57e798696635ea4c126e9127adb7d3c"
},
{
"ImportPath": "github.com/stretchr/testify/require",
"Comment": "v1.1.4-4-g976c720",
"Rev": "976c720a22c8eb4eb6a0b4348ad85ad12491a506"
"Comment": "v1.1.3-4-g1f4a164",
"Rev": "1f4a1643a57e798696635ea4c126e9127adb7d3c"
},
{
"ImportPath": "github.com/tsenart/tb",
@ -289,87 +255,87 @@
},
{
"ImportPath": "golang.org/x/crypto/nacl/secretbox",
"Rev": "5f31782cfb2b6373211f8f9fbf31283fa234b570"
"Rev": "c197bcf24cde29d3f73c7b4ac6fd41f4384e8af6"
},
{
"ImportPath": "golang.org/x/crypto/pbkdf2",
"Rev": "5f31782cfb2b6373211f8f9fbf31283fa234b570"
"Rev": "c197bcf24cde29d3f73c7b4ac6fd41f4384e8af6"
},
{
"ImportPath": "golang.org/x/crypto/poly1305",
"Rev": "5f31782cfb2b6373211f8f9fbf31283fa234b570"
"Rev": "c197bcf24cde29d3f73c7b4ac6fd41f4384e8af6"
},
{
"ImportPath": "golang.org/x/crypto/salsa20/salsa",
"Rev": "5f31782cfb2b6373211f8f9fbf31283fa234b570"
"Rev": "c197bcf24cde29d3f73c7b4ac6fd41f4384e8af6"
},
{
"ImportPath": "golang.org/x/crypto/scrypt",
"Rev": "5f31782cfb2b6373211f8f9fbf31283fa234b570"
"Rev": "c197bcf24cde29d3f73c7b4ac6fd41f4384e8af6"
},
{
"ImportPath": "golang.org/x/crypto/ssh/terminal",
"Rev": "5f31782cfb2b6373211f8f9fbf31283fa234b570"
"Rev": "c197bcf24cde29d3f73c7b4ac6fd41f4384e8af6"
},
{
"ImportPath": "golang.org/x/net/context",
"Rev": "0f2be02c5ddfa7c7936000ad519b04c6c795eab3"
"Rev": "6acef71eb69611914f7a30939ea9f6e194c78172"
},
{
"ImportPath": "golang.org/x/net/context/ctxhttp",
"Rev": "0f2be02c5ddfa7c7936000ad519b04c6c795eab3"
"Rev": "6acef71eb69611914f7a30939ea9f6e194c78172"
},
{
"ImportPath": "golang.org/x/oauth2",
"Rev": "3c3a985cb79f52a3190fbc056984415ca6763d01"
"Rev": "25b4fb1468cb89700c7c060cb99f30581a61f5e3"
},
{
"ImportPath": "golang.org/x/oauth2/google",
"Rev": "3c3a985cb79f52a3190fbc056984415ca6763d01"
"Rev": "25b4fb1468cb89700c7c060cb99f30581a61f5e3"
},
{
"ImportPath": "golang.org/x/oauth2/internal",
"Rev": "3c3a985cb79f52a3190fbc056984415ca6763d01"
"Rev": "25b4fb1468cb89700c7c060cb99f30581a61f5e3"
},
{
"ImportPath": "golang.org/x/oauth2/jws",
"Rev": "3c3a985cb79f52a3190fbc056984415ca6763d01"
"Rev": "25b4fb1468cb89700c7c060cb99f30581a61f5e3"
},
{
"ImportPath": "golang.org/x/oauth2/jwt",
"Rev": "3c3a985cb79f52a3190fbc056984415ca6763d01"
"Rev": "25b4fb1468cb89700c7c060cb99f30581a61f5e3"
},
{
"ImportPath": "golang.org/x/sys/unix",
"Rev": "9bb9f0998d48b31547d975974935ae9b48c7a03c"
"Rev": "c200b10b5d5e122be351b67af224adc6128af5bf"
},
{
"ImportPath": "golang.org/x/text/transform",
"Rev": "098f51fb687dbaba1f6efabeafbb6461203f9e21"
"Rev": "a71fd10341b064c10f4a81ceac72bcf70f26ea34"
},
{
"ImportPath": "golang.org/x/text/unicode/norm",
"Rev": "098f51fb687dbaba1f6efabeafbb6461203f9e21"
"Rev": "a71fd10341b064c10f4a81ceac72bcf70f26ea34"
},
{
"ImportPath": "google.golang.org/api/drive/v2",
"Rev": "adba394bac5800ff2e620d040e9401528f5b7615"
"Rev": "037d03010933c9fa8e57559c1b7faeec971688d1"
},
{
"ImportPath": "google.golang.org/api/gensupport",
"Rev": "adba394bac5800ff2e620d040e9401528f5b7615"
"Rev": "037d03010933c9fa8e57559c1b7faeec971688d1"
},
{
"ImportPath": "google.golang.org/api/googleapi",
"Rev": "adba394bac5800ff2e620d040e9401528f5b7615"
"Rev": "037d03010933c9fa8e57559c1b7faeec971688d1"
},
{
"ImportPath": "google.golang.org/api/googleapi/internal/uritemplates",
"Rev": "adba394bac5800ff2e620d040e9401528f5b7615"
"Rev": "037d03010933c9fa8e57559c1b7faeec971688d1"
},
{
"ImportPath": "google.golang.org/api/storage/v1",
"Rev": "adba394bac5800ff2e620d040e9401528f5b7615"
"Rev": "037d03010933c9fa8e57559c1b7faeec971688d1"
},
{
"ImportPath": "google.golang.org/appengine",


@ -42,12 +42,9 @@ type Error interface {
OrigErr() error
}
// BatchError is a batch of errors which also wraps lower level errors with
// code, message, and original errors. Calling Error() will include all errors
// that occurred in the batch.
//
// Deprecated: Replaced with BatchedErrors. Only defined for backwards
// compatibility.
// BatchError is a batch of errors which also wraps lower level errors with code, message,
// and original errors. Calling Error() will only return the error that is at the end
// of the list.
type BatchError interface {
// Satisfy the generic error interface.
error
@ -62,35 +59,17 @@ type BatchError interface {
OrigErrs() []error
}
// BatchedErrors is a batch of errors which also wraps lower level errors with
// code, message, and original errors. Calling Error() will include all errors
// that occurred in the batch.
//
// Replaces BatchError
type BatchedErrors interface {
// Satisfy the base Error interface.
Error
// Returns the original error if one was set. Nil is returned if not set.
OrigErrs() []error
}
// New returns an Error object described by the code, message, and origErr.
//
// If origErr satisfies the Error interface it will not be wrapped within a new
// Error object and will instead be returned.
func New(code, message string, origErr error) Error {
var errs []error
if origErr != nil {
errs = append(errs, origErr)
}
return newBaseError(code, message, errs)
return newBaseError(code, message, origErr)
}
// NewBatchError returns an BatchedErrors with a collection of errors as an
// array of errors.
func NewBatchError(code, message string, errs []error) BatchedErrors {
return newBaseError(code, message, errs)
// NewBatchError returns an baseError with an expectation of an array of errors
func NewBatchError(code, message string, errs []error) BatchError {
return newBaseErrors(code, message, errs)
}
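
For context on the hunk above, here is a minimal sketch of how callers typically use this package, assuming the vendored awserr API shown here (New wrapping a single error, NewBatchError wrapping a slice); the error strings are illustrative.

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
	// Wrap a low level error with a code and message.
	origErr := errors.New("connection reset")
	err := awserr.New("RequestError", "send request failed", origErr)
	fmt.Println(err.Code(), err.Message(), err.OrigErr())

	// Collect several errors under one batch error.
	batch := awserr.NewBatchError("RequestErrors", "multiple errors occurred",
		[]error{origErr, errors.New("timeout")})
	fmt.Println(batch.OrigErrs())
}
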
// A RequestFailure is an interface to extract request failure information from


@ -34,17 +34,36 @@ type baseError struct {
errs []error
}
// newBaseError returns an error object for the code, message, and errors.
// newBaseError returns an error object for the code, message, and err.
//
// code is a short no whitespace phrase depicting the classification of
// the error that is being created.
//
// message is the free flow string containing detailed information about the
// error.
// message is the free flow string containing detailed information about the error.
//
// origErrs is the error objects which will be nested under the new errors to
// be returned.
func newBaseError(code, message string, origErrs []error) *baseError {
// origErr is the error object which will be nested under the new error to be returned.
func newBaseError(code, message string, origErr error) *baseError {
b := &baseError{
code: code,
message: message,
}
if origErr != nil {
b.errs = append(b.errs, origErr)
}
return b
}
// newBaseErrors returns an error object for the code, message, and errors.
//
// code is a short no whitespace phrase depicting the classification of
// the error that is being created.
//
// message is the free flow string containing detailed information about the error.
//
// origErrs is the error objects which will be nested under the new errors to be returned.
func newBaseErrors(code, message string, origErrs []error) *baseError {
b := &baseError{
code: code,
message: message,
@ -84,26 +103,19 @@ func (b baseError) Message() string {
return b.message
}
// OrigErr returns the original error if one was set. Nil is returned if no
// error was set. This only returns the first element in the list. If the full
// list is needed, use BatchedErrors.
// OrigErr returns the original error if one was set. Nil is returned if no error
// was set. This only returns the first element in the list. If the full list is
// needed, use BatchError
func (b baseError) OrigErr() error {
switch len(b.errs) {
case 0:
return nil
case 1:
if size := len(b.errs); size > 0 {
return b.errs[0]
default:
if err, ok := b.errs[0].(Error); ok {
return NewBatchError(err.Code(), err.Message(), b.errs[1:])
}
return NewBatchError("BatchedErrors",
"multiple errors occurred", b.errs)
}
return nil
}
// OrigErrs returns the original errors if one was set. An empty slice is
// returned if no error was set.
// OrigErrs returns the original errors if one was set. An empty slice is returned if
// no error was set:w
func (b baseError) OrigErrs() []error {
return b.errs
}
@ -121,8 +133,8 @@ type requestError struct {
requestID string
}
// newRequestError returns a wrapped error with additional information for
// request status code, and service requestID.
// newRequestError returns a wrapped error with additional information for request
// status code, and service requestID.
//
// Should be used to wrap all request which involve service requests. Even if
// the request failed without a service response, but had an HTTP status code
@ -161,15 +173,6 @@ func (r requestError) RequestID() string {
return r.requestID
}
// OrigErrs returns the original errors if one was set. An empty slice is
// returned if no error was set.
func (r requestError) OrigErrs() []error {
if b, ok := r.awsError.(BatchedErrors); ok {
return b.OrigErrs()
}
return []error{r.OrigErr()}
}
// An error list that satisfies the golang interface
type errorList []error


@ -3,7 +3,6 @@ package awsutil
import (
"io"
"reflect"
"time"
)
// Copy deeply copies a src structure to dst. Useful for copying request and
@ -50,14 +49,7 @@ func rcopy(dst, src reflect.Value, root bool) {
} else {
e := src.Type().Elem()
if dst.CanSet() && !src.IsNil() {
if _, ok := src.Interface().(*time.Time); !ok {
dst.Set(reflect.New(e))
} else {
tempValue := reflect.New(e)
tempValue.Elem().Set(src.Elem())
// Sets time.Time's unexported values
dst.Set(tempValue)
}
dst.Set(reflect.New(e))
}
if src.Elem().IsValid() {
// Keep the current root state since the depth hasn't changed


@ -106,8 +106,8 @@ func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTer
if indexStar || index != nil {
nextvals = []reflect.Value{}
for _, valItem := range values {
value := reflect.Indirect(valItem)
for _, value := range values {
value := reflect.Indirect(value)
if value.Kind() != reflect.Slice {
continue
}


@ -91,10 +91,6 @@ func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
default:
if !v.IsValid() {
fmt.Fprint(buf, "<invalid value>")
return
}
format := "%v"
switch v.Interface().(type) {
case string:


@ -87,18 +87,9 @@ const logReqMsg = `DEBUG: Request %s/%s Details:
%s
-----------------------------------------------------`
const logReqErrMsg = `DEBUG ERROR: Request %s/%s:
---[ REQUEST DUMP ERROR ]-----------------------------
%s
-----------------------------------------------------`
func logRequest(r *request.Request) {
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
if err != nil {
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
return
}
dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody)
if logBody {
// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
@ -116,21 +107,11 @@ const logRespMsg = `DEBUG: Response %s/%s Details:
%s
-----------------------------------------------------`
const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
---[ RESPONSE DUMP ERROR ]-----------------------------
%s
-----------------------------------------------------`
func logResponse(r *request.Request) {
var msg = "no response data"
if r.HTTPResponse != nil {
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
dumpedBody, err := httputil.DumpResponse(r.HTTPResponse, logBody)
if err != nil {
r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
return
}
dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody)
msg = string(dumpedBody)
} else if r.Error != nil {
msg = r.Error.Error()


@ -1,8 +1,8 @@
package client
import (
"math"
"math/rand"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws/request"
@ -30,61 +30,16 @@ func (d DefaultRetryer) MaxRetries() int {
return d.NumMaxRetries
}
var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
// RetryRules returns the delay duration before retrying this request again
func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
// Set the upper limit of delay in retrying at ~five minutes
minTime := 30
throttle := d.shouldThrottle(r)
if throttle {
minTime = 500
}
retryCount := r.RetryCount
if retryCount > 13 {
retryCount = 13
} else if throttle && retryCount > 8 {
retryCount = 8
}
delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
delay := int(math.Pow(2, float64(r.RetryCount))) * (rand.Intn(30) + 30)
return time.Duration(delay) * time.Millisecond
}
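
A standalone sketch of the exponential backoff this hunk reverts to (2^retryCount multiplied by a random 30 to 59 ms base); the helper name below is illustrative and not part of the SDK.

package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

// backoffDelay mirrors the reverted RetryRules calculation:
// delay = 2^retryCount * (random 30..59) milliseconds.
func backoffDelay(retryCount int) time.Duration {
	delay := int(math.Pow(2, float64(retryCount))) * (rand.Intn(30) + 30)
	return time.Duration(delay) * time.Millisecond
}

func main() {
	for i := 0; i < 4; i++ {
		fmt.Printf("retry %d: %v\n", i, backoffDelay(i))
	}
}
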
// ShouldRetry returns true if the request should be retried.
// ShouldRetry returns if the request should be retried.
func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
if r.HTTPResponse.StatusCode >= 500 {
return true
}
return r.IsErrorRetryable() || d.shouldThrottle(r)
}
// ShouldThrottle returns true if the request should be throttled.
func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
if r.HTTPResponse.StatusCode == 502 ||
r.HTTPResponse.StatusCode == 503 ||
r.HTTPResponse.StatusCode == 504 {
return true
}
return r.IsErrorThrottle()
}
// lockedSource is a thread-safe implementation of rand.Source
type lockedSource struct {
lk sync.Mutex
src rand.Source
}
func (r *lockedSource) Int63() (n int64) {
r.lk.Lock()
n = r.src.Int63()
r.lk.Unlock()
return
}
func (r *lockedSource) Seed(seed int64) {
r.lk.Lock()
r.src.Seed(seed)
r.lk.Unlock()
return r.IsErrorRetryable()
}


@ -7,36 +7,24 @@ import (
"github.com/aws/aws-sdk-go/aws/credentials"
)
// UseServiceDefaultRetries instructs the config to use the service's own
// default number of retries. This will be the default action if
// Config.MaxRetries is nil also.
// UseServiceDefaultRetries instructs the config to use the service's own default
// number of retries. This will be the default action if Config.MaxRetries
// is nil also.
const UseServiceDefaultRetries = -1
// RequestRetryer is an alias for a type that implements the request.Retryer
// interface.
// RequestRetryer is an alias for a type that implements the request.Retryer interface.
type RequestRetryer interface{}
// A Config provides service configuration for service clients. By default,
// all clients will use the defaults.DefaultConfig tructure.
//
// // Create Session with MaxRetry configuration to be shared by multiple
// // service clients.
// sess, err := session.NewSession(&aws.Config{
// MaxRetries: aws.Int(3),
// })
//
// // Create S3 service client with a specific Region.
// svc := s3.New(sess, &aws.Config{
// Region: aws.String("us-west-2"),
// })
// all clients will use the {defaults.DefaultConfig} structure.
type Config struct {
// Enables verbose error printing of all credential chain errors.
// Should be used when wanting to see all errors while attempting to
// retrieve credentials.
// Should be used when wanting to see all errors while attempting to retreive
// credentials.
CredentialsChainVerboseErrors *bool
// The credentials object to use when signing requests. Defaults to a
// chain of credential providers to search for credentials in environment
// The credentials object to use when signing requests. Defaults to
// a chain of credential providers to search for credentials in environment
// variables, shared credential file, and EC2 Instance Roles.
Credentials *credentials.Credentials
@ -75,12 +63,11 @@ type Config struct {
Logger Logger
// The maximum number of times that a request will be retried for failures.
// Defaults to -1, which defers the max retry setting to the service
// specific configuration.
// Defaults to -1, which defers the max retry setting to the service specific
// configuration.
MaxRetries *int
// Retryer guides how HTTP requests should be retried in case of
// recoverable failures.
// Retryer guides how HTTP requests should be retried in case of recoverable failures.
//
// When nil or the value does not implement the request.Retryer interface,
// the request.DefaultRetryer will be used.
@ -95,8 +82,8 @@ type Config struct {
//
Retryer RequestRetryer
// Disables semantic parameter validation, which validates input for
// missing required fields and/or other semantic request input errors.
// Disables semantic parameter validation, which validates input for missing
// required fields and/or other semantic request input errors.
DisableParamValidation *bool
// Disables the computation of request and response checksums, e.g.,
@ -104,8 +91,8 @@ type Config struct {
DisableComputeChecksums *bool
// Set this to `true` to force the request to use path-style addressing,
// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
// will use virtual hosted bucket addressing when possible
// i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will
// use virtual hosted bucket addressing when possible
// (`http://BUCKET.s3.amazonaws.com/KEY`).
//
// @note This configuration option is specific to the Amazon S3 service.
@ -113,93 +100,28 @@ type Config struct {
// Amazon S3: Virtual Hosting of Buckets
S3ForcePathStyle *bool
// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
// header to PUT requests over 2MB of content. 100-Continue instructs the
// HTTP client not to send the body until the service responds with a
// `continue` status. This is useful to prevent sending the request body
// until after the request is authenticated, and validated.
//
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
//
// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
// `ExpectContinueTimeout` for information on adjusting the continue wait
// timeout. https://golang.org/pkg/net/http/#Transport
//
// You should use this flag to disble 100-Continue if you experience issues
// with proxies or third party S3 compatible services.
S3Disable100Continue *bool
// Set this to `true` to enable S3 Accelerate feature. For all operations
// compatible with S3 Accelerate will use the accelerate endpoint for
// requests. Requests not compatible will fall back to normal S3 requests.
//
// The bucket must be enable for accelerate to be used with S3 client with
// accelerate enabled. If the bucket is not enabled for accelerate an error
// will be returned. The bucket name must be DNS compatible to also work
// with accelerate.
//
// Not compatible with UseDualStack requests will fail if both flags are
// specified.
S3UseAccelerate *bool
// Set this to `true` to disable the EC2Metadata client from overriding the
// default http.Client's Timeout. This is helpful if you do not want the
// EC2Metadata client to create a new http.Client. This options is only
// meaningful if you're not already using a custom HTTP client with the
// SDK. Enabled by default.
// default http.Client's Timeout. This is helpful if you do not want the EC2Metadata
// client to create a new http.Client. This options is only meaningful if you're not
// already using a custom HTTP client with the SDK. Enabled by default.
//
// Must be set and provided to the session.NewSession() in order to disable
// the EC2Metadata overriding the timeout for default credentials chain.
// Must be set and provided to the session.New() in order to disable the EC2Metadata
// overriding the timeout for default credentials chain.
//
// Example:
// sess, err := session.NewSession(aws.NewConfig().WithEC2MetadataDiableTimeoutOverride(true))
//
// sess := session.New(aws.NewConfig().WithEC2MetadataDiableTimeoutOverride(true))
// svc := s3.New(sess)
//
EC2MetadataDisableTimeoutOverride *bool
// Instructs the endpiont to be generated for a service client to
// be the dual stack endpoint. The dual stack endpoint will support
// both IPv4 and IPv6 addressing.
//
// Setting this for a service which does not support dual stack will fail
// to make requets. It is not recommended to set this value on the session
// as it will apply to all service clients created with the session. Even
// services which don't support dual stack endpoints.
//
// If the Endpoint config value is also provided the UseDualStack flag
// will be ignored.
//
// Only supported with.
//
// sess, err := session.NewSession()
//
// svc := s3.New(sess, &aws.Config{
// UseDualStack: aws.Bool(true),
// })
UseDualStack *bool
// SleepDelay is an override for the func the SDK will call when sleeping
// during the lifecycle of a request. Specifically this will be used for
// request delays. This value should only be used for testing. To adjust
// the delay of a request see the aws/client.DefaultRetryer and
// aws/request.Retryer.
SleepDelay func(time.Duration)
}
// NewConfig returns a new Config pointer that can be chained with builder
// methods to set multiple configuration values inline without using pointers.
// NewConfig returns a new Config pointer that can be chained with builder methods to
// set multiple configuration values inline without using pointers.
//
// // Create Session with MaxRetry configuration to be shared by multiple
// // service clients.
// sess, err := session.NewSession(aws.NewConfig().
// WithMaxRetries(3),
// )
// svc := s3.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10))
//
// // Create S3 service client with a specific Region.
// svc := s3.New(sess, aws.NewConfig().
// WithRegion("us-west-2"),
// )
func NewConfig() *Config {
return &Config{}
}
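
As a minimal sketch of the builder style these comments describe: WithRegion and WithMaxRetries are standard aws.Config chaining methods, and the region and retry values here are only examples.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Chain builder methods instead of writing pointer literals by hand.
	cfg := aws.NewConfig().
		WithRegion("us-west-2").
		WithMaxRetries(10).
		WithS3ForcePathStyle(true)

	fmt.Println(*cfg.Region, *cfg.MaxRetries, *cfg.S3ForcePathStyle)
}
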
@ -288,27 +210,6 @@ func (c *Config) WithS3ForcePathStyle(force bool) *Config {
return c
}
// WithS3Disable100Continue sets a config S3Disable100Continue value returning
// a Config pointer for chaining.
func (c *Config) WithS3Disable100Continue(disable bool) *Config {
c.S3Disable100Continue = &disable
return c
}
// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
// pointer for chaining.
func (c *Config) WithS3UseAccelerate(enable bool) *Config {
c.S3UseAccelerate = &enable
return c
}
// WithUseDualStack sets a config UseDualStack value returning a Config
// pointer for chaining.
func (c *Config) WithUseDualStack(enable bool) *Config {
c.UseDualStack = &enable
return c
}
// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
// returning a Config pointer for chaining.
func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
@ -387,18 +288,6 @@ func mergeInConfig(dst *Config, other *Config) {
dst.S3ForcePathStyle = other.S3ForcePathStyle
}
if other.S3Disable100Continue != nil {
dst.S3Disable100Continue = other.S3Disable100Continue
}
if other.S3UseAccelerate != nil {
dst.S3UseAccelerate = other.S3UseAccelerate
}
if other.UseDualStack != nil {
dst.UseDualStack = other.UseDualStack
}
if other.EC2MetadataDisableTimeoutOverride != nil {
dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
}


@ -2,7 +2,7 @@ package aws
import "time"
// String returns a pointer to the string value passed in.
// String returns a pointer to of the string value passed in.
func String(v string) *string {
return &v
}
@ -61,7 +61,7 @@ func StringValueMap(src map[string]*string) map[string]string {
return dst
}
// Bool returns a pointer to the bool value passed in.
// Bool returns a pointer to of the bool value passed in.
func Bool(v bool) *bool {
return &v
}
@ -120,7 +120,7 @@ func BoolValueMap(src map[string]*bool) map[string]bool {
return dst
}
// Int returns a pointer to the int value passed in.
// Int returns a pointer to of the int value passed in.
func Int(v int) *int {
return &v
}
@ -179,7 +179,7 @@ func IntValueMap(src map[string]*int) map[string]int {
return dst
}
// Int64 returns a pointer to the int64 value passed in.
// Int64 returns a pointer to of the int64 value passed in.
func Int64(v int64) *int64 {
return &v
}
@ -238,7 +238,7 @@ func Int64ValueMap(src map[string]*int64) map[string]int64 {
return dst
}
// Float64 returns a pointer to the float64 value passed in.
// Float64 returns a pointer to of the float64 value passed in.
func Float64(v float64) *float64 {
return &v
}
@ -297,7 +297,7 @@ func Float64ValueMap(src map[string]*float64) map[string]float64 {
return dst
}
// Time returns a pointer to the time.Time value passed in.
// Time returns a pointer to of the time.Time value passed in.
func Time(v time.Time) *time.Time {
return &v
}
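
A short sketch of how these pointer helpers and their Value counterparts are typically used; the values are illustrative.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Convert plain values into the pointers the SDK's API structs expect.
	name := aws.String("example-bucket")
	max := aws.Int64(1000)
	when := aws.Time(time.Now())

	// ...and safely read them back, getting zero values for nil pointers.
	fmt.Println(aws.StringValue(name), aws.Int64Value(max), aws.TimeValue(when))
	fmt.Println(aws.StringValue(nil)) // prints an empty string
}
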
@ -311,18 +311,6 @@ func TimeValue(v *time.Time) time.Time {
return time.Time{}
}
// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
// The result is undefined if the Unix time cannot be represented by an int64.
// Which includes calling TimeUnixMilli on a zero Time is undefined.
//
// This utility is useful for service API's such as CloudWatch Logs which require
// their unix time values to be in milliseconds.
//
// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
func TimeUnixMilli(t time.Time) int64 {
return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
}
// TimeSlice converts a slice of time.Time values into a slice of
// time.Time pointers
func TimeSlice(src []time.Time) []*time.Time {


@ -24,38 +24,30 @@ type lener interface {
// BuildContentLengthHandler builds the content length of a request based on the body,
// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
// to determine request body length and no "Content-Length" was specified it will panic.
//
// The Content-Length will only be aded to the request if the length of the body
// is greater than 0. If the body is empty or the current `Content-Length`
// header is <= 0, the header will also be stripped.
var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
var length int64
if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
length, _ = strconv.ParseInt(slength, 10, 64)
} else {
switch body := r.Body.(type) {
case nil:
length = 0
case lener:
length = int64(body.Len())
case io.Seeker:
r.BodyStart, _ = body.Seek(0, 1)
end, _ := body.Seek(0, 2)
body.Seek(r.BodyStart, 0) // make sure to seek back to original location
length = end - r.BodyStart
default:
panic("Cannot get length of body, must provide `ContentLength`")
}
length, _ := strconv.ParseInt(slength, 10, 64)
r.HTTPRequest.ContentLength = length
return
}
if length > 0 {
r.HTTPRequest.ContentLength = length
r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
} else {
r.HTTPRequest.ContentLength = 0
r.HTTPRequest.Header.Del("Content-Length")
var length int64
switch body := r.Body.(type) {
case nil:
length = 0
case lener:
length = int64(body.Len())
case io.Seeker:
r.BodyStart, _ = body.Seek(0, 1)
end, _ := body.Seek(0, 2)
body.Seek(r.BodyStart, 0) // make sure to seek back to original location
length = end - r.BodyStart
default:
panic("Cannot get length of body, must provide `ContentLength`")
}
r.HTTPRequest.ContentLength = length
r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
}}
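
The io.Seeker branch above measures the body by seeking to the end and back; here is a self-contained sketch of that trick, with an illustrative helper name.

package main

import (
	"bytes"
	"fmt"
	"io"
)

// seekerLen measures a body's remaining length the same way the handler
// above does: record the current offset, seek to the end, then seek back.
func seekerLen(body io.Seeker) (int64, error) {
	start, err := body.Seek(0, 1) // current position
	if err != nil {
		return 0, err
	}
	end, err := body.Seek(0, 2) // end of body
	if err != nil {
		return 0, err
	}
	// Restore the original position so the body can still be read.
	_, err = body.Seek(start, 0)
	return end - start, err
}

func main() {
	body := bytes.NewReader([]byte("hello world"))
	n, _ := seekerLen(body)
	fmt.Println(n) // 11
}
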
// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent.


@ -1,17 +1,153 @@
package corehandlers
import "github.com/aws/aws-sdk-go/aws/request"
import (
"fmt"
"reflect"
"strconv"
"strings"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
// ValidateParametersHandler is a request handler to validate the input parameters.
// Validating parameters only has meaning if done prior to the request being sent.
var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
if !r.ParamsFilled() {
return
}
if r.ParamsFilled() {
v := validator{errors: []string{}}
v.validateAny(reflect.ValueOf(r.Params), "")
if v, ok := r.Params.(request.Validator); ok {
if err := v.Validate(); err != nil {
r.Error = err
if count := len(v.errors); count > 0 {
format := "%d validation errors:\n- %s"
msg := fmt.Sprintf(format, count, strings.Join(v.errors, "\n- "))
r.Error = awserr.New("InvalidParameter", msg, nil)
}
}
}}
// A validator validates values. Collects validations errors which occurs.
type validator struct {
errors []string
}
// There's no validation to be done on the contents of []byte values. Prepare
// to check validateAny arguments against that type so we can quickly skip
// them.
var byteSliceType = reflect.TypeOf([]byte(nil))
// validateAny will validate any struct, slice or map type. All validations
// are also performed recursively for nested types.
func (v *validator) validateAny(value reflect.Value, path string) {
value = reflect.Indirect(value)
if !value.IsValid() {
return
}
switch value.Kind() {
case reflect.Struct:
v.validateStruct(value, path)
case reflect.Slice:
if value.Type() == byteSliceType {
// We don't need to validate the contents of []byte.
return
}
for i := 0; i < value.Len(); i++ {
v.validateAny(value.Index(i), path+fmt.Sprintf("[%d]", i))
}
case reflect.Map:
for _, n := range value.MapKeys() {
v.validateAny(value.MapIndex(n), path+fmt.Sprintf("[%q]", n.String()))
}
}
}
// validateStruct will validate the struct value's fields. If the structure has
// nested types those types will be validated also.
func (v *validator) validateStruct(value reflect.Value, path string) {
prefix := "."
if path == "" {
prefix = ""
}
for i := 0; i < value.Type().NumField(); i++ {
f := value.Type().Field(i)
if strings.ToLower(f.Name[0:1]) == f.Name[0:1] {
continue
}
fvalue := value.FieldByName(f.Name)
err := validateField(f, fvalue, validateFieldRequired, validateFieldMin)
if err != nil {
v.errors = append(v.errors, fmt.Sprintf("%s: %s", err.Error(), path+prefix+f.Name))
continue
}
v.validateAny(fvalue, path+prefix+f.Name)
}
}
type validatorFunc func(f reflect.StructField, fvalue reflect.Value) error
func validateField(f reflect.StructField, fvalue reflect.Value, funcs ...validatorFunc) error {
for _, fn := range funcs {
if err := fn(f, fvalue); err != nil {
return err
}
}
return nil
}
// Validates that a field has a valid value provided for required fields.
func validateFieldRequired(f reflect.StructField, fvalue reflect.Value) error {
if f.Tag.Get("required") == "" {
return nil
}
switch fvalue.Kind() {
case reflect.Ptr, reflect.Slice, reflect.Map:
if fvalue.IsNil() {
return fmt.Errorf("missing required parameter")
}
default:
if !fvalue.IsValid() {
return fmt.Errorf("missing required parameter")
}
}
return nil
}
// Validates that if a value is provided for a field, that value must be at
// least a minimum length.
func validateFieldMin(f reflect.StructField, fvalue reflect.Value) error {
minStr := f.Tag.Get("min")
if minStr == "" {
return nil
}
min, _ := strconv.ParseInt(minStr, 10, 64)
kind := fvalue.Kind()
if kind == reflect.Ptr {
if fvalue.IsNil() {
return nil
}
fvalue = fvalue.Elem()
}
switch fvalue.Kind() {
case reflect.String:
if int64(fvalue.Len()) < min {
return fmt.Errorf("field too short, minimum length %d", min)
}
case reflect.Slice, reflect.Map:
if fvalue.IsNil() {
return nil
}
if int64(fvalue.Len()) < min {
return fmt.Errorf("field too short, minimum length %d", min)
}
// TODO min can also apply to number minimum value.
}
return nil
}
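
To illustrate what the reinstated validator inspects, a tiny sketch of reading the same "required" and "min" struct tags via reflection; the struct and tag values are invented for the example.

package main

import (
	"fmt"
	"reflect"
)

// Input mimics the shape of an SDK parameter struct; the tags are the ones
// the validator above checks, but the fields here are made up.
type Input struct {
	Bucket *string `required:"true"`
	Key    *string `required:"true" min:"1"`
}

func main() {
	t := reflect.TypeOf(Input{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		fmt.Printf("%s: required=%q min=%q\n",
			f.Name, f.Tag.Get("required"), f.Tag.Get("min"))
	}
}
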


@ -1,191 +0,0 @@
// Package endpointcreds provides support for retrieving credentials from an
// arbitrary HTTP endpoint.
//
// The credentials endpoint Provider can receive both static and refreshable
// credentials that will expire. Credentials are static when an "Expiration"
// value is not provided in the endpoint's response.
//
// Static credentials will never expire once they have been retrieved. The format
// of the static credentials response:
// {
// "AccessKeyId" : "MUA...",
// "SecretAccessKey" : "/7PC5om....",
// }
//
// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
// value in the response. The format of the refreshable credentials response:
// {
// "AccessKeyId" : "MUA...",
// "SecretAccessKey" : "/7PC5om....",
// "Token" : "AQoDY....=",
// "Expiration" : "2016-02-25T06:03:31Z"
// }
//
// Errors should be returned in the following format and only returned with 400
// or 500 HTTP status codes.
// {
// "code": "ErrorCode",
// "message": "Helpful error message."
// }
package endpointcreds
import (
"encoding/json"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
)
// ProviderName is the name of the credentials provider.
const ProviderName = `CredentialsEndpointProvider`
// Provider satisfies the credentials.Provider interface, and is a client to
// retrieve credentials from an arbitrary endpoint.
type Provider struct {
staticCreds bool
credentials.Expiry
// Requires a AWS Client to make HTTP requests to the endpoint with.
// the Endpoint the request will be made to is provided by the aws.Config's
// Endpoint value.
Client *client.Client
// ExpiryWindow will allow the credentials to trigger refreshing prior to
// the credentials actually expiring. This is beneficial so race conditions
// with expiring credentials do not cause request to fail unexpectedly
// due to ExpiredTokenException exceptions.
//
// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
// 10 seconds before the credentials are actually expired.
//
// If ExpiryWindow is 0 or less it will be ignored.
ExpiryWindow time.Duration
}
// NewProviderClient returns a credentials Provider for retrieving AWS credentials
// from arbitrary endpoint.
func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
p := &Provider{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: "CredentialsEndpoint",
Endpoint: endpoint,
},
handlers,
),
}
p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
p.Client.Handlers.Validate.Clear()
p.Client.Handlers.Validate.PushBack(validateEndpointHandler)
for _, option := range options {
option(p)
}
return p
}
// NewCredentialsClient returns a Credentials wrapper for retrieving credentials
// from an arbitrary endpoint concurrently. The client will request the
func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
}
// IsExpired returns true if the credentials retrieved are expired, or not yet
// retrieved.
func (p *Provider) IsExpired() bool {
if p.staticCreds {
return false
}
return p.Expiry.IsExpired()
}
// Retrieve will attempt to request the credentials from the endpoint the Provider
// was configured for. And error will be returned if the retrieval fails.
func (p *Provider) Retrieve() (credentials.Value, error) {
resp, err := p.getCredentials()
if err != nil {
return credentials.Value{ProviderName: ProviderName},
awserr.New("CredentialsEndpointError", "failed to load credentials", err)
}
if resp.Expiration != nil {
p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
} else {
p.staticCreds = true
}
return credentials.Value{
AccessKeyID: resp.AccessKeyID,
SecretAccessKey: resp.SecretAccessKey,
SessionToken: resp.Token,
ProviderName: ProviderName,
}, nil
}
type getCredentialsOutput struct {
Expiration *time.Time
AccessKeyID string
SecretAccessKey string
Token string
}
type errorOutput struct {
Code string `json:"code"`
Message string `json:"message"`
}
func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
op := &request.Operation{
Name: "GetCredentials",
HTTPMethod: "GET",
}
out := &getCredentialsOutput{}
req := p.Client.NewRequest(op, nil, out)
req.HTTPRequest.Header.Set("Accept", "application/json")
return out, req.Send()
}
func validateEndpointHandler(r *request.Request) {
if len(r.ClientInfo.Endpoint) == 0 {
r.Error = aws.ErrMissingEndpoint
}
}
func unmarshalHandler(r *request.Request) {
defer r.HTTPResponse.Body.Close()
out := r.Data.(*getCredentialsOutput)
if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
r.Error = awserr.New("SerializationError",
"failed to decode endpoint credentials",
err,
)
}
}
func unmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
var errOut errorOutput
if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil {
r.Error = awserr.New("SerializationError",
"failed to decode endpoint credentials",
err,
)
}
// Response body format is not consistent between metadata endpoints.
// Grab the error message as a string and include that as the source error
r.Error = awserr.New(errOut.Code, errOut.Message, nil)
}


@ -14,7 +14,7 @@ var (
ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
)
// A StaticProvider is a set of credentials which are set programmatically,
// A StaticProvider is a set of credentials which are set pragmatically,
// and will never expire.
type StaticProvider struct {
Value
@ -30,22 +30,13 @@ func NewStaticCredentials(id, secret, token string) *Credentials {
}})
}
// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
// wrapping the static credentials value provide. Same as NewStaticCredentials
// but takes the creds Value instead of individual fields
func NewStaticCredentialsFromCreds(creds Value) *Credentials {
return NewCredentials(&StaticProvider{Value: creds})
}
// Retrieve returns the credentials or error if the credentials are invalid.
func (s *StaticProvider) Retrieve() (Value, error) {
if s.AccessKeyID == "" || s.SecretAccessKey == "" {
return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
}
if len(s.Value.ProviderName) == 0 {
s.Value.ProviderName = StaticProviderName
}
s.Value.ProviderName = StaticProviderName
return s.Value, nil
}
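
A minimal sketch of how a static provider is constructed and read, using the NewStaticCredentials constructor shown above; the key material is a placeholder.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Placeholder key material; never hard-code real credentials.
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "TOKEN")

	val, err := creds.Get()
	if err != nil {
		fmt.Println("retrieve failed:", err)
		return
	}
	fmt.Println(val.ProviderName, val.AccessKeyID)
}
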


@ -1,161 +0,0 @@
// Package stscreds are credential Providers to retrieve STS AWS credentials.
//
// STS provides multiple ways to retrieve credentials which can be used when making
// future AWS service API operation calls.
package stscreds
import (
"fmt"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/service/sts"
)
// ProviderName provides a name of AssumeRole provider
const ProviderName = "AssumeRoleProvider"
// AssumeRoler represents the minimal subset of the STS client API used by this provider.
type AssumeRoler interface {
AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
}
// DefaultDuration is the default amount of time in minutes that the credentials
// will be valid for.
var DefaultDuration = time.Duration(15) * time.Minute
// AssumeRoleProvider retrieves temporary credentials from the STS service, and
// keeps track of their expiration time. This provider must be used explicitly,
// as it is not included in the credentials chain.
type AssumeRoleProvider struct {
credentials.Expiry
// STS client to make assume role request with.
Client AssumeRoler
// Role to be assumed.
RoleARN string
// Session name, if you wish to reuse the credentials elsewhere.
RoleSessionName string
// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
Duration time.Duration
// Optional ExternalID to pass along, defaults to nil if not set.
ExternalID *string
// The policy plain text must be 2048 bytes or shorter. However, an internal
// conversion compresses it into a packed binary format with a separate limit.
// The PackedPolicySize response element indicates by percentage how close to
// the upper size limit the policy is, with 100% equaling the maximum allowed
// size.
Policy *string
// The identification number of the MFA device that is associated with the user
// who is making the AssumeRole call. Specify this value if the trust policy
// of the role being assumed includes a condition that requires MFA authentication.
// The value is either the serial number for a hardware device (such as GAHT12345678)
// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
SerialNumber *string
// The value provided by the MFA device, if the trust policy of the role being
// assumed requires MFA (that is, if the policy includes a condition that tests
// for MFA). If the role being assumed requires MFA and if the TokenCode value
// is missing or expired, the AssumeRole call returns an "access denied" error.
TokenCode *string
// ExpiryWindow will allow the credentials to trigger refreshing prior to
// the credentials actually expiring. This is beneficial so race conditions
// with expiring credentials do not cause request to fail unexpectedly
// due to ExpiredTokenException exceptions.
//
// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
// 10 seconds before the credentials are actually expired.
//
// If ExpiryWindow is 0 or less it will be ignored.
ExpiryWindow time.Duration
}
// NewCredentials returns a pointer to a new Credentials object wrapping the
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
// role will be named after a nanosecond timestamp of this operation.
//
// Takes a Config provider to create the STS client. The ConfigProvider is
// satisfied by the session.Session type.
func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
p := &AssumeRoleProvider{
Client: sts.New(c),
RoleARN: roleARN,
Duration: DefaultDuration,
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
// role will be named after a nanosecond timestamp of this operation.
//
// Takes an AssumeRoler which can be satisfiede by the STS client.
func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
p := &AssumeRoleProvider{
Client: svc,
RoleARN: roleARN,
Duration: DefaultDuration,
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
// Retrieve generates a new set of temporary credentials using STS.
func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
// Apply defaults where parameters are not set.
if p.RoleSessionName == "" {
// Try to work out a role name that will hopefully end up unique.
p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
}
if p.Duration == 0 {
// Expire as often as AWS permits.
p.Duration = DefaultDuration
}
input := &sts.AssumeRoleInput{
DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
RoleArn: aws.String(p.RoleARN),
RoleSessionName: aws.String(p.RoleSessionName),
ExternalId: p.ExternalID,
}
if p.Policy != nil {
input.Policy = p.Policy
}
if p.SerialNumber != nil && p.TokenCode != nil {
input.SerialNumber = p.SerialNumber
input.TokenCode = p.TokenCode
}
roleOutput, err := p.Client.AssumeRole(input)
if err != nil {
return credentials.Value{ProviderName: ProviderName}, err
}
// We will proactively generate new credentials before they expire.
p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
return credentials.Value{
AccessKeyID: *roleOutput.Credentials.AccessKeyId,
SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
SessionToken: *roleOutput.Credentials.SessionToken,
ProviderName: ProviderName,
}, nil
}


@ -8,7 +8,6 @@
package defaults
import (
"fmt"
"net/http"
"os"
"time"
@ -17,7 +16,6 @@ import (
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/endpoints"
@ -68,7 +66,6 @@ func Handlers() request.Handlers {
var handlers request.Handlers
handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
handlers.Validate.AfterEachFn = request.HandlerListStopOnError
handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
handlers.Build.AfterEachFn = request.HandlerListStopOnError
handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
@ -85,45 +82,16 @@ func Handlers() request.Handlers {
// is available if you need to reset the credentials of an
// existing service client or session's Config.
func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, *cfg.Region, true)
return credentials.NewCredentials(&credentials.ChainProvider{
VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
Providers: []credentials.Provider{
&credentials.EnvProvider{},
&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
RemoteCredProvider(*cfg, handlers),
},
})
}
// RemoteCredProvider returns a credenitials provider for the default remote
// endpoints such as EC2 or ECS Roles.
func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
ecsCredURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")
if len(ecsCredURI) > 0 {
return ecsCredProvider(cfg, handlers, ecsCredURI)
}
return ec2RoleProvider(cfg, handlers)
}
func ecsCredProvider(cfg aws.Config, handlers request.Handlers, uri string) credentials.Provider {
const host = `169.254.170.2`
return endpointcreds.NewProviderClient(cfg, handlers,
fmt.Sprintf("http://%s%s", host, uri),
func(p *endpointcreds.Provider) {
p.ExpiryWindow = 5 * time.Minute
},
)
}
func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName,
aws.StringValue(cfg.Region), true, false)
return &ec2rolecreds.EC2RoleProvider{
Client: ec2metadata.NewClient(cfg, handlers, endpoint, signingRegion),
ExpiryWindow: 5 * time.Minute,
}
&ec2rolecreds.EC2RoleProvider{
Client: ec2metadata.NewClient(*cfg, handlers, endpoint, signingRegion),
ExpiryWindow: 5 * time.Minute,
},
}})
}


@ -1,19 +1,12 @@
package ec2metadata
import (
"encoding/json"
"fmt"
"path"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
// GetMetadata uses the path provided to request information from the EC2
// instance metdata service. The content will be returned as a string, or
// error if the request failed.
// GetMetadata uses the path provided to request
func (c *EC2Metadata) GetMetadata(p string) (string, error) {
op := &request.Operation{
Name: "GetMetadata",
@ -27,68 +20,6 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) {
return output.Content, req.Send()
}
// GetDynamicData uses the path provided to request information from the EC2
// instance metadata service for dynamic data. The content will be returned
// as a string, or error if the request failed.
func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
op := &request.Operation{
Name: "GetDynamicData",
HTTPMethod: "GET",
HTTPPath: path.Join("/", "dynamic", p),
}
output := &metadataOutput{}
req := c.NewRequest(op, nil, output)
return output.Content, req.Send()
}
// GetInstanceIdentityDocument retrieves an identity document describing an
// instance. Error is returned if the request fails or is unable to parse
// the response.
func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
resp, err := c.GetDynamicData("instance-identity/document")
if err != nil {
return EC2InstanceIdentityDocument{},
awserr.New("EC2MetadataRequestError",
"failed to get EC2 instance identity document", err)
}
doc := EC2InstanceIdentityDocument{}
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
return EC2InstanceIdentityDocument{},
awserr.New("SerializationError",
"failed to decode EC2 instance identity document", err)
}
return doc, nil
}
// IAMInfo retrieves IAM info from the metadata API
func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
resp, err := c.GetMetadata("iam/info")
if err != nil {
return EC2IAMInfo{},
awserr.New("EC2MetadataRequestError",
"failed to get EC2 IAM info", err)
}
info := EC2IAMInfo{}
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
return EC2IAMInfo{},
awserr.New("SerializationError",
"failed to decode EC2 IAM info", err)
}
if info.Code != "Success" {
errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
return EC2IAMInfo{},
awserr.New("EC2MetadataError", errMsg, nil)
}
return info, nil
}
// Region returns the region the instance is running in.
func (c *EC2Metadata) Region() (string, error) {
resp, err := c.GetMetadata("placement/availability-zone")
@ -110,31 +41,3 @@ func (c *EC2Metadata) Available() bool {
return true
}
// An EC2IAMInfo provides the shape for unmarshalling
// an IAM info from the metadata API
type EC2IAMInfo struct {
Code string
LastUpdated time.Time
InstanceProfileArn string
InstanceProfileID string
}
// An EC2InstanceIdentityDocument provides the shape for unmarshalling
// an instance identity document
type EC2InstanceIdentityDocument struct {
DevpayProductCodes []string `json:"devpayProductCodes"`
AvailabilityZone string `json:"availabilityZone"`
PrivateIP string `json:"privateIp"`
Version string `json:"version"`
Region string `json:"region"`
InstanceID string `json:"instanceId"`
BillingProducts []string `json:"billingProducts"`
InstanceType string `json:"instanceType"`
AccountID string `json:"accountId"`
PendingTime time.Time `json:"pendingTime"`
ImageID string `json:"imageId"`
KernelID string `json:"kernelId"`
RamdiskID string `json:"ramdiskId"`
Architecture string `json:"architecture"`
}
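A hedged usage sketch for the metadata client methods above. It assumes the ec2metadata.New constructor that takes a session (present in this line of the SDK) and will only return data when run on an EC2 instance where the metadata service is reachable:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Build a metadata client from a session's config and handlers.
	meta := ec2metadata.New(session.New())

	if !meta.Available() {
		fmt.Println("not running on EC2; metadata service unreachable")
		return
	}

	region, err := meta.Region()                 // derived from placement/availability-zone
	id, _ := meta.GetMetadata("instance-id")     // raw metadata path lookup
	doc, _ := meta.GetInstanceIdentityDocument() // parsed dynamic data document

	if err == nil {
		fmt.Println(region, id, doc.AccountID)
	}
}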

View File

@ -1,24 +0,0 @@
package request
import (
"io"
"net/http"
"net/url"
)
func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
req := new(http.Request)
*req = *r
req.URL = &url.URL{}
*req.URL = *r.URL
req.Body = body
req.Header = http.Header{}
for k, v := range r.Header {
for _, vv := range v {
req.Header.Add(k, vv)
}
}
return req
}
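A small stdlib-only sketch of why the header map is copied explicitly above: a plain struct copy of http.Request would still share the same Header map. copyRequest below is a hypothetical stand-in for the unexported copyHTTPRequest, with the same body:

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
)

// copyRequest duplicates the request shell, its URL, and its headers, and
// attaches a fresh body, so the copy can be mutated or retried independently.
func copyRequest(r *http.Request, body io.ReadCloser) *http.Request {
	req := new(http.Request)
	*req = *r
	req.URL = &url.URL{}
	*req.URL = *r.URL
	req.Body = body
	req.Header = http.Header{}
	for k, v := range r.Header {
		for _, vv := range v {
			req.Header.Add(k, vv)
		}
	}
	return req
}

func main() {
	orig, _ := http.NewRequest("PUT", "https://example.com/object", nil)
	orig.Header.Set("X-Attempt", "1")

	retry := copyRequest(orig, ioutil.NopCloser(bytes.NewReader([]byte("payload"))))
	retry.Header.Set("X-Attempt", "2")

	// Mutating the copy's header leaves the original untouched.
	fmt.Println(orig.Header.Get("X-Attempt"), retry.Header.Get("X-Attempt")) // 1 2
}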

View File

@ -1,49 +0,0 @@
package request
import (
"io"
"sync"
)
// offsetReader is a thread-safe io.ReadCloser to prevent racing
// with retrying requests
type offsetReader struct {
buf io.ReadSeeker
lock sync.RWMutex
closed bool
}
func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
reader := &offsetReader{}
buf.Seek(offset, 0)
reader.buf = buf
return reader
}
// Close is a thread-safe close. Uses the write lock.
func (o *offsetReader) Close() error {
o.lock.Lock()
defer o.lock.Unlock()
o.closed = true
return nil
}
// Read is a thread-safe read using a read lock.
func (o *offsetReader) Read(p []byte) (int, error) {
o.lock.RLock()
defer o.lock.RUnlock()
if o.closed {
return 0, io.EOF
}
return o.buf.Read(p)
}
// CloseAndCopy will return a new offsetReader with a copy of the old buffer
// and close the old buffer.
func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader {
o.Close()
return newOffsetReader(o.buf, offset)
}
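A usage sketch for the reader above, written as if it sat in the request package alongside the unexported type, showing how CloseAndCopy rewinds the body so a retry re-sends the same bytes:

package request

import (
	"fmt"
	"io/ioutil"
	"strings"
)

// exampleOffsetReader drains the body for a first attempt, then closes that
// reader and creates a fresh one at offset 0 for the retry.
func exampleOffsetReader() {
	body := strings.NewReader("request payload")

	attempt1 := newOffsetReader(body, 0)
	first, _ := ioutil.ReadAll(attempt1)

	// Simulate a failed send: close the old reader and rewind for the retry.
	attempt2 := attempt1.CloseAndCopy(0)
	second, _ := ioutil.ReadAll(attempt2)

	fmt.Println(string(first) == string(second)) // true
}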

View File

@ -12,7 +12,6 @@ import (
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client/metadata"
)
@ -39,7 +38,6 @@ type Request struct {
RetryDelay time.Duration
NotHoist bool
SignedHeaderVals http.Header
LastSignedAt time.Time
built bool
}
@ -73,15 +71,13 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
if method == "" {
method = "POST"
}
p := operation.HTTPPath
if p == "" {
p = "/"
}
httpReq, _ := http.NewRequest(method, "", nil)
var err error
httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath)
if err != nil {
httpReq.URL = &url.URL{}
err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
}
httpReq.URL, _ = url.Parse(clientInfo.Endpoint + p)
r := &Request{
Config: cfg,
@ -95,7 +91,7 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
HTTPRequest: httpReq,
Body: nil,
Params: params,
Error: err,
Error: nil,
Data: data,
}
r.SetBufferBody([]byte{})
@ -135,7 +131,7 @@ func (r *Request) SetStringBody(s string) {
// SetReaderBody will set the request's body reader.
func (r *Request) SetReaderBody(reader io.ReadSeeker) {
r.HTTPRequest.Body = newOffsetReader(reader, 0)
r.HTTPRequest.Body = ioutil.NopCloser(reader)
r.Body = reader
}
@ -189,6 +185,7 @@ func debugLogReqError(r *Request, stage string, retrying bool, err error) {
// which occurred will be returned.
func (r *Request) Build() error {
if !r.built {
r.Error = nil
r.Handlers.Validate.Run(r)
if r.Error != nil {
debugLogReqError(r, "Validate Request", false, r.Error)
@ -205,7 +202,7 @@ func (r *Request) Build() error {
return r.Error
}
// Sign will sign the request returning error if errors are encountered.
// Sign will sign the request returning error if errors are encountered.
//
// Send will build the request prior to signing. All Sign Handlers will
// be executed in the order they were set.
@ -224,53 +221,32 @@ func (r *Request) Sign() error {
//
// Send will sign the request prior to sending. All Send Handlers will
// be executed in the order they were set.
//
// Canceling a request is non-deterministic. If a request has been canceled,
// then the transport will choose, randomly, one of the state channels during
// reads or getting the connection.
//
// readLoop() and getConn(req *Request, cm connectMethod)
// https://github.com/golang/go/blob/master/src/net/http/transport.go
func (r *Request) Send() error {
for {
r.Sign()
if r.Error != nil {
return r.Error
}
if aws.BoolValue(r.Retryable) {
if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
}
var body io.ReadCloser
if reader, ok := r.HTTPRequest.Body.(*offsetReader); ok {
body = reader.CloseAndCopy(r.BodyStart)
} else {
if r.Config.Logger != nil {
r.Config.Logger.Log("Request body type has been overwritten. May cause race conditions")
}
r.Body.Seek(r.BodyStart, 0)
body = ioutil.NopCloser(r.Body)
}
// Closing response body. Since we are setting a new request to send off, this
// response will get squashed and leaked.
r.HTTPResponse.Body.Close()
r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, body)
if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
// Closing response body. Since we are setting a new request to send off, this
// response will get squashed and leaked.
r.HTTPResponse.Body.Close()
}
// Re-seek the body back to the original point in for a retry so that
// send will send the body's contents again in the upcoming request.
r.Body.Seek(r.BodyStart, 0)
r.HTTPRequest.Body = ioutil.NopCloser(r.Body)
}
r.Sign()
if r.Error != nil {
return r.Error
}
r.Retryable = nil
r.Handlers.Send.Run(r)
if r.Error != nil {
if strings.Contains(r.Error.Error(), "net/http: request canceled") {
return r.Error
}
err := r.Error
r.Handlers.Retry.Run(r)
r.Handlers.AfterRetry.Run(r)
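The essential difference between the two retry paths above is how the body is rewound before the next attempt: the newer code swaps in a fresh offsetReader via CloseAndCopy, while the older code seeks the original body back to BodyStart and rewraps it. A stdlib-only sketch of the seek-and-rewrap pattern, with sendOnce as a hypothetical stand-in for the HTTP send:

package main

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// sendOnce is a hypothetical stand-in for an HTTP attempt; it fails twice so
// the retry loop has something to do, then reads and "sends" the body.
func sendOnce(attempt int, body io.Reader) error {
	if attempt < 2 {
		return errors.New("transient failure")
	}
	payload, _ := ioutil.ReadAll(body)
	fmt.Printf("sent %q on attempt %d\n", payload, attempt)
	return nil
}

func main() {
	var bodyStart int64 // mirrors r.BodyStart
	body := strings.NewReader("idempotent payload")

	for attempt := 0; attempt < 5; attempt++ {
		// Rewind the seekable body so every attempt sends the same bytes,
		// mirroring r.Body.Seek(r.BodyStart, 0) in the retry loop above.
		body.Seek(bodyStart, 0)
		if err := sendOnce(attempt, ioutil.NopCloser(body)); err == nil {
			return
		}
	}
}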

View File

@ -26,11 +26,8 @@ func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
// retryableCodes is a collection of service response codes which are retry-able
// without any further action.
var retryableCodes = map[string]struct{}{
"RequestError": {},
"RequestTimeout": {},
}
var throttleCodes = map[string]struct{}{
"RequestError": {},
"RequestTimeout": {},
"ProvisionedThroughputExceededException": {},
"Throttling": {},
"ThrottlingException": {},
@ -49,11 +46,6 @@ var credsExpiredCodes = map[string]struct{}{
"RequestExpired": {}, // EC2 Only
}
func isCodeThrottle(code string) bool {
_, ok := throttleCodes[code]
return ok
}
func isCodeRetryable(code string) bool {
if _, ok := retryableCodes[code]; ok {
return true
@ -78,17 +70,6 @@ func (r *Request) IsErrorRetryable() bool {
return false
}
// IsErrorThrottle returns whether the error is to be throttled based on its code.
// Returns false if the request has no Error set
func (r *Request) IsErrorThrottle() bool {
if r.Error != nil {
if err, ok := r.Error.(awserr.Error); ok {
return isCodeThrottle(err.Code())
}
}
return false
}
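A compact sketch of the code-classification pattern used above, with classify as a hypothetical helper; the real retryer also consults credsExpiredCodes and unwraps awserr.Error values before looking at the code:

package main

import "fmt"

var retryable = map[string]struct{}{
	"RequestError":   {},
	"RequestTimeout": {},
}

var throttle = map[string]struct{}{
	"Throttling":                             {},
	"ThrottlingException":                    {},
	"ProvisionedThroughputExceededException": {},
}

// classify reports how a response code should be handled: throttled codes are
// retried with a longer backoff, retryable codes are retried normally, and
// anything else is treated as a terminal failure.
func classify(code string) string {
	if _, ok := throttle[code]; ok {
		return "throttle"
	}
	if _, ok := retryable[code]; ok {
		return "retry"
	}
	return "fail"
}

func main() {
	fmt.Println(classify("ThrottlingException"), classify("RequestTimeout"), classify("AccessDenied"))
}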
// IsErrorExpired returns whether the error code is a credential expiry error.
// Returns false if the request has no Error set.
func (r *Request) IsErrorExpired() bool {

View File

@ -1,234 +0,0 @@
package request
import (
"bytes"
"fmt"
"github.com/aws/aws-sdk-go/aws/awserr"
)
const (
// InvalidParameterErrCode is the error code for invalid parameters errors
InvalidParameterErrCode = "InvalidParameter"
// ParamRequiredErrCode is the error code for required parameter errors
ParamRequiredErrCode = "ParamRequiredError"
// ParamMinValueErrCode is the error code for fields whose numeric value
// is too low.
ParamMinValueErrCode = "ParamMinValueError"
// ParamMinLenErrCode is the error code for fields without enough elements.
ParamMinLenErrCode = "ParamMinLenError"
)
// Validator provides a way for types to perform validation logic on their
// input values that external code can use to determine if a type's values
// are valid.
type Validator interface {
Validate() error
}
// An ErrInvalidParams provides wrapping of invalid parameter errors found when
// validating API operation input parameters.
type ErrInvalidParams struct {
// Context is the base context of the invalid parameter group.
Context string
errs []ErrInvalidParam
}
// Add adds a new invalid parameter error to the collection of invalid
// parameters. The context of the invalid parameter will be updated to reflect
// this collection.
func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
err.SetContext(e.Context)
e.errs = append(e.errs, err)
}
// AddNested adds the invalid parameter errors from another ErrInvalidParams
// value into this collection. The nested errors will have their nested context
// updated and base context to reflect the merging.
//
// Use for nested validations errors.
func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
for _, err := range nested.errs {
err.SetContext(e.Context)
err.AddNestedContext(nestedCtx)
e.errs = append(e.errs, err)
}
}
// Len returns the number of invalid parameter errors
func (e ErrInvalidParams) Len() int {
return len(e.errs)
}
// Code returns the code of the error
func (e ErrInvalidParams) Code() string {
return InvalidParameterErrCode
}
// Message returns the message of the error
func (e ErrInvalidParams) Message() string {
return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
}
// Error returns the string formatted form of the invalid parameters.
func (e ErrInvalidParams) Error() string {
w := &bytes.Buffer{}
fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
for _, err := range e.errs {
fmt.Fprintf(w, "- %s\n", err.Message())
}
return w.String()
}
// OrigErr returns the invalid parameters as a awserr.BatchedErrors value
func (e ErrInvalidParams) OrigErr() error {
return awserr.NewBatchError(
InvalidParameterErrCode, e.Message(), e.OrigErrs())
}
// OrigErrs returns a slice of the invalid parameters
func (e ErrInvalidParams) OrigErrs() []error {
errs := make([]error, len(e.errs))
for i := 0; i < len(errs); i++ {
errs[i] = e.errs[i]
}
return errs
}
// An ErrInvalidParam represents an invalid parameter error type.
type ErrInvalidParam interface {
awserr.Error
// Field name the error occurred on.
Field() string
// SetContext updates the context of the error.
SetContext(string)
// AddNestedContext updates the error's context to include a nested level.
AddNestedContext(string)
}
type errInvalidParam struct {
context string
nestedContext string
field string
code string
msg string
}
// Code returns the error code for the type of invalid parameter.
func (e *errInvalidParam) Code() string {
return e.code
}
// Message returns the reason the parameter was invalid, and its context.
func (e *errInvalidParam) Message() string {
return fmt.Sprintf("%s, %s.", e.msg, e.Field())
}
// Error returns the string version of the invalid parameter error.
func (e *errInvalidParam) Error() string {
return fmt.Sprintf("%s: %s", e.code, e.Message())
}
// OrigErr returns nil. Implemented for the awserr.Error interface.
func (e *errInvalidParam) OrigErr() error {
return nil
}
// Field returns the field and context in which the error occurred.
func (e *errInvalidParam) Field() string {
field := e.context
if len(field) > 0 {
field += "."
}
if len(e.nestedContext) > 0 {
field += fmt.Sprintf("%s.", e.nestedContext)
}
field += e.field
return field
}
// SetContext updates the base context of the error.
func (e *errInvalidParam) SetContext(ctx string) {
e.context = ctx
}
// AddNestedContext prepends a context to the field's path.
func (e *errInvalidParam) AddNestedContext(ctx string) {
if len(e.nestedContext) == 0 {
e.nestedContext = ctx
} else {
e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
}
}
// An ErrParamRequired represents an required parameter error.
type ErrParamRequired struct {
errInvalidParam
}
// NewErrParamRequired creates a new required parameter error.
func NewErrParamRequired(field string) *ErrParamRequired {
return &ErrParamRequired{
errInvalidParam{
code: ParamRequiredErrCode,
field: field,
msg: fmt.Sprintf("missing required field"),
},
}
}
// An ErrParamMinValue represents a minimum value parameter error.
type ErrParamMinValue struct {
errInvalidParam
min float64
}
// NewErrParamMinValue creates a new minimum value parameter error.
func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
return &ErrParamMinValue{
errInvalidParam: errInvalidParam{
code: ParamMinValueErrCode,
field: field,
msg: fmt.Sprintf("minimum field value of %v", min),
},
min: min,
}
}
// MinValue returns the field's required minimum value.
//
// float64 is returned for both int and float min values.
func (e *ErrParamMinValue) MinValue() float64 {
return e.min
}
// An ErrParamMinLen represents a minimum length parameter error.
type ErrParamMinLen struct {
errInvalidParam
min int
}
// NewErrParamMinLen creates a new minimum length parameter error.
func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
return &ErrParamMinLen{
errInvalidParam: errInvalidParam{
code: ParamMinLenErrCode,
field: field,
msg: fmt.Sprintf("minimum field size of %v", min),
},
min: min,
}
}
// MinLen returns the field's required minimum length.
func (e *ErrParamMinLen) MinLen() int {
return e.min
}
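A short usage sketch for the validation error collection above, assuming the exported types shown here are importable from the request package; PutObjectInput is only a hypothetical context name:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	// Collect validation failures for a hypothetical PutObjectInput shape.
	params := request.ErrInvalidParams{Context: "PutObjectInput"}
	params.Add(request.NewErrParamRequired("Bucket"))
	params.Add(request.NewErrParamMinLen("Key", 1))

	if params.Len() > 0 {
		// Error prints the code, a failure count, and one line per field.
		fmt.Println(params.Error())
	}
}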

View File

@ -1,223 +0,0 @@
/*
Package session provides configuration for the SDK's service clients.
Sessions can be shared across all service clients that share the same base
configuration. The Session is built from the SDK's default configuration and
request handlers.
Sessions should be cached when possible, because creating a new Session will
load all configuration values from the environment, and config files each time
the Session is created. Sharing the Session value across all of your service
clients will ensure the configuration is loaded the fewest number of times possible.
Concurrency
Sessions are safe to use concurrently as long as the Session is not being
modified. The SDK will not modify the Session once the Session has been created.
Creating service clients concurrently from a shared Session is safe.
Sessions from Shared Config
Sessions can be created using the method above that will only load the
additional config if the AWS_SDK_LOAD_CONFIG environment variable is set.
Alternatively you can explicitly create a Session with shared config enabled.
To do this you can use NewSessionWithOptions to configure how the Session will
be created. Using the NewSessionWithOptions with SharedConfigState set to
SharedConfigEnabled will create the session as if the AWS_SDK_LOAD_CONFIG
environment variable was set.
Creating Sessions
When creating Sessions optional aws.Config values can be passed in that will
override the default, or loaded config values the Session is being created
with. This allows you to provide additional, or case based, configuration
as needed.
By default NewSession will only load credentials from the shared credentials
file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is
set to a truthy value the Session will be created from the configuration
values from the shared config (~/.aws/config) and shared credentials
(~/.aws/credentials) files. See the section Sessions from Shared Config for
more information.
Create a Session with the default config and request handlers. With credentials,
region, and profile loaded from the environment and shared config automatically.
Requires the AWS_PROFILE to be set, or "default" is used.
// Create Session
sess, err := session.NewSession()
// Create a Session with a custom region
sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")})
// Create a S3 client instance from a session
sess, err := session.NewSession()
if err != nil {
// Handle Session creation error
}
svc := s3.New(sess)
Create Session With Option Overrides
In addition to NewSession, Sessions can be created using NewSessionWithOptions.
This func allows you to control and override how the Session will be created
through code instead of being driven by environment variables only.
Use NewSessionWithOptions when you want to provide the config profile, or
override the shared config state (AWS_SDK_LOAD_CONFIG).
// Equivalent to session.New
sess, err := session.NewSessionWithOptions(session.Options{})
// Specify profile to load for the session's config
sess, err := session.NewSessionWithOptions(session.Options{
Profile: "profile_name",
})
// Specify profile for config and region for requests
sess, err := session.NewSessionWithOptions(session.Options{
Config: aws.Config{Region: aws.String("us-east-1")},
Profile: "profile_name",
})
// Force enable Shared Config support
sess, err := session.NewSessionWithOptions(session.Options{
SharedConfigState: SharedConfigEnable,
})
Adding Handlers
You can add handlers to a session for processing HTTP requests. All service
clients that use the session inherit the handlers. For example, the following
handler logs every request and its payload made by a service client:
// Create a session, and add additional handlers for all service
// clients created with the Session to inherit. Adds logging handler.
sess, err := session.NewSession()
sess.Handlers.Send.PushFront(func(r *request.Request) {
// Log every request made and its payload
logger.Println("Request: %s/%s, Payload: %s",
r.ClientInfo.ServiceName, r.Operation, r.Params)
})
Deprecated "New" function
The New session function has been deprecated because it does not provide a good
way to return errors that occur when loading the configuration files and values.
Because of this, NewSession was created so errors can be retrieved when
creating a session fails.
Shared Config Fields
By default the SDK will only load the shared credentials file's (~/.aws/credentials)
credentials values, and all other config is provided by the environment variables,
SDK defaults, and user provided aws.Config values.
If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable
option is used to create the Session the full shared config values will be
loaded. This includes credentials, region, and support for assume role. In
addition the Session will load its configuration from both the shared config
file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
files have the same format.
If both config files are present the configuration from both files will be
read. The Session will be created from configuration values from the shared
credentials file (~/.aws/credentials) over those in the shared config
file (~/.aws/config).
Credentials are the values the SDK should use for authenticating requests with
AWS Services. Credentials loaded from a configuration file must include both
aws_access_key_id and aws_secret_access_key, provided together in the
same file, to be considered valid. The values will be ignored if not a complete
group. aws_session_token is an optional field that can be provided if both of
the other two fields are also provided.
aws_access_key_id = AKID
aws_secret_access_key = SECRET
aws_session_token = TOKEN
Assume Role values allow you to configure the SDK to assume an IAM role using
a set of credentials provided in a config file via the source_profile field.
Both "role_arn" and "source_profile" are required. The SDK does not support
assuming a role with MFA token Via the Session's constructor. You can use the
stscreds.AssumeRoleProvider credentials provider to specify custom
configuration and support for MFA.
role_arn = arn:aws:iam::<account_number>:role/<role_name>
source_profile = profile_with_creds
external_id = 1234
mfa_serial = not supported!
role_session_name = session_name
Region is the region the SDK should use for looking up AWS service endpoints
and signing requests.
region = us-east-1
Environment Variables
When a Session is created several environment variables can be set to adjust
how the SDK functions, and what configuration data it loads when creating
Sessions. All environment values are optional, but some values like credentials
require multiple of the values to be set or the partial values will be ignored.
All environment variable values are strings unless otherwise noted.
Environment configuration values. If set both Access Key ID and Secret Access
Key must be provided. A Session Token can optionally also be provided, but is
not required.
# Access Key ID
AWS_ACCESS_KEY_ID=AKID
AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
# Secret Access Key
AWS_SECRET_ACCESS_KEY=SECRET
AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
# Session Token
AWS_SESSION_TOKEN=TOKEN
Region value will instruct the SDK where to make service API requests to. If it is
not provided in the environment the region must be provided before a service
client request is made.
AWS_REGION=us-east-1
# AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
# and AWS_REGION is not also set.
AWS_DEFAULT_REGION=us-east-1
Profile name the SDK should use when loading shared config from the
configuration files. If not provided "default" will be used as the profile name.
AWS_PROFILE=my_profile
# AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
# and AWS_PROFILE is not also set.
AWS_DEFAULT_PROFILE=my_profile
SDK load config instructs the SDK to load the shared config in addition to
shared credentials. This also expands the configuration loaded so the shared
credentials will have parity with the shared config file. This also enables
Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
env values as well.
AWS_SDK_LOAD_CONFIG=1
Shared credentials file path can be set to instruct the SDK to use an alternative
file for the shared credentials. If not set the file will be loaded from
$HOME/.aws/credentials on Linux/Unix based systems, and
%USERPROFILE%\.aws\credentials on Windows.
AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
Shared config file path can be set to instruct the SDK to use an alternative
file for the shared config. If not set the file will be loaded from
$HOME/.aws/config on Linux/Unix based systems, and
%USERPROFILE%\.aws\config on Windows.
AWS_CONFIG_FILE=$HOME/my_shared_config
*/
package session

View File

@ -1,188 +0,0 @@
package session
import (
"os"
"path/filepath"
"strconv"
"github.com/aws/aws-sdk-go/aws/credentials"
)
// envConfig is a collection of environment values the SDK will read
// setup config from. All environment values are optional. But some values
// such as credentials require multiple values to be complete or the values
// will be ignored.
type envConfig struct {
// Environment configuration values. If set both Access Key ID and Secret Access
// Key must be provided. A Session Token can optionally also be provided, but is
// not required.
//
// # Access Key ID
// AWS_ACCESS_KEY_ID=AKID
// AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
//
// # Secret Access Key
// AWS_SECRET_ACCESS_KEY=SECRET
// AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
//
// # Session Token
// AWS_SESSION_TOKEN=TOKEN
Creds credentials.Value
// Region value will instruct the SDK where to make service API requests to. If it is
// not provided in the environment the region must be provided before a service
// client request is made.
//
// AWS_REGION=us-east-1
//
// # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
// # and AWS_REGION is not also set.
// AWS_DEFAULT_REGION=us-east-1
Region string
// Profile name the SDK should use when loading shared configuration from the
// shared configuration files. If not provided "default" will be used as the
// profile name.
//
// AWS_PROFILE=my_profile
//
// # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
// # and AWS_PROFILE is not also set.
// AWS_DEFAULT_PROFILE=my_profile
Profile string
// SDK load config instructs the SDK to load the shared config in addition to
// shared credentials. This also expands the configuration loaded from the shared
// credentials to have parity with the shared config file. This also enables
// Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
// env values as well.
//
// AWS_SDK_LOAD_CONFIG=1
EnableSharedConfig bool
// Shared credentials file path can be set to instruct the SDK to use an alternate
// file for the shared credentials. If not set the file will be loaded from
// $HOME/.aws/credentials on Linux/Unix based systems, and
// %USERPROFILE%\.aws\credentials on Windows.
//
// AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
SharedCredentialsFile string
// Shared config file path can be set to instruct the SDK to use an alternate
// file for the shared config. If not set the file will be loaded from
// $HOME/.aws/config on Linux/Unix based systems, and
// %USERPROFILE%\.aws\config on Windows.
//
// AWS_CONFIG_FILE=$HOME/my_shared_config
SharedConfigFile string
}
var (
credAccessEnvKey = []string{
"AWS_ACCESS_KEY_ID",
"AWS_ACCESS_KEY",
}
credSecretEnvKey = []string{
"AWS_SECRET_ACCESS_KEY",
"AWS_SECRET_KEY",
}
credSessionEnvKey = []string{
"AWS_SESSION_TOKEN",
}
regionEnvKeys = []string{
"AWS_REGION",
"AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set
}
profileEnvKeys = []string{
"AWS_PROFILE",
"AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set
}
)
// loadEnvConfig retrieves the SDK's environment configuration.
// See `envConfig` for the values that will be retrieved.
//
// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
// the shared SDK config will be loaded in addition to the SDK's specific
// configuration values.
func loadEnvConfig() envConfig {
enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
return envConfigLoad(enableSharedConfig)
}
// loadEnvSharedConfig retrieves the SDK's environment configuration, and the
// SDK shared config. See `envConfig` for the values that will be retrieved.
//
// Loads the shared configuration in addition to the SDK's specific configuration.
// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
// environment variable is set.
func loadSharedEnvConfig() envConfig {
return envConfigLoad(true)
}
func envConfigLoad(enableSharedConfig bool) envConfig {
cfg := envConfig{}
cfg.EnableSharedConfig = enableSharedConfig
setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey)
setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey)
setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey)
// Require logical grouping of credentials
if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 {
cfg.Creds = credentials.Value{}
} else {
cfg.Creds.ProviderName = "EnvConfigCredentials"
}
regionKeys := regionEnvKeys
profileKeys := profileEnvKeys
if !cfg.EnableSharedConfig {
regionKeys = regionKeys[:1]
profileKeys = profileKeys[:1]
}
setFromEnvVal(&cfg.Region, regionKeys)
setFromEnvVal(&cfg.Profile, profileKeys)
cfg.SharedCredentialsFile = sharedCredentialsFilename()
cfg.SharedConfigFile = sharedConfigFilename()
return cfg
}
func setFromEnvVal(dst *string, keys []string) {
for _, k := range keys {
if v := os.Getenv(k); len(v) > 0 {
*dst = v
break
}
}
}
func sharedCredentialsFilename() string {
if name := os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(name) > 0 {
return name
}
return filepath.Join(userHomeDir(), ".aws", "credentials")
}
func sharedConfigFilename() string {
if name := os.Getenv("AWS_CONFIG_FILE"); len(name) > 0 {
return name
}
return filepath.Join(userHomeDir(), ".aws", "config")
}
func userHomeDir() string {
homeDir := os.Getenv("HOME") // *nix
if len(homeDir) == 0 { // windows
homeDir = os.Getenv("USERPROFILE")
}
return homeDir
}
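A stdlib-only sketch of the two lookup patterns above: first-non-empty environment variable (setFromEnvVal) and the default shared credentials path with the HOME/USERPROFILE fallback (sharedCredentialsFilename and userHomeDir). firstEnv is a hypothetical helper:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// firstEnv returns the first non-empty value among the given environment
// keys, mirroring setFromEnvVal above.
func firstEnv(keys ...string) string {
	for _, k := range keys {
		if v := os.Getenv(k); len(v) > 0 {
			return v
		}
	}
	return ""
}

func main() {
	region := firstEnv("AWS_REGION", "AWS_DEFAULT_REGION")

	credsFile := os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
	if credsFile == "" {
		home := os.Getenv("HOME") // *nix
		if home == "" {           // windows
			home = os.Getenv("USERPROFILE")
		}
		credsFile = filepath.Join(home, ".aws", "credentials")
	}

	fmt.Println(region, credsFile)
}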

View File

@ -1,14 +1,17 @@
// Package session provides a way to create service clients with shared configuration
// and handlers.
//
// Generally this package should be used instead of the `defaults` package.
//
// A session should be used to share configurations and request handlers between multiple
// service clients. When service clients need specific configuration aws.Config can be
// used to provide additional configuration directly to the service client.
package session
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/endpoints"
@ -18,204 +21,36 @@ import (
// store configurations and request handlers for those services.
//
// Sessions are safe to create service clients concurrently, but it is not safe
// to mutate the Session concurrently.
//
// The Session satisfies the service client's client.ClientConfigProvider.
// to mutate the session concurrently.
type Session struct {
Config *aws.Config
Handlers request.Handlers
}
// New creates a new instance of the handlers merging in the provided configs
// on top of the SDK's default configurations. Once the Session is created it
// can be mutated to modify the Config or Handlers. The Session is safe to be
// read concurrently, but it should not be written to concurrently.
// New creates a new instance of the handlers merging in the provided Configs
// on top of the SDK's default configurations. Once the session is created it
// can be mutated to modify Configs or Handlers. The session is safe to be read
// concurrently, but it should not be written to concurrently.
//
// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New
// method could now encounter an error when loading the configuration. When
// the environment variable is set, and an error occurs, New will return a
// session that will fail all requests reporting the error that occurred while
// loading the session. Use NewSession to get the error when creating the
// session.
// Example:
// // Create a session with the default config and request handlers.
// sess := session.New()
//
// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
// the shared config file (~/.aws/config) will also be loaded, in addition to
// the shared credentials file (~/.aws/credentials). Values set in both the
// shared config, and shared credentials will be taken from the shared
// credentials file.
// // Create a session with a custom region
// sess := session.New(&aws.Config{Region: aws.String("us-east-1")})
//
// Deprecated: Use NewSession functions to create sessions instead. NewSession
// has the same functionality as New except an error can be returned when the
// func is called instead of waiting to receive an error until a request is made.
// // Create a session, and add additional handlers for all service
// // clients created with the session to inherit. Adds logging handler.
// sess := session.New()
// sess.Handlers.Send.PushFront(func(r *request.Request) {
// // Log every request made and its payload
// logger.Println("Request: %s/%s, Payload: %s", r.ClientInfo.ServiceName, r.Operation, r.Params)
// })
//
// // Create a S3 client instance from a session
// sess := session.New()
// svc := s3.New(sess)
func New(cfgs ...*aws.Config) *Session {
// load initial config from environment
envCfg := loadEnvConfig()
if envCfg.EnableSharedConfig {
s, err := newSession(envCfg, cfgs...)
if err != nil {
// Old session.New expected all errors to be discovered when
// a request is made, and would report the errors then. This
// needs to be replicated if an error occurs while creating
// the session.
msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " +
"Use session.NewSession to handle errors occuring during session creation."
// Session creation failed, need to report the error and prevent
// any requests from succeeding.
s = &Session{Config: defaults.Config()}
s.Config.MergeIn(cfgs...)
s.Config.Logger.Log("ERROR:", msg, "Error:", err)
s.Handlers.Validate.PushBack(func(r *request.Request) {
r.Error = err
})
}
return s
}
return oldNewSession(cfgs...)
}
// NewSession returns a new Session created from SDK defaults, config files,
// environment, and user provided config files. Once the Session is created
// it can be mutated to modify the Config or Handlers. The Session is safe to
// be read concurrently, but it should not be written to concurrently.
//
// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
// the shared config file (~/.aws/config) will also be loaded in addition to
// the shared credentials file (~/.aws/credentials). Values set in both the
// shared config, and shared credentials will be taken from the shared
// credentials file. Enabling the Shared Config will also allow the Session
// to be built with retrieving credentials with AssumeRole set in the config.
//
// See the NewSessionWithOptions func for information on how to override or
// control through code how the Session will be created. Such as specifying the
// config profile, and controlling if shared config is enabled or not.
func NewSession(cfgs ...*aws.Config) (*Session, error) {
envCfg := loadEnvConfig()
return newSession(envCfg, cfgs...)
}
// SharedConfigState provides the ability to optionally override the state
// of the session's creation based on the shared config being enabled or
// disabled.
type SharedConfigState int
const (
// SharedConfigStateFromEnv does not override any state of the
// AWS_SDK_LOAD_CONFIG env var. It is the default value of the
// SharedConfigState type.
SharedConfigStateFromEnv SharedConfigState = iota
// SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
// and disables the shared config functionality.
SharedConfigDisable
// SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
// and enables the shared config functionality.
SharedConfigEnable
)
// Options provides the means to control how a Session is created and what
// configuration values will be loaded.
//
type Options struct {
// Provides config values for the SDK to use when creating service clients
// and making API requests to services. Any value set in this field
// will override the associated value provided by the SDK defaults,
// environment or config files where relevant.
//
// If not set, configuration values from SDK defaults, environment,
// config will be used.
Config aws.Config
// Overrides the config profile the Session should be created from. If not
// set the value of the environment variable will be loaded (AWS_PROFILE,
// or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
//
// If not set and environment variables are not set the "default"
// (DefaultSharedConfigProfile) will be used as the profile to load the
// session config from.
Profile string
// Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
// environment variable. By default a Session will be created using the
// value provided by the AWS_SDK_LOAD_CONFIG environment variable.
//
// Setting this value to SharedConfigEnable or SharedConfigDisable
// will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
// and enable or disable the shared config functionality.
SharedConfigState SharedConfigState
}
// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
// environment, and user provided config files. This func uses the Options
// values to configure how the Session is created.
//
// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
// the shared config file (~/.aws/config) will also be loaded in addition to
// the shared credentials file (~/.aws/credentials). Values set in both the
// shared config, and shared credentials will be taken from the shared
// credentials file. Enabling the Shared Config will also allow the Session
// to be built with retrieving credentials with AssumeRole set in the config.
//
// // Equivalent to session.New
// sess, err := session.NewSessionWithOptions(session.Options{})
//
// // Specify profile to load for the session's config
// sess, err := session.NewSessionWithOptions(session.Options{
// Profile: "profile_name",
// })
//
// // Specify profile for config and region for requests
// sess, err := session.NewSessionWithOptions(session.Options{
// Config: aws.Config{Region: aws.String("us-east-1")},
// Profile: "profile_name",
// })
//
// // Force enable Shared Config support
// sess, err := session.NewSessionWithOptions(session.Options{
// SharedConfigState: SharedConfigEnable,
// })
func NewSessionWithOptions(opts Options) (*Session, error) {
var envCfg envConfig
if opts.SharedConfigState == SharedConfigEnable {
envCfg = loadSharedEnvConfig()
} else {
envCfg = loadEnvConfig()
}
if len(opts.Profile) > 0 {
envCfg.Profile = opts.Profile
}
switch opts.SharedConfigState {
case SharedConfigDisable:
envCfg.EnableSharedConfig = false
case SharedConfigEnable:
envCfg.EnableSharedConfig = true
}
return newSession(envCfg, &opts.Config)
}
// Must is a helper function to ensure the Session is valid and there was no
// error when calling a NewSession function.
//
// This helper is intended to be used in variable initialization to load the
// Session and configuration at startup. Such as:
//
// var sess = session.Must(session.NewSession())
func Must(sess *Session, err error) *Session {
if err != nil {
panic(err)
}
return sess
}
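A minimal usage sketch for the variable-initialisation pattern the Must comment describes, assuming the NewSession, Must, and Copy functions shown in this file are available to the importing program:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

// sess is initialised once at start-up; Must panics if session creation fails.
var sess = session.Must(session.NewSession(&aws.Config{
	Region: aws.String("us-east-1"),
}))

func main() {
	// Copy derives a session for another region without mutating the original.
	west := sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
	fmt.Println(aws.StringValue(sess.Config.Region), aws.StringValue(west.Config.Region))
}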
func oldNewSession(cfgs ...*aws.Config) *Session {
cfg := defaults.Config()
handlers := defaults.Handlers()
@ -237,115 +72,6 @@ func oldNewSession(cfgs ...*aws.Config) *Session {
return s
}
func newSession(envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
cfg := defaults.Config()
handlers := defaults.Handlers()
// Get a merged version of the user provided config to determine if
// credentials were provided.
userCfg := &aws.Config{}
userCfg.MergeIn(cfgs...)
// Order config files will be loaded in with later files overwriting
// previous config file values.
cfgFiles := []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
if !envCfg.EnableSharedConfig {
// The shared config file (~/.aws/config) is only loaded if instructed
// to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
cfgFiles = cfgFiles[1:]
}
// Load additional config from file(s)
sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles)
if err != nil {
return nil, err
}
mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers)
s := &Session{
Config: cfg,
Handlers: handlers,
}
initHandlers(s)
return s, nil
}
func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers) {
// Merge in user provided configuration
cfg.MergeIn(userCfg)
// Region if not already set by user
if len(aws.StringValue(cfg.Region)) == 0 {
if len(envCfg.Region) > 0 {
cfg.WithRegion(envCfg.Region)
} else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 {
cfg.WithRegion(sharedCfg.Region)
}
}
// Configure credentials if not already set
if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
if len(envCfg.Creds.AccessKeyID) > 0 {
cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
envCfg.Creds,
)
} else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil {
cfgCp := *cfg
cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds(
sharedCfg.AssumeRoleSource.Creds,
)
cfg.Credentials = stscreds.NewCredentials(
&Session{
Config: &cfgCp,
Handlers: handlers.Copy(),
},
sharedCfg.AssumeRole.RoleARN,
func(opt *stscreds.AssumeRoleProvider) {
opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
if len(sharedCfg.AssumeRole.ExternalID) > 0 {
opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
}
// MFA not supported
},
)
} else if len(sharedCfg.Creds.AccessKeyID) > 0 {
cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
sharedCfg.Creds,
)
} else {
// Fallback to default credentials provider, include mock errors
// for the credential chain so user can identify why credentials
// failed to be retrieved.
cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{
VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
Providers: []credentials.Provider{
&credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)},
&credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)},
defaults.RemoteCredProvider(*cfg, handlers),
},
})
}
}
}
type credProviderError struct {
Err error
}
var emptyCreds = credentials.Value{}
func (c credProviderError) Retrieve() (credentials.Value, error) {
return credentials.Value{}, c.Err
}
func (c credProviderError) IsExpired() bool {
return true
}
func initHandlers(s *Session) {
// Add the Validate parameter handler if it is not disabled.
s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
@ -354,12 +80,13 @@ func initHandlers(s *Session) {
}
}
// Copy creates and returns a copy of the current Session, copying the config
// Copy creates and returns a copy of the current session, copying the config
// and handlers. If any additional configs are provided they will be merged
// on top of the Session's copied config.
// on top of the session's copied config.
//
// // Create a copy of the current Session, configured for the us-west-2 region.
// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
// Example:
// // Create a copy of the current session, configured for the us-west-2 region.
// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
func (s *Session) Copy(cfgs ...*aws.Config) *Session {
newSession := &Session{
Config: s.Config.Copy(cfgs...),
@ -374,15 +101,15 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session {
// ClientConfig satisfies the client.ConfigProvider interface and is used to
// configure the service client instances. Passing the Session to the service
// client's constructor (New) will use this method to configure the client.
//
// Example:
// sess := session.New()
// s3.New(sess)
func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
s = s.Copy(cfgs...)
endpoint, signingRegion := endpoints.NormalizeEndpoint(
aws.StringValue(s.Config.Endpoint),
serviceName,
aws.StringValue(s.Config.Region),
aws.BoolValue(s.Config.DisableSSL),
aws.BoolValue(s.Config.UseDualStack),
)
aws.StringValue(s.Config.Endpoint), serviceName,
aws.StringValue(s.Config.Region), aws.BoolValue(s.Config.DisableSSL))
return client.Config{
Config: s.Config,

View File

@ -1,294 +0,0 @@
package session
import (
"fmt"
"os"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/go-ini/ini"
)
const (
// Static Credentials group
accessKeyIDKey = `aws_access_key_id` // group required
secretAccessKey = `aws_secret_access_key` // group required
sessionTokenKey = `aws_session_token` // optional
// Assume Role Credentials group
roleArnKey = `role_arn` // group required
sourceProfileKey = `source_profile` // group required
externalIDKey = `external_id` // optional
mfaSerialKey = `mfa_serial` // optional
roleSessionNameKey = `role_session_name` // optional
// Additional Config fields
regionKey = `region`
// DefaultSharedConfigProfile is the default profile to be used when
// loading configuration from the config files if another profile name
// is not provided.
DefaultSharedConfigProfile = `default`
)
type assumeRoleConfig struct {
RoleARN string
SourceProfile string
ExternalID string
MFASerial string
RoleSessionName string
}
// sharedConfig represents the configuration fields of the SDK config files.
type sharedConfig struct {
// Credentials values from the config file. Both aws_access_key_id
// and aws_secret_access_key must be provided together in the same file
// to be considered valid. The values will be ignored if not a complete group.
// aws_session_token is an optional field that can be provided if both of the
// other two fields are also provided.
//
// aws_access_key_id
// aws_secret_access_key
// aws_session_token
Creds credentials.Value
AssumeRole assumeRoleConfig
AssumeRoleSource *sharedConfig
// Region is the region the SDK should use for looking up AWS service endpoints
// and signing requests.
//
// region
Region string
}
type sharedConfigFile struct {
Filename string
IniData *ini.File
}
// loadSharedConfig retrieves the configuration from the list of files
// using the profile provided. The order the files are listed will determine
// precedence. Values in subsequent files will overwrite values defined in
// earlier files.
//
// For example, given two files A and B. Both define credentials. If the order
// of the files are A then B, B's credential values will be used instead of A's.
//
// See sharedConfig.setFromFile for information how the config files
// will be loaded.
func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) {
if len(profile) == 0 {
profile = DefaultSharedConfigProfile
}
files, err := loadSharedConfigIniFiles(filenames)
if err != nil {
return sharedConfig{}, err
}
cfg := sharedConfig{}
if err = cfg.setFromIniFiles(profile, files); err != nil {
return sharedConfig{}, err
}
if len(cfg.AssumeRole.SourceProfile) > 0 {
if err := cfg.setAssumeRoleSource(profile, files); err != nil {
return sharedConfig{}, err
}
}
return cfg, nil
}
func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
files := make([]sharedConfigFile, 0, len(filenames))
for _, filename := range filenames {
if _, err := os.Stat(filename); os.IsNotExist(err) {
// Trim files from the list that don't exist.
continue
}
f, err := ini.Load(filename)
if err != nil {
return nil, SharedConfigLoadError{Filename: filename}
}
files = append(files, sharedConfigFile{
Filename: filename, IniData: f,
})
}
return files, nil
}
func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error {
var assumeRoleSrc sharedConfig
// Multiple level assume role chains are not supported
if cfg.AssumeRole.SourceProfile == origProfile {
assumeRoleSrc = *cfg
assumeRoleSrc.AssumeRole = assumeRoleConfig{}
} else {
err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files)
if err != nil {
return err
}
}
if len(assumeRoleSrc.Creds.AccessKeyID) == 0 {
return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN}
}
cfg.AssumeRoleSource = &assumeRoleSrc
return nil
}
func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error {
// Trim files from the list that don't exist.
for _, f := range files {
if err := cfg.setFromIniFile(profile, f); err != nil {
if _, ok := err.(SharedConfigProfileNotExistsError); ok {
// Ignore profiles that are missing
continue
}
return err
}
}
return nil
}
// setFromFile loads the configuration from the file using
// the profile provided. A sharedConfig pointer type value is used so that
// multiple config file loadings can be chained.
//
// Only loads complete logically grouped values, and will not set fields in cfg
// for incomplete grouped values in the config. Such as credentials. For example
// if a config file only includes aws_access_key_id but no aws_secret_access_key
// the aws_access_key_id will be ignored.
func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error {
section, err := file.IniData.GetSection(profile)
if err != nil {
// Fallback to alternate profile name: profile <name>
section, err = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
if err != nil {
return SharedConfigProfileNotExistsError{Profile: profile, Err: err}
}
}
// Shared Credentials
akid := section.Key(accessKeyIDKey).String()
secret := section.Key(secretAccessKey).String()
if len(akid) > 0 && len(secret) > 0 {
cfg.Creds = credentials.Value{
AccessKeyID: akid,
SecretAccessKey: secret,
SessionToken: section.Key(sessionTokenKey).String(),
ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
}
}
// Assume Role
roleArn := section.Key(roleArnKey).String()
srcProfile := section.Key(sourceProfileKey).String()
if len(roleArn) > 0 && len(srcProfile) > 0 {
cfg.AssumeRole = assumeRoleConfig{
RoleARN: roleArn,
SourceProfile: srcProfile,
ExternalID: section.Key(externalIDKey).String(),
MFASerial: section.Key(mfaSerialKey).String(),
RoleSessionName: section.Key(roleSessionNameKey).String(),
}
}
// Region
if v := section.Key(regionKey).String(); len(v) > 0 {
cfg.Region = v
}
return nil
}
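A sketch of the profile lookup that setFromIniFile performs, using the same go-ini calls that appear above; the "prod" profile name and the printed key are only illustrative:

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/go-ini/ini"
)

func main() {
	profile := "prod" // hypothetical profile name
	cfgPath := filepath.Join(os.Getenv("HOME"), ".aws", "config")

	f, err := ini.Load(cfgPath)
	if err != nil {
		fmt.Println("failed to load config file:", err)
		return
	}

	// The shared config file prefixes non-default profiles with "profile ",
	// so try the bare name first and fall back to the prefixed section name.
	section, err := f.GetSection(profile)
	if err != nil {
		if section, err = f.GetSection(fmt.Sprintf("profile %s", profile)); err != nil {
			fmt.Println("profile not found:", profile)
			return
		}
	}

	fmt.Println("region:", section.Key("region").String())
}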
// SharedConfigLoadError is an error for the shared config file failed to load.
type SharedConfigLoadError struct {
Filename string
Err error
}
// Code is the short id of the error.
func (e SharedConfigLoadError) Code() string {
return "SharedConfigLoadError"
}
// Message is the description of the error
func (e SharedConfigLoadError) Message() string {
return fmt.Sprintf("failed to load config file, %s", e.Filename)
}
// OrigErr is the underlying error that caused the failure.
func (e SharedConfigLoadError) OrigErr() error {
return e.Err
}
// Error satisfies the error interface.
func (e SharedConfigLoadError) Error() string {
return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
}
// SharedConfigProfileNotExistsError is an error for the shared config when
// the profile was not found in the config file.
type SharedConfigProfileNotExistsError struct {
Profile string
Err error
}
// Code is the short id of the error.
func (e SharedConfigProfileNotExistsError) Code() string {
return "SharedConfigProfileNotExistsError"
}
// Message is the description of the error
func (e SharedConfigProfileNotExistsError) Message() string {
return fmt.Sprintf("failed to get profile, %s", e.Profile)
}
// OrigErr is the underlying error that caused the failure.
func (e SharedConfigProfileNotExistsError) OrigErr() error {
return e.Err
}
// Error satisfies the error interface.
func (e SharedConfigProfileNotExistsError) Error() string {
return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
}
// SharedConfigAssumeRoleError is an error for the shared config when the
// profile contains assume role information, but that information is invalid
// or not complete.
type SharedConfigAssumeRoleError struct {
RoleARN string
}
// Code is the short id of the error.
func (e SharedConfigAssumeRoleError) Code() string {
return "SharedConfigAssumeRoleError"
}
// Message is the description of the error
func (e SharedConfigAssumeRoleError) Message() string {
return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials",
e.RoleARN)
}
// OrigErr is the underlying error that caused the failure.
func (e SharedConfigAssumeRoleError) OrigErr() error {
return nil
}
// Error satisfies the error interface.
func (e SharedConfigAssumeRoleError) Error() string {
return awserr.SprintError(e.Code(), e.Message(), "", nil)
}

View File

@ -1,82 +0,0 @@
package v4
import (
"net/http"
"strings"
)
// validator houses a set of rules needed for validation of a
// string value
type rules []rule
// rule interface allows for more flexible rules and just simply
// checks whether or not a value adheres to that rule
type rule interface {
IsValid(value string) bool
}
// IsValid will iterate through all rules and see if any rules
// apply to the value and supports nested rules
func (r rules) IsValid(value string) bool {
for _, rule := range r {
if rule.IsValid(value) {
return true
}
}
return false
}
// mapRule generic rule for maps
type mapRule map[string]struct{}
// IsValid for the map rule satisfies whether it exists in the map
func (m mapRule) IsValid(value string) bool {
_, ok := m[value]
return ok
}
// whitelist is a generic rule for whitelisting
type whitelist struct {
rule
}
// IsValid for whitelist checks if the value is within the whitelist
func (w whitelist) IsValid(value string) bool {
return w.rule.IsValid(value)
}
// blacklist is a generic rule for blacklisting
type blacklist struct {
rule
}
// IsValid for blacklist checks if the value is not within the blacklist
func (b blacklist) IsValid(value string) bool {
return !b.rule.IsValid(value)
}
type patterns []string
// IsValid for patterns checks each pattern and returns if a match has
// been found
func (p patterns) IsValid(value string) bool {
for _, pattern := range p {
if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) {
return true
}
}
return false
}
// inclusiveRules rules allow for rules to depend on one another
type inclusiveRules []rule
// IsValid will return true if all rules are true
func (r inclusiveRules) IsValid(value string) bool {
for _, rule := range r {
if !rule.IsValid(value) {
return false
}
}
return true
}
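A sketch written as if it sat in package v4 next to the unexported rule types above, showing how the blacklist, mapRule, and patterns rules compose; exampleHeaderRules is purely illustrative:

package v4

import "fmt"

// exampleHeaderRules mirrors how the signer composes rules: any header not in
// the blacklist map is considered signable, and X-Amz-Meta-* headers are
// matched by prefix via the patterns rule.
func exampleHeaderRules() {
	signable := rules{
		blacklist{mapRule{"Authorization": struct{}{}, "User-Agent": struct{}{}}},
	}
	meta := rules{patterns{"X-Amz-Meta-"}}

	fmt.Println(signable.IsValid("Content-Type"))  // true: not blacklisted
	fmt.Println(signable.IsValid("Authorization")) // false: blacklisted
	fmt.Println(meta.IsValid("X-Amz-Meta-Owner"))  // true: prefix match
}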

View File

@ -1,665 +0,0 @@
// Package v4 implements signing for AWS V4 signer
//
// Provides request signing for requests that need to be signed with
// AWS V4 Signatures.
package v4
import (
"bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol/rest"
)
const (
authHeaderPrefix = "AWS4-HMAC-SHA256"
timeFormat = "20060102T150405Z"
shortTimeFormat = "20060102"
// emptyStringSHA256 is a SHA256 of an empty string
emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
)
var ignoredHeaders = rules{
blacklist{
mapRule{
"Authorization": struct{}{},
"User-Agent": struct{}{},
},
},
}
// requiredSignedHeaders is a whitelist for building canonical headers.
var requiredSignedHeaders = rules{
whitelist{
mapRule{
"Cache-Control": struct{}{},
"Content-Disposition": struct{}{},
"Content-Encoding": struct{}{},
"Content-Language": struct{}{},
"Content-Md5": struct{}{},
"Content-Type": struct{}{},
"Expires": struct{}{},
"If-Match": struct{}{},
"If-Modified-Since": struct{}{},
"If-None-Match": struct{}{},
"If-Unmodified-Since": struct{}{},
"Range": struct{}{},
"X-Amz-Acl": struct{}{},
"X-Amz-Copy-Source": struct{}{},
"X-Amz-Copy-Source-If-Match": struct{}{},
"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
"X-Amz-Copy-Source-If-None-Match": struct{}{},
"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
"X-Amz-Copy-Source-Range": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
"X-Amz-Grant-Full-control": struct{}{},
"X-Amz-Grant-Read": struct{}{},
"X-Amz-Grant-Read-Acp": struct{}{},
"X-Amz-Grant-Write": struct{}{},
"X-Amz-Grant-Write-Acp": struct{}{},
"X-Amz-Metadata-Directive": struct{}{},
"X-Amz-Mfa": struct{}{},
"X-Amz-Request-Payer": struct{}{},
"X-Amz-Server-Side-Encryption": struct{}{},
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
"X-Amz-Storage-Class": struct{}{},
"X-Amz-Website-Redirect-Location": struct{}{},
},
},
patterns{"X-Amz-Meta-"},
}
// allowedQueryHoisting is a whitelist for building query headers. The boolean value
// represents whether or not it is a pattern.
var allowedQueryHoisting = inclusiveRules{
blacklist{requiredSignedHeaders},
patterns{"X-Amz-"},
}
// Signer applies AWS v4 signing to given request. Use this to sign requests
// that need to be signed with AWS V4 Signatures.
type Signer struct {
// The authentication credentials the request will be signed against.
// This value must be set to sign requests.
Credentials *credentials.Credentials
// Sets the log level the signer should use when reporting information to
// the logger. If the logger is nil nothing will be logged. See
// aws.LogLevelType for more information on available logging levels
//
// By default nothing will be logged.
Debug aws.LogLevelType
// The logger logging information will be written to. If the logger
// is nil, nothing will be logged.
Logger aws.Logger
// Disables the Signer's moving HTTP header key/value pairs from the HTTP
// request header to the request's query string. This is most commonly used
// with pre-signed requests preventing headers from being added to the
// request's query string.
DisableHeaderHoisting bool
// currentTimeFn returns the time value which represents the current time.
// This value should only be used for testing. If it is nil the default
// time.Now will be used.
currentTimeFn func() time.Time
}
// NewSigner returns a Signer pointer configured with the credentials and optional
// option values provided. If no options are provided the Signer will use its
// default configuration.
func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
v4 := &Signer{
Credentials: credentials,
}
for _, option := range options {
option(v4)
}
return v4
}
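// Example usage (illustrative sketch only, not part of the vendored source;
// the credential, bucket and region values are hypothetical):
//
//	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
//	signer := NewSigner(creds)
//	req, _ := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/key", nil)
//	_, err := signer.Sign(req, nil, "s3", "us-east-1", time.Now())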
type signingCtx struct {
ServiceName string
Region string
Request *http.Request
Body io.ReadSeeker
Query url.Values
Time time.Time
ExpireTime time.Duration
SignedHeaderVals http.Header
credValues credentials.Value
isPresign bool
formattedTime string
formattedShortTime string
bodyDigest string
signedHeaders string
canonicalHeaders string
canonicalString string
credentialString string
stringToSign string
signature string
authorization string
}
// Sign signs AWS v4 requests with the provided body, service name, region the
// request is made to, and time the request is signed at. The signTime allows
// you to specify that a request is signed for the future, and cannot be
// used until then.
//
// Returns a list of HTTP headers that were included in the signature or an
// error if signing the request failed. Generally for signed requests this value
// is not needed as the full request context will be captured by the http.Request
// value. It is included for reference though.
//
// Sign will set the request's Body to be the `body` parameter passed in. If
// the body is not already an io.ReadCloser, it will be wrapped within one. If
// a `nil` body parameter is passed to Sign, the request's Body field will
// also be set to nil. It's important to note that this functionality will not
// change the request's ContentLength.
//
// Sign differs from Presign in that it will sign the request using HTTP
// header values. This type of signing is intended for http.Request values that
// will not be shared, or are shared in a way that the header values on the
// request will not be lost.
//
// The request's body is an io.ReadSeeker so the SHA256 of the body can be
// generated. To bypass the signer computing the hash you can set the
// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
// only compute the hash if the request header value is empty.
func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
return v4.signWithBody(r, body, service, region, 0, signTime)
}
// Presign signs AWS v4 requests with the provided body, service name, region
// the request is made to, and time the request is signed at. The signTime
// allows you to specify that a request is signed for the future, and cannot
// be used until then.
//
// Returns a list of HTTP headers that were included in the signature or an
// error if signing the request failed. For presigned requests these headers
// and their values must be included on the HTTP request when it is made. This
// is helpful to know what header values need to be shared with the party the
// presigned request will be distributed to.
//
// Presign differs from Sign in that it will sign the request using query string
// instead of header values. This allows you to share the Presigned Request's
// URL with third parties, or distribute it throughout your system with minimal
// dependencies.
//
// Presign also takes an exp value which is the duration the
// signed request will be valid after the signing time. This allows you to
// set when the request will expire.
//
// The request's body is an io.ReadSeeker so the SHA256 of the body can be
// generated. To bypass the signer computing the hash you can set the
// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
// only compute the hash if the request header value is empty.
//
// Presigning an S3 request will not compute the body's SHA256 hash by default.
// This is because the general use case for S3 presigned URLs is to share
// PUT/GET capabilities. If you would like to include the body's SHA256 in the
// presigned request's signature you can set the "X-Amz-Content-Sha256"
// HTTP header and it will be included in the request's signature.
func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
return v4.signWithBody(r, body, service, region, exp, signTime)
}
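// Example, using a Signer constructed as in the NewSigner example above
// (illustrative sketch; the URL and expiry values are hypothetical):
//
//	req, _ := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/key", nil)
//	hdrs, err := signer.Presign(req, nil, "s3", "us-east-1", 15*time.Minute, time.Now())
//	// req.URL now carries X-Amz-Signature and related query values; hdrs lists
//	// any headers that must accompany the request when it is eventually made.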
func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
currentTimeFn := v4.currentTimeFn
if currentTimeFn == nil {
currentTimeFn = time.Now
}
ctx := &signingCtx{
Request: r,
Body: body,
Query: r.URL.Query(),
Time: signTime,
ExpireTime: exp,
isPresign: exp != 0,
ServiceName: service,
Region: region,
}
if ctx.isRequestSigned() {
if !v4.Credentials.IsExpired() && currentTimeFn().Before(ctx.Time.Add(10*time.Minute)) {
// If the request is already signed, the credentials have not
// expired, and the request is not too old, ignore the signing request.
return ctx.SignedHeaderVals, nil
}
ctx.Time = currentTimeFn()
ctx.handlePresignRemoval()
}
var err error
ctx.credValues, err = v4.Credentials.Get()
if err != nil {
return http.Header{}, err
}
ctx.assignAmzQueryValues()
ctx.build(v4.DisableHeaderHoisting)
// If the request is not presigned the body should be attached to it. This
// prevents accidentally sending a signed request without the body it was
// signed for attached.
if !ctx.isPresign {
var reader io.ReadCloser
if body != nil {
var ok bool
if reader, ok = body.(io.ReadCloser); !ok {
reader = ioutil.NopCloser(body)
}
}
r.Body = reader
}
if v4.Debug.Matches(aws.LogDebugWithSigning) {
v4.logSigningInfo(ctx)
}
return ctx.SignedHeaderVals, nil
}
func (ctx *signingCtx) handlePresignRemoval() {
if !ctx.isPresign {
return
}
// The credentials have expired for this request. The current signature
// is invalid and needs to be redone, because the request would otherwise fail.
ctx.removePresign()
// Update the request's query string to ensure the values stay in
// sync in case retrieving the new credentials fails.
ctx.Request.URL.RawQuery = ctx.Query.Encode()
}
func (ctx *signingCtx) assignAmzQueryValues() {
if ctx.isPresign {
ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
if ctx.credValues.SessionToken != "" {
ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
} else {
ctx.Query.Del("X-Amz-Security-Token")
}
return
}
if ctx.credValues.SessionToken != "" {
ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
}
}
// SignRequestHandler is a named request handler the SDK will use to sign
// service client requests with the V4 signature.
var SignRequestHandler = request.NamedHandler{
Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
}
// SignSDKRequest signs an AWS request with the V4 signature. This
// request handler is best used only with the SDK's built-in service clients'
// API operation requests.
//
// This function should not be used on its own, but in conjunction with
// an AWS service client's API operation call. To sign a standalone request
// not created by a service client's API operation method, use the "Sign" or
// "Presign" functions of the "Signer" type.
//
// If the credentials of the request's config are set to
// credentials.AnonymousCredentials the request will not be signed.
func SignSDKRequest(req *request.Request) {
signSDKRequestWithCurrTime(req, time.Now)
}
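// For reference, the service clients updated in this change set attach the
// handler like so (see the S3 and STS client constructors later in this diff):
//
//	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)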
func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time) {
// If the AnonymousCredentials object is used the request does not need
// to be signed, so skip signing it.
if req.Config.Credentials == credentials.AnonymousCredentials {
return
}
region := req.ClientInfo.SigningRegion
if region == "" {
region = aws.StringValue(req.Config.Region)
}
name := req.ClientInfo.SigningName
if name == "" {
name = req.ClientInfo.ServiceName
}
v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
v4.Debug = req.Config.LogLevel.Value()
v4.Logger = req.Config.Logger
v4.DisableHeaderHoisting = req.NotHoist
v4.currentTimeFn = curTimeFn
})
signingTime := req.Time
if !req.LastSignedAt.IsZero() {
signingTime = req.LastSignedAt
}
signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.Body, name, region, req.ExpireTime, signingTime)
if err != nil {
req.Error = err
req.SignedHeaderVals = nil
return
}
req.SignedHeaderVals = signedHeaders
req.LastSignedAt = curTimeFn()
}
const logSignInfoMsg = `DEBUG: Request Signature:
---[ CANONICAL STRING ]-----------------------------
%s
---[ STRING TO SIGN ]--------------------------------
%s%s
-----------------------------------------------------`
const logSignedURLMsg = `
---[ SIGNED URL ]------------------------------------
%s`
func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
signedURLMsg := ""
if ctx.isPresign {
signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
}
msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
v4.Logger.Log(msg)
}
func (ctx *signingCtx) build(disableHeaderHoisting bool) {
ctx.buildTime() // no depends
ctx.buildCredentialString() // no depends
unsignedHeaders := ctx.Request.Header
if ctx.isPresign {
if !disableHeaderHoisting {
urlValues := url.Values{}
urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
for k := range urlValues {
ctx.Query[k] = urlValues[k]
}
}
}
ctx.buildBodyDigest()
ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
ctx.buildCanonicalString() // depends on canon headers / signed headers
ctx.buildStringToSign() // depends on canon string
ctx.buildSignature() // depends on string to sign
if ctx.isPresign {
ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature
} else {
parts := []string{
authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
"SignedHeaders=" + ctx.signedHeaders,
"Signature=" + ctx.signature,
}
ctx.Request.Header.Set("Authorization", strings.Join(parts, ", "))
}
}
func (ctx *signingCtx) buildTime() {
ctx.formattedTime = ctx.Time.UTC().Format(timeFormat)
ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat)
if ctx.isPresign {
duration := int64(ctx.ExpireTime / time.Second)
ctx.Query.Set("X-Amz-Date", ctx.formattedTime)
ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
} else {
ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime)
}
}
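// For illustration, with the formats above a signing time of
// 2016-11-05 18:35:34 UTC (a hypothetical value) yields an X-Amz-Date of
// "20161105T183534Z" and a short date of "20161105".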
func (ctx *signingCtx) buildCredentialString() {
ctx.credentialString = strings.Join([]string{
ctx.formattedShortTime,
ctx.Region,
ctx.ServiceName,
"aws4_request",
}, "/")
if ctx.isPresign {
ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
}
}
func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
query := url.Values{}
unsignedHeaders := http.Header{}
for k, h := range header {
if r.IsValid(k) {
query[k] = h
} else {
unsignedHeaders[k] = h
}
}
return query, unsignedHeaders
}
func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
var headers []string
headers = append(headers, "host")
for k, v := range header {
canonicalKey := http.CanonicalHeaderKey(k)
if !r.IsValid(canonicalKey) {
continue // ignored header
}
if ctx.SignedHeaderVals == nil {
ctx.SignedHeaderVals = make(http.Header)
}
lowerCaseKey := strings.ToLower(k)
if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
// include additional values
ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
continue
}
headers = append(headers, lowerCaseKey)
ctx.SignedHeaderVals[lowerCaseKey] = v
}
sort.Strings(headers)
ctx.signedHeaders = strings.Join(headers, ";")
if ctx.isPresign {
ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders)
}
headerValues := make([]string, len(headers))
for i, k := range headers {
if k == "host" {
headerValues[i] = "host:" + ctx.Request.URL.Host
} else {
headerValues[i] = k + ":" +
strings.Join(ctx.SignedHeaderVals[k], ",")
}
}
ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n")
}
func (ctx *signingCtx) buildCanonicalString() {
ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
uri := ctx.Request.URL.Opaque
if uri != "" {
uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
} else {
uri = ctx.Request.URL.Path
}
if uri == "" {
uri = "/"
}
if ctx.ServiceName != "s3" {
uri = rest.EscapePath(uri, false)
}
ctx.canonicalString = strings.Join([]string{
ctx.Request.Method,
uri,
ctx.Request.URL.RawQuery,
ctx.canonicalHeaders + "\n",
ctx.signedHeaders,
ctx.bodyDigest,
}, "\n")
}
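// The canonical string built above therefore has the following shape, one
// field per line (descriptive sketch only; the canonical headers block is
// itself multi-line and is followed by a blank line):
//
//	<HTTPMethod>
//	<URIPath>
//	<RawQuery>
//	<CanonicalHeaders>
//
//	<SignedHeaders>
//	<BodyDigest>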
func (ctx *signingCtx) buildStringToSign() {
ctx.stringToSign = strings.Join([]string{
authHeaderPrefix,
ctx.formattedTime,
ctx.credentialString,
hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))),
}, "\n")
}
func (ctx *signingCtx) buildSignature() {
secret := ctx.credValues.SecretAccessKey
date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime))
region := makeHmac(date, []byte(ctx.Region))
service := makeHmac(region, []byte(ctx.ServiceName))
credentials := makeHmac(service, []byte("aws4_request"))
signature := makeHmac(credentials, []byte(ctx.stringToSign))
ctx.signature = hex.EncodeToString(signature)
}
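// For reference, the derivation above is the standard SigV4 signing key chain
// (sketch; the date, region and service values are hypothetical):
//
//	kDate     = HMAC-SHA256("AWS4" + secretAccessKey, "20161105")
//	kRegion   = HMAC-SHA256(kDate, "us-east-1")
//	kService  = HMAC-SHA256(kRegion, "s3")
//	kSigning  = HMAC-SHA256(kService, "aws4_request")
//	signature = hex(HMAC-SHA256(kSigning, stringToSign))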
func (ctx *signingCtx) buildBodyDigest() {
hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
if hash == "" {
if ctx.isPresign && ctx.ServiceName == "s3" {
hash = "UNSIGNED-PAYLOAD"
} else if ctx.Body == nil {
hash = emptyStringSHA256
} else {
hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
}
if ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" {
ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
}
}
ctx.bodyDigest = hash
}
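// Callers that want to skip the body hash entirely (for example when
// presigning an S3 PUT) can pre-set the header before signing; the logic
// above then uses the supplied value as-is (sketch, with a hypothetical req):
//
//	req.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")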
// isRequestSigned returns whether the request is currently signed or presigned
func (ctx *signingCtx) isRequestSigned() bool {
if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" {
return true
}
if ctx.Request.Header.Get("Authorization") != "" {
return true
}
return false
}
// removePresign removes the presigned query string values from the request.
func (ctx *signingCtx) removePresign() {
ctx.Query.Del("X-Amz-Algorithm")
ctx.Query.Del("X-Amz-Signature")
ctx.Query.Del("X-Amz-Security-Token")
ctx.Query.Del("X-Amz-Date")
ctx.Query.Del("X-Amz-Expires")
ctx.Query.Del("X-Amz-Credential")
ctx.Query.Del("X-Amz-SignedHeaders")
}
func makeHmac(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}
func makeSha256(data []byte) []byte {
hash := sha256.New()
hash.Write(data)
return hash.Sum(nil)
}
func makeSha256Reader(reader io.ReadSeeker) []byte {
hash := sha256.New()
start, _ := reader.Seek(0, 1)
defer reader.Seek(start, 0)
io.Copy(hash, reader)
return hash.Sum(nil)
}
const doubleSpaces = "  "
var doubleSpaceBytes = []byte(doubleSpaces)
func stripExcessSpaces(headerVals []string) []string {
vals := make([]string, len(headerVals))
for i, str := range headerVals {
// Trim leading and trailing spaces
trimmed := strings.TrimSpace(str)
idx := strings.Index(trimmed, doubleSpaces)
var buf []byte
for idx > -1 {
// Multiple adjacent spaces found
if buf == nil {
// first time create the buffer
buf = []byte(trimmed)
}
stripToIdx := -1
for j := idx + 1; j < len(buf); j++ {
if buf[j] != ' ' {
buf = append(buf[:idx+1], buf[j:]...)
stripToIdx = j
break
}
}
if stripToIdx >= 0 {
idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes)
if idx >= 0 {
idx += stripToIdx
}
} else {
idx = -1
}
}
if buf != nil {
vals[i] = string(buf)
} else {
vals[i] = trimmed
}
}
return vals
}
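// Example behaviour (illustrative): a header value of " a  b " (leading and
// trailing spaces, two interior spaces) is returned as "a b".
//
//	stripExcessSpaces([]string{" a  b "}) // -> []string{"a b"}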

View File

@ -61,41 +61,23 @@ func (r ReaderSeekerCloser) Close() error {
type WriteAtBuffer struct {
buf []byte
m sync.Mutex
// GrowthCoeff defines the growth rate of the internal buffer. By
// default, the growth rate is 1, where expanding the internal
// buffer will allocate only enough capacity to fit the new expected
// length.
GrowthCoeff float64
}
// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
// provided by buf.
func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
return &WriteAtBuffer{buf: buf}
}
// WriteAt writes a slice of bytes to the buffer starting at the position provided.
// The number of bytes written will be returned, or an error. Previously
// written slices can be overwritten if the writes overlap.
func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
pLen := len(p)
expLen := pos + int64(pLen)
b.m.Lock()
defer b.m.Unlock()
expLen := pos + int64(len(p))
if int64(len(b.buf)) < expLen {
if int64(cap(b.buf)) < expLen {
if b.GrowthCoeff < 1 {
b.GrowthCoeff = 1
}
newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
copy(newBuf, b.buf)
b.buf = newBuf
}
b.buf = b.buf[:expLen]
newBuf := make([]byte, expLen)
copy(newBuf, b.buf)
b.buf = newBuf
}
copy(b.buf[pos:], p)
return pLen, nil
return len(p), nil
}
// Bytes returns a slice of bytes written to the buffer.

View File

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.4.14"
const SDKVersion = "1.1.7"

View File

@ -14,9 +14,9 @@ import (
// normalized endpoint and signing region. If the endpoint is not an empty string
// the service name and region will be used to look up the service's API endpoint.
// If the endpoint is provided the scheme will be added if it is not present.
func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL, useDualStack bool) (normEndpoint, signingRegion string) {
func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (normEndpoint, signingRegion string) {
if endpoint == "" {
return EndpointForRegion(serviceName, region, disableSSL, useDualStack)
return EndpointForRegion(serviceName, region, disableSSL)
}
return AddScheme(endpoint, disableSSL), ""
@ -24,17 +24,12 @@ func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL, useDual
// EndpointForRegion returns an endpoint and its signing region for a service and region.
// if the service and region pair are not found endpoint and signingRegion will be empty.
func EndpointForRegion(svcName, region string, disableSSL, useDualStack bool) (endpoint, signingRegion string) {
dualStackField := ""
if useDualStack {
dualStackField = "/dualstack"
}
func EndpointForRegion(svcName, region string, disableSSL bool) (endpoint, signingRegion string) {
derivedKeys := []string{
region + "/" + svcName + dualStackField,
region + "/*" + dualStackField,
"*/" + svcName + dualStackField,
"*/*" + dualStackField,
region + "/" + svcName,
region + "/*",
"*/" + svcName,
"*/*",
}
for _, key := range derivedKeys {

View File

@ -8,9 +8,6 @@
"endpoint": "{service}.{region}.amazonaws.com.cn",
"signatureVersion": "v4"
},
"cn-north-1/ec2metadata": {
"endpoint": "http://169.254.169.254/latest"
},
"us-gov-west-1/iam": {
"endpoint": "iam.us-gov.amazonaws.com"
},
@ -20,9 +17,6 @@
"us-gov-west-1/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"us-gov-west-1/ec2metadata": {
"endpoint": "http://169.254.169.254/latest"
},
"*/cloudfront": {
"endpoint": "cloudfront.amazonaws.com",
"signingRegion": "us-east-1"
@ -36,7 +30,8 @@
"signingRegion": "us-east-1"
},
"*/ec2metadata": {
"endpoint": "http://169.254.169.254/latest"
"endpoint": "http://169.254.169.254/latest",
"signingRegion": "us-east-1"
},
"*/iam": {
"endpoint": "iam.amazonaws.com",
@ -65,9 +60,6 @@
"*/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"*/s3/dualstack": {
"endpoint": "s3.dualstack.{region}.amazonaws.com"
},
"us-east-1/s3": {
"endpoint": "s3.amazonaws.com"
},

View File

@ -31,7 +31,8 @@ var endpointsMap = endpointStruct{
SigningRegion: "us-east-1",
},
"*/ec2metadata": {
Endpoint: "http://169.254.169.254/latest",
Endpoint: "http://169.254.169.254/latest",
SigningRegion: "us-east-1",
},
"*/iam": {
Endpoint: "iam.amazonaws.com",
@ -48,9 +49,6 @@ var endpointsMap = endpointStruct{
"*/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
"*/s3/dualstack": {
Endpoint: "s3.dualstack.{region}.amazonaws.com",
},
"*/sts": {
Endpoint: "sts.amazonaws.com",
SigningRegion: "us-east-1",
@ -62,9 +60,6 @@ var endpointsMap = endpointStruct{
"cn-north-1/*": {
Endpoint: "{service}.{region}.amazonaws.com.cn",
},
"cn-north-1/ec2metadata": {
Endpoint: "http://169.254.169.254/latest",
},
"eu-central-1/s3": {
Endpoint: "{service}.{region}.amazonaws.com",
},
@ -75,9 +70,6 @@ var endpointsMap = endpointStruct{
Endpoint: "sdb.amazonaws.com",
SigningRegion: "us-east-1",
},
"us-gov-west-1/ec2metadata": {
Endpoint: "http://169.254.169.254/latest",
},
"us-gov-west-1/iam": {
Endpoint: "iam.us-gov.amazonaws.com",
},

View File

@ -1,4 +1,4 @@
// Package query provides serialization of AWS query requests, and responses.
// Package query provides serialisation of AWS query requests, and responses.
package query
//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go

View File

@ -2,7 +2,7 @@ package query
import (
"encoding/xml"
"io/ioutil"
"io"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
@ -15,10 +15,6 @@ type xmlErrorResponse struct {
RequestID string `xml:"RequestId"`
}
type xmlServiceUnavailableResponse struct {
XMLName xml.Name `xml:"ServiceUnavailableException"`
}
// UnmarshalErrorHandler is a named request handler to unmarshal request errors
var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
@ -26,16 +22,11 @@ var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalEr
func UnmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err)
return
}
// First check for specific error
resp := xmlErrorResponse{}
decodeErr := xml.Unmarshal(bodyBytes, &resp)
if decodeErr == nil {
resp := &xmlErrorResponse{}
err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
if err != nil && err != io.EOF {
r.Error = awserr.New("SerializationError", "failed to decode query XML error response", err)
} else {
reqID := resp.RequestID
if reqID == "" {
reqID = r.RequestID
@ -45,22 +36,5 @@ func UnmarshalError(r *request.Request) {
r.HTTPResponse.StatusCode,
reqID,
)
return
}
// Check for unhandled error
servUnavailResp := xmlServiceUnavailableResponse{}
unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp)
if unavailErr == nil {
r.Error = awserr.NewRequestFailure(
awserr.New("ServiceUnavailableException", "service is unavailable", nil),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
// Failed to retrieve any error message from the response body
r.Error = awserr.New("SerializationError",
"failed to decode query XML error response", decodeErr)
}

View File

@ -222,7 +222,8 @@ func EscapePath(path string, encodeSep bool) string {
if noEscape[c] || (c == '/' && !encodeSep) {
buf.WriteByte(c)
} else {
fmt.Fprintf(&buf, "%%%02X", c)
buf.WriteByte('%')
buf.WriteString(strings.ToUpper(strconv.FormatUint(uint64(c), 16)))
}
}
return buf.String()

View File

@ -3,7 +3,6 @@ package rest
import (
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"net/http"
"reflect"
@ -52,7 +51,6 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
if payload.IsValid() {
switch payload.Interface().(type) {
case []byte:
defer r.HTTPResponse.Body.Close()
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
@ -60,7 +58,6 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
payload.Set(reflect.ValueOf(b))
}
case *string:
defer r.HTTPResponse.Body.Close()
b, err := ioutil.ReadAll(r.HTTPResponse.Body)
if err != nil {
r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
@ -75,8 +72,6 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
case "aws.ReadSeekCloser", "io.ReadCloser":
payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
default:
io.Copy(ioutil.Discard, r.HTTPResponse.Body)
defer r.HTTPResponse.Body.Close()
r.Error = awserr.New("SerializationError",
"failed to decode REST response",
fmt.Errorf("unknown payload type %s", payload.Type()))

View File

@ -1,4 +1,4 @@
// Package restxml provides RESTful XML serialization of AWS
// Package restxml provides RESTful XML serialisation of AWS
// requests and responses.
package restxml

View File

@ -1,4 +1,4 @@
// Package xmlutil provides XML serialization of AWS requests and responses.
// Package xmlutil provides XML serialisation of AWS requests and responses.
package xmlutil
import (

File diff suppressed because it is too large Load Diff

View File

@ -6,41 +6,32 @@ import (
)
func init() {
initClient = defaultInitClientFn
initRequest = defaultInitRequestFn
}
initClient = func(c *client.Client) {
// Support building custom host-style bucket endpoints
c.Handlers.Build.PushFront(updateHostWithBucket)
func defaultInitClientFn(c *client.Client) {
// Support building custom endpoints based on config
c.Handlers.Build.PushFront(updateEndpointForS3Config)
// Require SSL when using SSE keys
c.Handlers.Validate.PushBack(validateSSERequiresSSL)
c.Handlers.Build.PushBack(computeSSEKeys)
// Require SSL when using SSE keys
c.Handlers.Validate.PushBack(validateSSERequiresSSL)
c.Handlers.Build.PushBack(computeSSEKeys)
// S3 uses custom error unmarshaling logic
c.Handlers.UnmarshalError.Clear()
c.Handlers.UnmarshalError.PushBack(unmarshalError)
}
// S3 uses custom error unmarshaling logic
c.Handlers.UnmarshalError.Clear()
c.Handlers.UnmarshalError.PushBack(unmarshalError)
}
func defaultInitRequestFn(r *request.Request) {
// Add request handlers for specific platforms.
// e.g. 100-continue support for PUT requests using Go 1.6
platformRequestHandlers(r)
switch r.Operation.Name {
case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy,
opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration,
opPutBucketReplication:
// These S3 operations require Content-MD5 to be set
r.Handlers.Build.PushBack(contentMD5)
case opGetBucketLocation:
// GetBucketLocation has custom parsing logic
r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
case opCreateBucket:
// Auto-populate LocationConstraint with current region
r.Handlers.Validate.PushFront(populateLocationConstraint)
case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
initRequest = func(r *request.Request) {
switch r.Operation.Name {
case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration:
// These S3 operations require Content-MD5 to be set
r.Handlers.Build.PushBack(contentMD5)
case opGetBucketLocation:
// GetBucketLocation has custom parsing logic
r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
case opCreateBucket:
// Auto-populate LocationConstraint with current region
r.Handlers.Validate.PushFront(populateLocationConstraint)
case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
}
}
}

View File

@ -1,132 +1,14 @@
package s3
import (
"fmt"
"net/url"
"regexp"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/request"
)
// operationBlacklist is a list of operation names that a
// request handler should not be executed with.
type operationBlacklist []string
// Continue will return true if the Request's operation name is not
// in the blacklist. False otherwise.
func (b operationBlacklist) Continue(r *request.Request) bool {
for i := 0; i < len(b); i++ {
if b[i] == r.Operation.Name {
return false
}
}
return true
}
var accelerateOpBlacklist = operationBlacklist{
opListBuckets, opCreateBucket, opDeleteBucket,
}
// Request handler to automatically add the bucket name to the endpoint domain
// if possible. This style of bucket is valid for all bucket names which are
// DNS compatible and do not contain "."
func updateEndpointForS3Config(r *request.Request) {
forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
accelerate := aws.BoolValue(r.Config.S3UseAccelerate)
useDualStack := aws.BoolValue(r.Config.UseDualStack)
if useDualStack && accelerate {
r.Error = awserr.New("InvalidParameterException",
fmt.Sprintf("configuration aws.Config.UseDualStack is not compatible with aws.Config.Accelerate"),
nil)
return
}
if accelerate && accelerateOpBlacklist.Continue(r) {
if forceHostStyle {
if r.Config.Logger != nil {
r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.")
}
}
updateEndpointForAccelerate(r)
} else if !forceHostStyle && r.Operation.Name != opGetBucketLocation {
updateEndpointForHostStyle(r)
}
}
func updateEndpointForHostStyle(r *request.Request) {
bucket, ok := bucketNameFromReqParams(r.Params)
if !ok {
// Ignore operation requests if the bucket name was not provided;
// if this is an input validation error the validation handler
// will report it.
return
}
if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
// bucket name must be valid to put into the host
return
}
moveBucketToHost(r.HTTPRequest.URL, bucket)
}
func updateEndpointForAccelerate(r *request.Request) {
bucket, ok := bucketNameFromReqParams(r.Params)
if !ok {
// Ignore operation requests if the bucket name was not provided;
// if this is an input validation error the validation handler
// will report it.
return
}
if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
r.Error = awserr.New("InvalidParameterException",
fmt.Sprintf("bucket name %s is not compatibile with S3 Accelerate", bucket),
nil)
return
}
// Change endpoint from s3(-[a-z0-1-])?.amazonaws.com to s3-accelerate.amazonaws.com
r.HTTPRequest.URL.Host = replaceHostRegion(r.HTTPRequest.URL.Host, "accelerate")
moveBucketToHost(r.HTTPRequest.URL, bucket)
}
// Attempts to retrieve the bucket name from the request input parameters.
// If no bucket is found, or the field is empty "", false will be returned.
func bucketNameFromReqParams(params interface{}) (string, bool) {
b, _ := awsutil.ValuesAtPath(params, "Bucket")
if len(b) == 0 {
return "", false
}
if bucket, ok := b[0].(*string); ok {
if bucketStr := aws.StringValue(bucket); bucketStr != "" {
return bucketStr, true
}
}
return "", false
}
// hostCompatibleBucketName returns true if the request should
// put the bucket in the host. This is false if S3ForcePathStyle is
// explicitly set or if the bucket is not DNS compatible.
func hostCompatibleBucketName(u *url.URL, bucket string) bool {
// Bucket might be DNS compatible but dots in the hostname will fail
// certificate validation, so do not use host-style.
if u.Scheme == "https" && strings.Contains(bucket, ".") {
return false
}
// if the bucket is DNS compatible
return dnsCompatibleBucketName(bucket)
}
var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
@ -138,36 +20,41 @@ func dnsCompatibleBucketName(bucket string) bool {
!strings.Contains(bucket, "..")
}
// moveBucketToHost moves the bucket name from the URI path to URL host.
func moveBucketToHost(u *url.URL, bucket string) {
u.Host = bucket + "." + u.Host
u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
if u.Path == "" {
u.Path = "/"
// hostStyleBucketName returns true if the request should put the bucket in
// the host. This is false if S3ForcePathStyle is explicitly set or if the
// bucket is not DNS compatible.
func hostStyleBucketName(r *request.Request, bucket string) bool {
if aws.BoolValue(r.Config.S3ForcePathStyle) {
return false
}
// Bucket might be DNS compatible but dots in the hostname will fail
// certificate validation, so do not use host-style.
if r.HTTPRequest.URL.Scheme == "https" && strings.Contains(bucket, ".") {
return false
}
// GetBucketLocation should be able to be called from any region within
// a partition, and return the associated region of the bucket.
if r.Operation.Name == opGetBucketLocation {
return false
}
// Use host-style if the bucket is DNS compatible
return dnsCompatibleBucketName(bucket)
}
const s3HostPrefix = "s3"
// replaceHostRegion replaces the S3 region string in the host with the
// value provided. If v is empty the host prefix returned will be s3.
func replaceHostRegion(host, v string) string {
if !strings.HasPrefix(host, s3HostPrefix) {
return host
func updateHostWithBucket(r *request.Request) {
b, _ := awsutil.ValuesAtPath(r.Params, "Bucket")
if len(b) == 0 {
return
}
suffix := host[len(s3HostPrefix):]
for i := len(s3HostPrefix); i < len(host); i++ {
if host[i] == '.' {
// Trim until '.', leaving it in place.
suffix = host[i:]
break
if bucket := b[0].(*string); aws.StringValue(bucket) != "" && hostStyleBucketName(r, *bucket) {
r.HTTPRequest.URL.Host = *bucket + "." + r.HTTPRequest.URL.Host
r.HTTPRequest.URL.Path = strings.Replace(r.HTTPRequest.URL.Path, "/{Bucket}", "", -1)
if r.HTTPRequest.URL.Path == "" {
r.HTTPRequest.URL.Path = "/"
}
}
if len(v) == 0 {
return fmt.Sprintf("s3%s", suffix)
}
return fmt.Sprintf("s3-%s%s", v, suffix)
}

View File

@ -1,8 +0,0 @@
// +build !go1.6
package s3
import "github.com/aws/aws-sdk-go/aws/request"
func platformRequestHandlers(r *request.Request) {
}

View File

@ -1,28 +0,0 @@
// +build go1.6
package s3
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
)
func platformRequestHandlers(r *request.Request) {
if r.Operation.HTTPMethod == "PUT" {
// 100-Continue should only be used on put requests.
r.Handlers.Sign.PushBack(add100Continue)
}
}
func add100Continue(r *request.Request) {
if aws.BoolValue(r.Config.S3Disable100Continue) {
return
}
if r.HTTPRequest.ContentLength < 1024*1024*2 {
// Ignore requests smaller than 2MB. This helps prevent delaying
// requests unnecessarily.
return
}
r.HTTPRequest.Header.Set("Expect", "100-Continue")
}

View File

@ -1,11 +1,6 @@
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
// Package s3iface provides an interface to enable mocking the Amazon Simple Storage Service service client
// for testing your code.
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
// and waiters.
// Package s3iface provides an interface for the Amazon Simple Storage Service.
package s3iface
import (
@ -13,51 +8,7 @@ import (
"github.com/aws/aws-sdk-go/service/s3"
)
// S3API provides an interface to enable mocking the
// s3.S3 service client's API operation,
// paginators, and waiters. This make unit testing your code that calls out
// to the SDK's service client's calls easier.
//
// The best way to use this interface is so the SDK's service client's calls
// can be stubbed out for unit testing your code with the SDK without needing
// to inject custom request handlers into the SDK's request pipeline.
//
// // myFunc uses an SDK service client to make a request to
// // Amazon Simple Storage Service.
// func myFunc(svc s3iface.S3API) bool {
// // Make svc.AbortMultipartUpload request
// }
//
// func main() {
// sess := session.New()
// svc := s3.New(sess)
//
// myFunc(svc)
// }
//
// In your _test.go file:
//
// // Define a mock struct to be used in your unit tests of myFunc.
// type mockS3Client struct {
// s3iface.S3API
// }
// func (m *mockS3Client) AbortMultipartUpload(input *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) {
// // mock response/functionality
// }
//
// TestMyFunc(t *testing.T) {
// // Setup Test
// mockSvc := &mockS3Client{}
//
// myfunc(mockSvc)
//
// // Verify myFunc's functionality
// }
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
// and waiters. It's suggested to use the pattern above for testing, or using
// tooling to generate mocks to satisfy the interfaces.
// S3API is the interface type for s3.S3.
type S3API interface {
AbortMultipartUploadRequest(*s3.AbortMultipartUploadInput) (*request.Request, *s3.AbortMultipartUploadOutput)
@ -115,10 +66,6 @@ type S3API interface {
DeleteObjects(*s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error)
GetBucketAccelerateConfigurationRequest(*s3.GetBucketAccelerateConfigurationInput) (*request.Request, *s3.GetBucketAccelerateConfigurationOutput)
GetBucketAccelerateConfiguration(*s3.GetBucketAccelerateConfigurationInput) (*s3.GetBucketAccelerateConfigurationOutput, error)
GetBucketAclRequest(*s3.GetBucketAclInput) (*request.Request, *s3.GetBucketAclOutput)
GetBucketAcl(*s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error)
@ -217,22 +164,12 @@ type S3API interface {
ListObjectsPages(*s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool) error
ListObjectsV2Request(*s3.ListObjectsV2Input) (*request.Request, *s3.ListObjectsV2Output)
ListObjectsV2(*s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error)
ListObjectsV2Pages(*s3.ListObjectsV2Input, func(*s3.ListObjectsV2Output, bool) bool) error
ListPartsRequest(*s3.ListPartsInput) (*request.Request, *s3.ListPartsOutput)
ListParts(*s3.ListPartsInput) (*s3.ListPartsOutput, error)
ListPartsPages(*s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool) error
PutBucketAccelerateConfigurationRequest(*s3.PutBucketAccelerateConfigurationInput) (*request.Request, *s3.PutBucketAccelerateConfigurationOutput)
PutBucketAccelerateConfiguration(*s3.PutBucketAccelerateConfigurationInput) (*s3.PutBucketAccelerateConfigurationOutput, error)
PutBucketAclRequest(*s3.PutBucketAclInput) (*request.Request, *s3.PutBucketAclOutput)
PutBucketAcl(*s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error)
@ -304,14 +241,6 @@ type S3API interface {
UploadPartCopyRequest(*s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput)
UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error)
WaitUntilBucketExists(*s3.HeadBucketInput) error
WaitUntilBucketNotExists(*s3.HeadBucketInput) error
WaitUntilObjectExists(*s3.HeadObjectInput) error
WaitUntilObjectNotExists(*s3.HeadObjectInput) error
}
var _ S3API = (*s3.S3)(nil)

View File

@ -8,7 +8,6 @@ import (
"strings"
"sync"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/client"
@ -35,7 +34,7 @@ type Downloader struct {
PartSize int64
// The number of goroutines to spin up in parallel when sending parts.
// If this is set to zero, the DefaultDownloadConcurrency value will be used.
// If this is set to zero, the DefaultConcurrency value will be used.
Concurrency int
// An S3 client to use when performing downloads.
@ -50,13 +49,13 @@ type Downloader struct {
//
// Example:
// // The session the S3 Downloader will use
// sess, err := session.NewSession()
// sess := session.New()
//
// // Create a downloader with the session and default options
// downloader := s3manager.NewDownloader(sess)
//
// // Create a downloader with the session and custom options
// downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) {
// downloader := s3manager.NewDownloader(sess, func(d *s3manager.Uploader) {
// d.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader {
@ -78,17 +77,14 @@ func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downl
// to make S3 API calls.
//
// Example:
// // The session the S3 Downloader will use
// sess, err := session.NewSession()
//
// // The S3 client the S3 Downloader will use
// s3Svc := s3.new(sess)
// s3Svc := s3.new(session.New())
//
// // Create a downloader with the s3 client and default options
// downloader := s3manager.NewDownloaderWithClient(s3Svc)
//
// // Create a downloader with the s3 client and custom options
// downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Downloader) {
// downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Uploader) {
// d.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader {
@ -104,16 +100,12 @@ func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *D
return d
}
type maxRetrier interface {
MaxRetries() int
}
// Download downloads an object in S3 and writes the payload into w using
// concurrent GET requests.
//
// Additional functional options can be provided to configure the individual
// download. These options are copies of the Downloader instance Download is called from.
// Modifying the options will not impact the original Downloader instance.
// upload. These options are copies of the Uploader instance Upload is called from.
// Modifying the options will not impact the original Uploader instance.
//
// It is safe to call this method concurrently across goroutines.
//
@ -126,19 +118,6 @@ func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ..
option(&impl.ctx)
}
if s, ok := d.S3.(maxRetrier); ok {
impl.partBodyMaxRetries = s.MaxRetries()
}
impl.totalBytes = -1
if impl.ctx.Concurrency == 0 {
impl.ctx.Concurrency = DefaultDownloadConcurrency
}
if impl.ctx.PartSize == 0 {
impl.ctx.PartSize = DefaultDownloadPartSize
}
return impl.download()
}
@ -156,13 +135,26 @@ type downloader struct {
totalBytes int64
written int64
err error
}
partBodyMaxRetries int
// init initializes the downloader with default options.
func (d *downloader) init() {
d.totalBytes = -1
if d.ctx.Concurrency == 0 {
d.ctx.Concurrency = DefaultDownloadConcurrency
}
if d.ctx.PartSize == 0 {
d.ctx.PartSize = DefaultDownloadPartSize
}
}
// download performs the implementation of the object download across ranged
// GETs.
func (d *downloader) download() (n int64, err error) {
d.init()
// Spin off first worker to check additional header information
d.getChunk()
@ -178,7 +170,7 @@ func (d *downloader) download() (n int64, err error) {
// Assign work
for d.getErr() == nil {
if d.pos >= total {
break // We're finished queuing chunks
break // We're finished queueing chunks
}
// Queue the next range of bytes to read.
@ -219,82 +211,49 @@ func (d *downloader) downloadPart(ch chan dlchunk) {
defer d.wg.Done()
for {
chunk, ok := <-ch
if !ok || d.getErr() != nil {
break
}
if err := d.downloadChunk(chunk); err != nil {
d.setErr(err)
if !ok {
break
}
d.downloadChunk(chunk)
}
}
// getChunk grabs a chunk of data from the body.
// Not thread safe. Should only be used when grabbing data on a single thread.
func (d *downloader) getChunk() {
if d.getErr() != nil {
return
}
chunk := dlchunk{w: d.w, start: d.pos, size: d.ctx.PartSize}
d.pos += d.ctx.PartSize
if err := d.downloadChunk(chunk); err != nil {
d.setErr(err)
}
d.downloadChunk(chunk)
}
// downloadChunk downloads the chunk from S3
func (d *downloader) downloadChunk(chunk dlchunk) error {
func (d *downloader) downloadChunk(chunk dlchunk) {
if d.getErr() != nil {
return
}
// Get the next byte range of data
in := &s3.GetObjectInput{}
awsutil.Copy(in, d.in)
// Get the next byte range of data
rng := fmt.Sprintf("bytes=%d-%d", chunk.start, chunk.start+chunk.size-1)
rng := fmt.Sprintf("bytes=%d-%d",
chunk.start, chunk.start+chunk.size-1)
in.Range = &rng
var n int64
var err error
for retry := 0; retry <= d.partBodyMaxRetries; retry++ {
req, resp := d.ctx.S3.GetObjectRequest(in)
req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
req, resp := d.ctx.S3.GetObjectRequest(in)
req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
err := req.Send()
err = req.Send()
if err != nil {
return err
}
if err != nil {
d.setErr(err)
} else {
d.setTotalBytes(resp) // Set total if not yet set.
n, err = io.Copy(&chunk, resp.Body)
n, err := io.Copy(&chunk, resp.Body)
resp.Body.Close()
if err == nil {
break
if err != nil {
d.setErr(err)
}
chunk.cur = 0
logMessage(d.ctx.S3, aws.LogDebugWithRequestRetries,
fmt.Sprintf("DEBUG: object part body download interrupted %s, err, %v, retrying attempt %d",
aws.StringValue(in.Key), err, retry))
}
d.incrWritten(n)
return err
}
func logMessage(svc s3iface.S3API, level aws.LogLevelType, msg string) {
s, ok := svc.(*s3.S3)
if !ok {
return
}
if s.Config.Logger == nil {
return
}
if s.Config.LogLevel.Matches(level) {
s.Config.Logger.Log(msg)
d.incrWritten(n)
}
}

View File

@ -8,7 +8,6 @@ import (
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/client"
@ -43,7 +42,7 @@ const DefaultUploadConcurrency = 5
// u := s3manager.NewUploader(opts)
// output, err := u.upload(input)
// if err != nil {
// if multierr, ok := err.(s3manager.MultiUploadFailure); ok {
// if multierr, ok := err.(MultiUploadFailure); ok {
// // Process error and its associated uploadID
// fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID())
// } else {
@ -166,7 +165,7 @@ type UploadInput struct {
// requests for an object protected by AWS KMS will fail if not made via SSL
// or using SigV4. Documentation on configuring any of the officially supported
// AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
// The Server-side encryption algorithm used when storing this object in S3
// (e.g., AES256, aws:kms).
@ -209,7 +208,7 @@ type Uploader struct {
PartSize int64
// The number of goroutines to spin up in parallel when sending parts.
// If this is set to zero, the DefaultUploadConcurrency value will be used.
// If this is set to zero, the DefaultConcurrency value will be used.
Concurrency int
// Setting this value to true will cause the SDK to avoid calling
@ -238,7 +237,7 @@ type Uploader struct {
//
// Example:
// // The session the S3 Uploader will use
// sess, err := session.NewSession()
// sess := session.New()
//
// // Create an uploader with the session and default options
// uploader := s3manager.NewUploader(sess)
@ -268,11 +267,8 @@ func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader
// a S3 service client to make S3 API calls.
//
// Example:
// // The session the S3 Uploader will use
// sess, err := session.NewSession()
//
// // S3 service client the Upload manager will use.
// s3Svc := s3.New(sess)
// s3Svc := s3.New(session.New())
//
// // Create an uploader with S3 client and default options
// uploader := s3manager.NewUploaderWithClient(s3Svc)
@ -321,7 +317,7 @@ func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploa
// // Perform upload with options different than those in the Uploader.
// result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) {
// u.PartSize = 10 * 1024 * 1024 // 10MB part size
// u.LeavePartsOnError = true // Don't delete the parts if the upload fails.
// u.LeavePartsOnError = true // Dont delete the parts if the upload fails.
// })
func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) {
i := uploader{in: input, ctx: u}
@ -354,15 +350,15 @@ func (u *uploader) upload() (*UploadOutput, error) {
}
// Do one read to determine if we have more than one part
reader, _, err := u.nextReader()
if err == io.EOF { // single part
return u.singlePart(reader)
buf, err := u.nextReader()
if err == io.EOF || err == io.ErrUnexpectedEOF { // single part
return u.singlePart(buf)
} else if err != nil {
return nil, awserr.New("ReadRequestBody", "read upload data failed", err)
}
mu := multiuploader{uploader: u}
return mu.upload(reader)
return mu.upload(buf)
}
// init will initialize all default options.
@ -408,49 +404,38 @@ func (u *uploader) initSize() {
// This operation increases the shared u.readerPos counter, but note that it
// does not need to be wrapped in a mutex because nextReader is only called
// from the main thread.
func (u *uploader) nextReader() (io.ReadSeeker, int, error) {
type readerAtSeeker interface {
io.ReaderAt
io.ReadSeeker
}
func (u *uploader) nextReader() (io.ReadSeeker, error) {
switch r := u.in.Body.(type) {
case readerAtSeeker:
case io.ReaderAt:
var err error
n := u.ctx.PartSize
if u.totalSize >= 0 {
bytesLeft := u.totalSize - u.readerPos
if bytesLeft <= u.ctx.PartSize {
if bytesLeft == 0 {
err = io.EOF
n = bytesLeft
} else if bytesLeft <= u.ctx.PartSize {
err = io.ErrUnexpectedEOF
n = bytesLeft
}
}
reader := io.NewSectionReader(r, u.readerPos, n)
buf := io.NewSectionReader(r, u.readerPos, n)
u.readerPos += n
return reader, int(n), err
return buf, err
default:
part := make([]byte, u.ctx.PartSize)
n, err := readFillBuf(r, part)
packet := make([]byte, u.ctx.PartSize)
n, err := io.ReadFull(u.in.Body, packet)
u.readerPos += int64(n)
return bytes.NewReader(part[0:n]), n, err
return bytes.NewReader(packet[0:n]), err
}
}
func readFillBuf(r io.Reader, b []byte) (offset int, err error) {
for offset < len(b) && err == nil {
var n int
n, err = r.Read(b[offset:])
offset += n
}
return offset, err
}
// singlePart contains upload logic for uploading a single chunk via
// a regular PutObject request. Multipart requests require at least two
// parts, or at least 5MB of data.
@ -522,9 +507,7 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) {
ch <- chunk{buf: firstBuf, num: num}
// Read and queue the rest of the parts
var err error
for u.geterr() == nil && err == nil {
num++
for u.geterr() == nil {
// This upload exceeded maximum number of supported parts, error now.
if num > int64(u.ctx.MaxUploadParts) || num > int64(MaxUploadParts) {
var msg string
@ -538,27 +521,22 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) {
u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
break
}
num++
var reader io.ReadSeeker
var nextChunkLen int
reader, nextChunkLen, err = u.nextReader()
buf, err := u.nextReader()
if err == io.EOF {
break
}
if err != nil && err != io.EOF {
ch <- chunk{buf: buf, num: num}
if err != nil && err != io.ErrUnexpectedEOF {
u.seterr(awserr.New(
"ReadRequestBody",
"read multipart upload data failed",
err))
break
}
if nextChunkLen == 0 {
// No need to upload empty part, if file was empty to start
// with empty single part would of been created and never
// started multipart upload.
break
}
ch <- chunk{buf: reader, num: num}
}
// Close the channel, wait for workers, and complete upload
@ -576,7 +554,7 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) {
}
}
return &UploadOutput{
Location: aws.StringValue(complete.Location),
Location: *complete.Location,
VersionID: complete.VersionId,
UploadID: u.uploadID,
}, nil

View File

@ -7,8 +7,8 @@ import (
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/signer/v4"
"github.com/aws/aws-sdk-go/private/protocol/restxml"
"github.com/aws/aws-sdk-go/private/signer/v4"
)
// S3 is a client for Amazon S3.
@ -58,7 +58,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
}
// Handlers
svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
svc.Handlers.Sign.PushBack(v4.Sign)
svc.Handlers.Build.PushBackNamed(restxml.BuildHandler)
svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)

View File

@ -4,7 +4,6 @@ import (
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
@ -21,10 +20,7 @@ type xmlErrorResponse struct {
func unmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)
// Bucket exists in a different region, and request needs
// to be made to the correct region.
if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
r.Error = awserr.NewRequestFailure(
awserr.New("BucketRegionError",
@ -37,29 +33,25 @@ func unmarshalError(r *request.Request) {
return
}
var errCode, errMsg string
if r.HTTPResponse.ContentLength == 0 {
// No body, use status code to generate an awserr.Error
r.Error = awserr.NewRequestFailure(
awserr.New(strings.Replace(r.HTTPResponse.Status, " ", "", -1), r.HTTPResponse.Status, nil),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
// Attempt to parse error from body if it is known
resp := &xmlErrorResponse{}
err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
if err != nil && err != io.EOF {
errCode = "SerializationError"
errMsg = "failed to decode S3 XML error response"
r.Error = awserr.New("SerializationError", "failed to decode S3 XML error response", nil)
} else {
errCode = resp.Code
errMsg = resp.Message
r.Error = awserr.NewRequestFailure(
awserr.New(resp.Code, resp.Message, nil),
r.HTTPResponse.StatusCode,
r.RequestID,
)
}
// Fallback to status code converted to message if still no error code
if len(errCode) == 0 {
statusText := http.StatusText(r.HTTPResponse.StatusCode)
errCode = strings.Replace(statusText, " ", "", -1)
errMsg = statusText
}
r.Error = awserr.NewRequestFailure(
awserr.New(errCode, errMsg, nil),
r.HTTPResponse.StatusCode,
r.RequestID,
)
}

View File

@ -18,12 +18,6 @@ func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
Argument: "",
Expected: 200,
},
{
State: "success",
Matcher: "status",
Argument: "",
Expected: 301,
},
{
State: "success",
Matcher: "status",

File diff suppressed because it is too large Load Diff

View File

@ -1,12 +0,0 @@
package sts
import "github.com/aws/aws-sdk-go/aws/request"
func init() {
initRequest = func(r *request.Request) {
switch r.Operation.Name {
case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity:
r.Handlers.Sign.Clear() // these operations are unsigned
}
}
}

View File

@ -1,130 +0,0 @@
// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package sts
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/signer/v4"
"github.com/aws/aws-sdk-go/private/protocol/query"
)
// The AWS Security Token Service (STS) is a web service that enables you to
// request temporary, limited-privilege credentials for AWS Identity and Access
// Management (IAM) users or for users that you authenticate (federated users).
// This guide provides descriptions of the STS API. For more detailed information
// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
//
// As an alternative to using the API, you can use one of the AWS SDKs, which
// consist of libraries and sample code for various programming languages and
// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
// way to create programmatic access to STS. For example, the SDKs take care
// of cryptographically signing requests, managing errors, and retrying requests
// automatically. For information about the AWS SDKs, including how to download
// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/).
//
// For information about setting up signatures and authorization through the
// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
// in the AWS General Reference. For general information about the Query API,
// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
// in Using IAM. For information about using security tokens with other AWS
// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
// in the IAM User Guide.
//
// If you're new to AWS and need additional technical information about a specific
// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
// (http://aws.amazon.com/documentation/).
//
// Endpoints
//
// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
// that maps to the US East (N. Virginia) region. Additional regions are available
// and are activated by default. For more information, see Activating and Deactivating
// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
//
// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
// in the AWS General Reference.
//
// Recording API requests
//
// STS supports AWS CloudTrail, which is a service that records AWS calls for
// your AWS account and delivers log files to an Amazon S3 bucket. By using
// information collected by CloudTrail, you can determine what requests were
// successfully made to STS, who made the request, when it was made, and so
// on. To learn more about CloudTrail, including how to turn it on and find
// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
// The service client's operations are safe to be used concurrently.
// It is not safe to mutate any of the client's properties though.
type STS struct {
*client.Client
}
// Used for custom client initialization logic
var initClient func(*client.Client)
// Used for custom request initialization logic
var initRequest func(*request.Request)
// A ServiceName is the name of the service the client will make API calls to.
const ServiceName = "sts"
// New creates a new instance of the STS client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// // Create a STS client from just a session.
// svc := sts.New(mySession)
//
// // Create a STS client with additional configuration
// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
c := p.ClientConfig(ServiceName, cfgs...)
return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *STS {
svc := &STS{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
SigningRegion: signingRegion,
Endpoint: endpoint,
APIVersion: "2011-06-15",
},
handlers,
),
}
// Handlers
svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
svc.Handlers.Build.PushBackNamed(query.BuildHandler)
svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
// Run custom client initialization if present
if initClient != nil {
initClient(svc.Client)
}
return svc
}
// newRequest creates a new request for a STS operation and runs any
// custom request initialization.
func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
// Run custom request initialization if present
if initRequest != nil {
initRequest(req)
}
return req
}

View File

@ -39,5 +39,5 @@ test: install generate-test-pbs
generate-test-pbs:
make install
make -C testdata
protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto
make

View File

@ -84,15 +84,9 @@ func mergeStruct(out, in reflect.Value) {
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
}
if emIn, ok := extendable(in.Addr().Interface()); ok {
emOut, _ := extendable(out.Addr().Interface())
mIn, muIn := emIn.extensionsRead()
if mIn != nil {
mOut := emOut.extensionsWrite()
muIn.Lock()
mergeExtension(mOut, mIn)
muIn.Unlock()
}
if emIn, ok := in.Addr().Interface().(extendableProto); ok {
emOut := out.Addr().Interface().(extendableProto)
mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
}
uf := in.FieldByName("XXX_unrecognized")

View File

@ -61,6 +61,7 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func DecodeVarint(buf []byte) (x uint64, n int) {
// x, n already 0
for shift := uint(0); shift < 64; shift += 7 {
if n >= len(buf) {
return 0, 0
@ -77,7 +78,13 @@ func DecodeVarint(buf []byte) (x uint64, n int) {
return 0, 0
}
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
// x, err already 0
i := p.index
l := len(p.buf)
@ -100,107 +107,6 @@ func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
return
}
// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
i := p.index
buf := p.buf
if i >= len(buf) {
return 0, io.ErrUnexpectedEOF
} else if buf[i] < 0x80 {
p.index++
return uint64(buf[i]), nil
} else if len(buf)-i < 10 {
return p.decodeVarintSlow()
}
var b uint64
// we already checked the first byte
x = uint64(buf[i]) - 0x80
i++
b = uint64(buf[i])
i++
x += b << 7
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 7
b = uint64(buf[i])
i++
x += b << 14
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 14
b = uint64(buf[i])
i++
x += b << 21
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 21
b = uint64(buf[i])
i++
x += b << 28
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 28
b = uint64(buf[i])
i++
x += b << 35
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 35
b = uint64(buf[i])
i++
x += b << 42
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 42
b = uint64(buf[i])
i++
x += b << 49
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 49
b = uint64(buf[i])
i++
x += b << 56
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 56
b = uint64(buf[i])
i++
x += b << 63
if b&0x80 == 0 {
goto done
}
// x -= 0x80 << 63 // Always zero.
return 0, errOverflow
done:
p.index = i
return x, nil
}
// DecodeFixed64 reads a 64-bit integer from the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
@ -434,8 +340,6 @@ func (p *Buffer) DecodeGroup(pb Message) error {
// Buffer and places the decoded result in pb. If the struct
// underlying pb does not match the data in the buffer, the results can be
// unpredictable.
//
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
func (p *Buffer) Unmarshal(pb Message) error {
// If the object can unmarshal itself, let it.
if u, ok := pb.(Unmarshaler); ok {
@ -474,11 +378,6 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
wire := int(u & 0x7)
if wire == WireEndGroup {
if is_group {
if required > 0 {
// Not enough information to determine the exact field.
// (See below.)
return &RequiredNotSetError{"{Unknown}"}
}
return nil // input is satisfied
}
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
@ -491,12 +390,11 @@ func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group
if !ok {
// Maybe it's an extension?
if prop.extendable {
if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
if err = o.skip(st, tag, wire); err == nil {
extmap := e.extensionsWrite()
ext := extmap[int32(tag)] // may be missing
ext := e.ExtensionMap()[int32(tag)] // may be missing
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
extmap[int32(tag)] = ext
e.ExtensionMap()[int32(tag)] = ext
}
continue
}
@ -870,11 +768,10 @@ func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
}
}
keyelem, valelem := keyptr.Elem(), valptr.Elem()
if !keyelem.IsValid() {
keyelem = reflect.Zero(p.mtype.Key())
}
if !valelem.IsValid() {
valelem = reflect.Zero(p.mtype.Elem())
if !keyelem.IsValid() || !valelem.IsValid() {
// We did not decode the key or the value in the map entry.
// Either way, it's an invalid map entry.
return fmt.Errorf("proto: bad map data: missing key/val")
}
v.SetMapIndex(keyelem, valelem)
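
The hunks above revert decode.go's varint handling to the single implementation without the unrolled fast path; the wire format itself is unchanged. A small hedged sketch of the round trip through the package's exported helpers (300 is the customary two-byte example):

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
)

func main() {
    // 300 encodes as the two bytes 0xac 0x02 in base-128 varint form.
    buf := proto.EncodeVarint(300)
    fmt.Printf("% x\n", buf) // ac 02

    x, n := proto.DecodeVarint(buf)
    fmt.Println(x, n) // 300 2
}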

View File

@ -64,16 +64,8 @@ var (
// a struct with a repeated field containing a nil element.
errRepeatedHasNil = errors.New("proto: repeated field has nil element")
// errOneofHasNil is the error returned if Marshal is called with
// a struct with a oneof field containing a nil element.
errOneofHasNil = errors.New("proto: oneof field has nil value")
// ErrNil is the error returned if Marshal is called with nil.
ErrNil = errors.New("proto: Marshal called with nil")
// ErrTooLarge is the error returned if Marshal is called with a
// message that encodes to >2GB.
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
)
// The fundamental encoders that put bytes on the wire.
@ -82,10 +74,6 @@ var (
const maxVarintBytes = 10 // maximum length of a varint
// maxMarshalSize is the largest allowed size of an encoded protobuf,
// since C++ and Java use signed int32s for the size.
const maxMarshalSize = 1<<31 - 1
// EncodeVarint returns the varint encoding of x.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
@ -234,6 +222,10 @@ func Marshal(pb Message) ([]byte, error) {
}
p := NewBuffer(nil)
err := p.Marshal(pb)
var state errorState
if err != nil && !state.shouldContinue(err, nil) {
return nil, err
}
if p.buf == nil && err == nil {
// Return a non-nil slice on success.
return []byte{}, nil
@ -262,8 +254,11 @@ func (p *Buffer) Marshal(pb Message) error {
// Can the object marshal itself?
if m, ok := pb.(Marshaler); ok {
data, err := m.Marshal()
if err != nil {
return err
}
p.buf = append(p.buf, data...)
return err
return nil
}
t, base, err := getbase(pb)
@ -275,12 +270,9 @@ func (p *Buffer) Marshal(pb Message) error {
}
if collectStats {
(stats).Encode++ // Parens are to work around a goimports bug.
stats.Encode++
}
if len(p.buf) > maxMarshalSize {
return ErrTooLarge
}
return err
}
@ -302,7 +294,7 @@ func Size(pb Message) (n int) {
}
if collectStats {
(stats).Size++ // Parens are to work around a goimports bug.
stats.Size++
}
return
@ -1007,6 +999,7 @@ func size_slice_struct_message(p *Properties, base structPointer) (n int) {
if p.isMarshaler {
m := structPointer_Interface(structp, p.stype).(Marshaler)
data, _ := m.Marshal()
n += len(p.tagcode)
n += sizeRawBytes(data)
continue
}
@ -1065,25 +1058,10 @@ func size_slice_struct_group(p *Properties, base structPointer) (n int) {
// Encode an extension map.
func (o *Buffer) enc_map(p *Properties, base structPointer) error {
exts := structPointer_ExtMap(base, p.field)
if err := encodeExtensionsMap(*exts); err != nil {
v := *structPointer_ExtMap(base, p.field)
if err := encodeExtensionMap(v); err != nil {
return err
}
return o.enc_map_body(*exts)
}
func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
exts := structPointer_Extensions(base, p.field)
if err := encodeExtensions(exts); err != nil {
return err
}
v, _ := exts.extensionsRead()
return o.enc_map_body(v)
}
func (o *Buffer) enc_map_body(v map[int32]Extension) error {
// Fast-path for common cases: zero or one extensions.
if len(v) <= 1 {
for _, e := range v {
@ -1106,13 +1084,8 @@ func (o *Buffer) enc_map_body(v map[int32]Extension) error {
}
func size_map(p *Properties, base structPointer) int {
v := structPointer_ExtMap(base, p.field)
return extensionsMapSize(*v)
}
func size_exts(p *Properties, base structPointer) int {
v := structPointer_Extensions(base, p.field)
return extensionsSize(v)
v := *structPointer_ExtMap(base, p.field)
return sizeExtensionMap(v)
}
// Encode a map field.
@ -1141,7 +1114,7 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
return err
}
if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil {
return err
}
return nil
@ -1151,6 +1124,11 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
for _, key := range v.MapKeys() {
val := v.MapIndex(key)
// The only illegal map entry values are nil message pointers.
if val.Kind() == reflect.Ptr && val.IsNil() {
return errors.New("proto: map has nil element")
}
keycopy.Set(key)
valcopy.Set(val)
@ -1238,18 +1216,13 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
return err
}
}
if len(o.buf) > maxMarshalSize {
return ErrTooLarge
}
}
}
// Do oneof fields.
if prop.oneofMarshaler != nil {
m := structPointer_Interface(base, prop.stype).(Message)
if err := prop.oneofMarshaler(m, o); err == ErrNil {
return errOneofHasNil
} else if err != nil {
if err := prop.oneofMarshaler(m, o); err != nil {
return err
}
}
@ -1257,9 +1230,6 @@ func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
// Add unrecognized fields at the end.
if prop.unrecField.IsValid() {
v := *structPointer_Bytes(base, prop.unrecField)
if len(o.buf)+len(v) > maxMarshalSize {
return ErrTooLarge
}
if len(v) > 0 {
o.buf = append(o.buf, v...)
}

View File

@ -54,17 +54,13 @@ Equality is defined in this way:
in a proto3 .proto file, fields are not "set"; specifically,
zero length proto3 "bytes" fields are equal (nil == {}).
- Two repeated fields are equal iff their lengths are the same,
and their corresponding elements are equal. Note a "bytes" field,
although represented by []byte, is not a repeated field and the
rule for the scalar fields described above applies.
and their corresponding elements are equal (a "bytes" field,
although represented by []byte, is not a repeated field)
- Two unset fields are equal.
- Two unknown field sets are equal if their current
encoded state is equal.
- Two extension sets are equal iff they have corresponding
elements that are pairwise equal.
- Two map fields are equal iff their lengths are the same,
and they contain the same set of elements. Zero-length map
fields are equal.
- Every other combination of things are not equal.
The return value is undefined if a and b are not protocol buffers.
@ -125,16 +121,9 @@ func equalStruct(v1, v2 reflect.Value) bool {
}
}
if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
em2 := v2.FieldByName("XXX_InternalExtensions")
if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
return false
}
}
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
em2 := v2.FieldByName("XXX_extensions")
if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
return false
}
}
@ -195,13 +184,6 @@ func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
}
return true
case reflect.Ptr:
// Maps may have nil values in them, so check for nil.
if v1.IsNil() && v2.IsNil() {
return true
}
if v1.IsNil() != v2.IsNil() {
return false
}
return equalAny(v1.Elem(), v2.Elem(), prop)
case reflect.Slice:
if v1.Type().Elem().Kind() == reflect.Uint8 {
@ -241,14 +223,8 @@ func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
}
// base is the struct type that the extensions are based on.
// x1 and x2 are InternalExtensions.
func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
em1, _ := x1.extensionsRead()
em2, _ := x2.extensionsRead()
return equalExtMap(base, em1, em2)
}
func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
// em1 and em2 are extension maps.
func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
if len(em1) != len(em2) {
return false
}

View File

@ -52,99 +52,14 @@ type ExtensionRange struct {
Start, End int32 // both inclusive
}
// extendableProto is an interface implemented by any protocol buffer generated by the current
// proto compiler that may be extended.
// extendableProto is an interface implemented by any protocol buffer that may be extended.
type extendableProto interface {
Message
ExtensionRangeArray() []ExtensionRange
extensionsWrite() map[int32]Extension
extensionsRead() (map[int32]Extension, sync.Locker)
}
// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
// version of the proto compiler that may be extended.
type extendableProtoV1 interface {
Message
ExtensionRangeArray() []ExtensionRange
ExtensionMap() map[int32]Extension
}
// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
type extensionAdapter struct {
extendableProtoV1
}
func (e extensionAdapter) extensionsWrite() map[int32]Extension {
return e.ExtensionMap()
}
func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
return e.ExtensionMap(), notLocker{}
}
// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
type notLocker struct{}
func (n notLocker) Lock() {}
func (n notLocker) Unlock() {}
// extendable returns the extendableProto interface for the given generated proto message.
// If the proto message has the old extension format, it returns a wrapper that implements
// the extendableProto interface.
func extendable(p interface{}) (extendableProto, bool) {
if ep, ok := p.(extendableProto); ok {
return ep, ok
}
if ep, ok := p.(extendableProtoV1); ok {
return extensionAdapter{ep}, ok
}
return nil, false
}
// XXX_InternalExtensions is an internal representation of proto extensions.
//
// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
//
// The methods of XXX_InternalExtensions are not concurrency safe in general,
// but calls to logically read-only methods such as has and get may be executed concurrently.
type XXX_InternalExtensions struct {
// The struct must be indirect so that if a user inadvertently copies a
// generated message and its embedded XXX_InternalExtensions, they
// avoid the mayhem of a copied mutex.
//
// The mutex serializes all logically read-only operations to p.extensionMap.
// It is up to the client to ensure that write operations to p.extensionMap are
// mutually exclusive with other accesses.
p *struct {
mu sync.Mutex
extensionMap map[int32]Extension
}
}
// extensionsWrite returns the extension map, creating it on first use.
func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
if e.p == nil {
e.p = new(struct {
mu sync.Mutex
extensionMap map[int32]Extension
})
e.p.extensionMap = make(map[int32]Extension)
}
return e.p.extensionMap
}
// extensionsRead returns the extensions map for read-only use. It may be nil.
// The caller must hold the returned mutex's lock when accessing Elements within the map.
func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
if e.p == nil {
return nil, nil
}
return e.p.extensionMap, &e.p.mu
}
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
// ExtensionDesc represents an extension specification.
// Used in generated code from the protocol compiler.
@ -177,13 +92,8 @@ type Extension struct {
}
// SetRawExtension is for testing only.
func SetRawExtension(base Message, id int32, b []byte) {
epb, ok := extendable(base)
if !ok {
return
}
extmap := epb.extensionsWrite()
extmap[id] = Extension{enc: b}
func SetRawExtension(base extendableProto, id int32, b []byte) {
base.ExtensionMap()[id] = Extension{enc: b}
}
// isExtensionField returns true iff the given field number is in an extension range.
@ -198,12 +108,8 @@ func isExtensionField(pb extendableProto, field int32) bool {
// checkExtensionTypes checks that the given extension is valid for pb.
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
var pbi interface{} = pb
// Check the extended type.
if ea, ok := pbi.(extensionAdapter); ok {
pbi = ea.extendableProtoV1
}
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
}
// Check the range.
@ -249,19 +155,8 @@ func extensionProperties(ed *ExtensionDesc) *Properties {
return prop
}
// encode encodes any unmarshaled (unencoded) extensions in e.
func encodeExtensions(e *XXX_InternalExtensions) error {
m, mu := e.extensionsRead()
if m == nil {
return nil // fast path
}
mu.Lock()
defer mu.Unlock()
return encodeExtensionsMap(m)
}
// encode encodes any unmarshaled (unencoded) extensions in e.
func encodeExtensionsMap(m map[int32]Extension) error {
// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
func encodeExtensionMap(m map[int32]Extension) error {
for k, e := range m {
if e.value == nil || e.desc == nil {
// Extension is only in its encoded form.
@ -289,17 +184,7 @@ func encodeExtensionsMap(m map[int32]Extension) error {
return nil
}
func extensionsSize(e *XXX_InternalExtensions) (n int) {
m, mu := e.extensionsRead()
if m == nil {
return 0
}
mu.Lock()
defer mu.Unlock()
return extensionsMapSize(m)
}
func extensionsMapSize(m map[int32]Extension) (n int) {
func sizeExtensionMap(m map[int32]Extension) (n int) {
for _, e := range m {
if e.value == nil || e.desc == nil {
// Extension is only in its encoded form.
@ -324,51 +209,26 @@ func extensionsMapSize(m map[int32]Extension) (n int) {
}
// HasExtension returns whether the given extension is present in pb.
func HasExtension(pb Message, extension *ExtensionDesc) bool {
func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
// TODO: Check types, field numbers, etc.?
epb, ok := extendable(pb)
if !ok {
return false
}
extmap, mu := epb.extensionsRead()
if extmap == nil {
return false
}
mu.Lock()
_, ok = extmap[extension.Field]
mu.Unlock()
_, ok := pb.ExtensionMap()[extension.Field]
return ok
}
// ClearExtension removes the given extension from pb.
func ClearExtension(pb Message, extension *ExtensionDesc) {
epb, ok := extendable(pb)
if !ok {
return
}
func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
// TODO: Check types, field numbers, etc.?
extmap := epb.extensionsWrite()
delete(extmap, extension.Field)
delete(pb.ExtensionMap(), extension.Field)
}
// GetExtension parses and returns the given extension of pb.
// If the extension is not present and has no default value it returns ErrMissingExtension.
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
epb, ok := extendable(pb)
if !ok {
return nil, errors.New("proto: not an extendable proto")
}
if err := checkExtensionTypes(epb, extension); err != nil {
func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
if err := checkExtensionTypes(pb, extension); err != nil {
return nil, err
}
emap, mu := epb.extensionsRead()
if emap == nil {
return defaultExtensionValue(extension)
}
mu.Lock()
defer mu.Unlock()
emap := pb.ExtensionMap()
e, ok := emap[extension.Field]
if !ok {
// defaultExtensionValue returns the default value or
@ -472,9 +332,10 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
// The returned slice has the same length as es; missing extensions will appear as nil elements.
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
epb, ok := extendable(pb)
epb, ok := pb.(extendableProto)
if !ok {
return nil, errors.New("proto: not an extendable proto")
err = errors.New("proto: not an extendable proto")
return
}
extensions = make([]interface{}, len(es))
for i, e := range es {
@ -489,44 +350,9 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e
return
}
// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
// just the Field field, which defines the extension's field number.
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
epb, ok := extendable(pb)
if !ok {
return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
}
registeredExtensions := RegisteredExtensions(pb)
emap, mu := epb.extensionsRead()
if emap == nil {
return nil, nil
}
mu.Lock()
defer mu.Unlock()
extensions := make([]*ExtensionDesc, 0, len(emap))
for extid, e := range emap {
desc := e.desc
if desc == nil {
desc = registeredExtensions[extid]
if desc == nil {
desc = &ExtensionDesc{Field: extid}
}
}
extensions = append(extensions, desc)
}
return extensions, nil
}
// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
epb, ok := extendable(pb)
if !ok {
return errors.New("proto: not an extendable proto")
}
if err := checkExtensionTypes(epb, extension); err != nil {
func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
if err := checkExtensionTypes(pb, extension); err != nil {
return err
}
typ := reflect.TypeOf(extension.ExtensionType)
@ -542,23 +368,10 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
}
extmap := epb.extensionsWrite()
extmap[extension.Field] = Extension{desc: extension, value: value}
pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
return nil
}
// ClearAllExtensions clears all extensions from pb.
func ClearAllExtensions(pb Message) {
epb, ok := extendable(pb)
if !ok {
return
}
m := epb.extensionsWrite()
for k := range m {
delete(m, k)
}
}
// A global registry of extensions.
// The generated code will register the generated descriptors by calling RegisterExtension.
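
Although the receiver type shifts between Message and the unexported extendableProto across the two versions shown, call sites that pass a generated extendable message compile and behave the same either way. A hedged usage sketch; the pb import path, MyMessage and E_MyExtension are hypothetical generated identifiers:

package main

import (
    "fmt"
    "log"

    "github.com/golang/protobuf/proto"

    pb "example.com/project/gen/mypb" // hypothetical generated package
)

func main() {
    msg := &pb.MyMessage{} // hypothetical extendable message

    // Set, read back and clear a hypothetical *string extension.
    if err := proto.SetExtension(msg, pb.E_MyExtension, proto.String("hello")); err != nil {
        log.Fatal(err)
    }
    if proto.HasExtension(msg, pb.E_MyExtension) {
        v, err := proto.GetExtension(msg, pb.E_MyExtension)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(*(v.(*string))) // hello
    }
    proto.ClearExtension(msg, pb.E_MyExtension)
}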

View File

@ -308,7 +308,7 @@ func GetStats() Stats { return stats }
// temporary Buffer and are fine for most applications.
type Buffer struct {
buf []byte // encode/decode byte stream
index int // read point
index int // write point
// pools of basic types to amortize allocation.
bools []bool
@ -889,10 +889,6 @@ func isProto3Zero(v reflect.Value) bool {
return false
}
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const ProtoPackageIsVersion2 = true
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const ProtoPackageIsVersion1 = true

View File

@ -149,21 +149,9 @@ func skipVarint(buf []byte) []byte {
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSet(exts interface{}) ([]byte, error) {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
if err := encodeExtensions(exts); err != nil {
return nil, err
}
m, _ = exts.extensionsRead()
case map[int32]Extension:
if err := encodeExtensionsMap(exts); err != nil {
return nil, err
}
m = exts
default:
return nil, errors.New("proto: not an extension map")
func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
if err := encodeExtensionMap(m); err != nil {
return nil, err
}
// Sort extension IDs to provide a deterministic encoding.
@ -190,17 +178,7 @@ func MarshalMessageSet(exts interface{}) ([]byte, error) {
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSet(buf []byte, exts interface{}) error {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
m = exts.extensionsWrite()
case map[int32]Extension:
m = exts
default:
return errors.New("proto: not an extension map")
}
func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
ms := new(messageSet)
if err := Unmarshal(buf, ms); err != nil {
return err
@ -231,16 +209,7 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error {
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
m, _ = exts.extensionsRead()
case map[int32]Extension:
m = exts
default:
return nil, errors.New("proto: not an extension map")
}
func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
var b bytes.Buffer
b.WriteByte('{')
@ -283,7 +252,7 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
// Common-case fast path.
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
return nil

View File

@ -29,7 +29,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build appengine js
// +build appengine
// This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
@ -139,11 +139,6 @@ func structPointer_StringSlice(p structPointer, f field) *[]string {
return structPointer_ifield(p, f).(*[]string)
}
// Extensions returns the address of an extension map field in the struct.
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
return structPointer_ifield(p, f).(*XXX_InternalExtensions)
}
// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return structPointer_ifield(p, f).(*map[int32]Extension)

View File

@ -29,7 +29,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build !appengine,!js
// +build !appengine
// This file contains the implementation of the proto field accesses using package unsafe.
@ -126,10 +126,6 @@ func structPointer_StringSlice(p structPointer, f field) *[]string {
}
// ExtMap returns the address of an extension map field in the struct.
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

View File

@ -473,13 +473,17 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
p.dec = (*Buffer).dec_slice_int64
p.packedDec = (*Buffer).dec_slice_packed_int64
case reflect.Uint8:
p.enc = (*Buffer).enc_slice_byte
p.dec = (*Buffer).dec_slice_byte
if p.proto3 {
p.size = size_slice_byte
// This is a []byte, which is either a bytes field,
// or the value of a map field. In the latter case,
// we always encode an empty []byte, so we should not
// use the proto3 enc/size funcs.
// f == nil iff this is the key/value of a map field.
if p.proto3 && f != nil {
p.enc = (*Buffer).enc_proto3_slice_byte
p.size = size_proto3_slice_byte
} else {
p.enc = (*Buffer).enc_slice_byte
p.size = size_slice_byte
}
case reflect.Float32, reflect.Float64:
switch t2.Bits() {
@ -678,8 +682,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
propertiesMap[t] = prop
// build properties
prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
reflect.PtrTo(t).Implements(extendableProtoV1Type)
prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
prop.unrecField = invalidField
prop.Prop = make([]*Properties, t.NumField())
prop.order = make([]int, t.NumField())
@ -690,22 +693,15 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
name := f.Name
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
if f.Name == "XXX_InternalExtensions" { // special case
p.enc = (*Buffer).enc_exts
p.dec = nil // not needed
p.size = size_exts
} else if f.Name == "XXX_extensions" { // special case
if f.Name == "XXX_extensions" { // special case
p.enc = (*Buffer).enc_map
p.dec = nil // not needed
p.size = size_map
} else if f.Name == "XXX_unrecognized" { // special case
}
if f.Name == "XXX_unrecognized" { // special case
prop.unrecField = toField(&f)
}
oneof := f.Tag.Get("protobuf_oneof") // special case
if oneof != "" {
// Oneof fields don't use the traditional protobuf tag.
p.OrigName = oneof
}
oneof := f.Tag.Get("protobuf_oneof") != "" // special case
prop.Prop[i] = p
prop.order[i] = i
if debug {
@ -715,7 +711,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
}
print("\n")
}
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
}
}
@ -844,29 +840,7 @@ func RegisterType(x Message, name string) {
}
// MessageName returns the fully-qualified proto name for the given message type.
func MessageName(x Message) string {
type xname interface {
XXX_MessageName() string
}
if m, ok := x.(xname); ok {
return m.XXX_MessageName()
}
return revProtoTypes[reflect.TypeOf(x)]
}
func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
// MessageType returns the message type (pointer to struct) for a named message.
func MessageType(name string) reflect.Type { return protoTypes[name] }
// A registry of all linked proto files.
var (
protoFiles = make(map[string][]byte) // file name => fileDescriptor
)
// RegisterFile is called from generated code and maps from the
// full file name of a .proto file to its compressed FileDescriptorProto.
func RegisterFile(filename string, fileDescriptor []byte) {
protoFiles[filename] = fileDescriptor
}
// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
func FileDescriptor(filename string) []byte { return protoFiles[filename] }

View File

@ -154,7 +154,7 @@ func (w *textWriter) indent() { w.ind++ }
func (w *textWriter) unindent() {
if w.ind == 0 {
log.Print("proto: textWriter unindented too far")
log.Printf("proto: textWriter unindented too far")
return
}
w.ind--
@ -175,93 +175,7 @@ type raw interface {
Bytes() []byte
}
func requiresQuotes(u string) bool {
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
for _, ch := range u {
switch {
case ch == '.' || ch == '/' || ch == '_':
continue
case '0' <= ch && ch <= '9':
continue
case 'A' <= ch && ch <= 'Z':
continue
case 'a' <= ch && ch <= 'z':
continue
default:
return true
}
}
return false
}
// isAny reports whether sv is a google.protobuf.Any message
func isAny(sv reflect.Value) bool {
type wkt interface {
XXX_WellKnownType() string
}
t, ok := sv.Addr().Interface().(wkt)
return ok && t.XXX_WellKnownType() == "Any"
}
// writeProto3Any writes an expanded google.protobuf.Any message.
//
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
// required messages are not linked in).
//
// It returns (true, error) when sv was written in expanded format or an error
// was encountered.
func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
turl := sv.FieldByName("TypeUrl")
val := sv.FieldByName("Value")
if !turl.IsValid() || !val.IsValid() {
return true, errors.New("proto: invalid google.protobuf.Any message")
}
b, ok := val.Interface().([]byte)
if !ok {
return true, errors.New("proto: invalid google.protobuf.Any message")
}
parts := strings.Split(turl.String(), "/")
mt := MessageType(parts[len(parts)-1])
if mt == nil {
return false, nil
}
m := reflect.New(mt.Elem())
if err := Unmarshal(b, m.Interface().(Message)); err != nil {
return false, nil
}
w.Write([]byte("["))
u := turl.String()
if requiresQuotes(u) {
writeString(w, u)
} else {
w.Write([]byte(u))
}
if w.compact {
w.Write([]byte("]:<"))
} else {
w.Write([]byte("]: <\n"))
w.ind++
}
if err := tm.writeStruct(w, m.Elem()); err != nil {
return true, err
}
if w.compact {
w.Write([]byte("> "))
} else {
w.ind--
w.Write([]byte(">\n"))
}
return true, nil
}
func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
if tm.ExpandAny && isAny(sv) {
if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
return err
}
}
func writeStruct(w *textWriter, sv reflect.Value) error {
st := sv.Type()
sprops := GetProperties(st)
for i := 0; i < sv.NumField(); i++ {
@ -313,7 +227,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
}
continue
}
if err := tm.writeAny(w, v, props); err != nil {
if err := writeAny(w, v, props); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
@ -355,7 +269,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return err
}
}
if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
if err := writeAny(w, key, props.mkeyprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
@ -372,7 +286,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return err
}
}
if err := tm.writeAny(w, val, props.mvalprop); err != nil {
if err := writeAny(w, val, props.mvalprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
@ -444,7 +358,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
}
// Enums have a String method, so writeAny will work fine.
if err := tm.writeAny(w, fv, props); err != nil {
if err := writeAny(w, fv, props); err != nil {
return err
}
@ -455,8 +369,8 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
// Extensions (the XXX_extensions field).
pv := sv.Addr()
if _, ok := extendable(pv.Interface()); ok {
if err := tm.writeExtensions(w, pv); err != nil {
if pv.Type().Implements(extendableProtoType) {
if err := writeExtensions(w, pv); err != nil {
return err
}
}
@ -486,7 +400,7 @@ func writeRaw(w *textWriter, b []byte) error {
}
// writeAny writes an arbitrary field.
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
v = reflect.Indirect(v)
// Floats have special cases.
@ -513,7 +427,7 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
switch v.Kind() {
case reflect.Slice:
// Should only be a []byte; repeated fields are handled in writeStruct.
if err := writeString(w, string(v.Bytes())); err != nil {
if err := writeString(w, string(v.Interface().([]byte))); err != nil {
return err
}
case reflect.String:
@ -535,15 +449,15 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
}
}
w.indent()
if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
text, err := etm.MarshalText()
if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
text, err := tm.MarshalText()
if err != nil {
return err
}
if _, err = w.Write(text); err != nil {
return err
}
} else if err := tm.writeStruct(w, v); err != nil {
} else if err := writeStruct(w, v); err != nil {
return err
}
w.unindent()
@ -687,24 +601,19 @@ func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// writeExtensions writes all the extensions in pv.
// pv is assumed to be a pointer to a protocol message struct that is extendable.
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
func writeExtensions(w *textWriter, pv reflect.Value) error {
emap := extensionMaps[pv.Type().Elem()]
ep, _ := extendable(pv.Interface())
ep := pv.Interface().(extendableProto)
// Order the extensions by ID.
// This isn't strictly necessary, but it will give us
// canonical output, which will also make testing easier.
m, mu := ep.extensionsRead()
if m == nil {
return nil
}
mu.Lock()
m := ep.ExtensionMap()
ids := make([]int32, 0, len(m))
for id := range m {
ids = append(ids, id)
}
sort.Sort(int32Slice(ids))
mu.Unlock()
for _, extNum := range ids {
ext := m[extNum]
@ -727,13 +636,13 @@ func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error
// Repeated extensions will appear as a slice.
if !desc.repeated() {
if err := tm.writeExtension(w, desc.Name, pb); err != nil {
if err := writeExtension(w, desc.Name, pb); err != nil {
return err
}
} else {
v := reflect.ValueOf(pb)
for i := 0; i < v.Len(); i++ {
if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
return err
}
}
@ -742,7 +651,7 @@ func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error
return nil
}
func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
func writeExtension(w *textWriter, name string, pb interface{}) error {
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
return err
}
@ -751,7 +660,7 @@ func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface
return err
}
}
if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
@ -778,13 +687,12 @@ func (w *textWriter) writeIndent() {
// TextMarshaler is a configurable text format marshaler.
type TextMarshaler struct {
Compact bool // use compact text format (one line).
ExpandAny bool // expand google.protobuf.Any messages of known types
Compact bool // use compact text format (one line).
}
// Marshal writes a given protocol buffer in text format.
// The only errors returned are from w.
func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error {
val := reflect.ValueOf(pb)
if pb == nil || val.IsNil() {
w.Write([]byte("<nil>"))
@ -799,11 +707,11 @@ func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
aw := &textWriter{
w: ww,
complete: true,
compact: tm.Compact,
compact: m.Compact,
}
if etm, ok := pb.(encoding.TextMarshaler); ok {
text, err := etm.MarshalText()
if tm, ok := pb.(encoding.TextMarshaler); ok {
text, err := tm.MarshalText()
if err != nil {
return err
}
@ -817,7 +725,7 @@ func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
}
// Dereference the received pointer so we don't have outer < and >.
v := reflect.Indirect(val)
if err := tm.writeStruct(aw, v); err != nil {
if err := writeStruct(aw, v); err != nil {
return err
}
if bw != nil {
@ -827,9 +735,9 @@ func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
}
// Text is the same as Marshal, but returns the string directly.
func (tm *TextMarshaler) Text(pb Message) string {
func (m *TextMarshaler) Text(pb Message) string {
var buf bytes.Buffer
tm.Marshal(&buf, pb)
m.Marshal(&buf, pb)
return buf.String()
}
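
The hunks above drop the google.protobuf.Any expansion support and fold the TextMarshaler methods back into package-level functions, but the exported entry points keep the same shape. A hedged sketch of producing text format, reusing the hypothetical generated package from the extension example above:

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"

    pb "example.com/project/gen/mypb" // hypothetical generated package
)

func main() {
    msg := &pb.MyMessage{Name: proto.String("example")} // hypothetical field

    // Package-level helper: multi-line text format.
    fmt.Println(proto.MarshalTextString(msg))

    // Configurable marshaler: compact, single-line output.
    tm := &proto.TextMarshaler{Compact: true}
    fmt.Println(tm.Text(msg))
}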

View File

@ -44,9 +44,6 @@ import (
"unicode/utf8"
)
// Error string emitted when deserializing Any and fields are already set
const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
type ParseError struct {
Message string
Line int // 1-based line number
@ -166,7 +163,7 @@ func (p *textParser) advance() {
p.cur.offset, p.cur.line = p.offset, p.line
p.cur.unquoted = ""
switch p.s[0] {
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
case '<', '>', '{', '}', ':', '[', ']', ';', ',':
// Single symbol
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
case '"', '\'':
@ -454,10 +451,7 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
fieldSet := make(map[string]bool)
// A struct is a sequence of "name: value", terminated by one of
// '>' or '}', or the end of the input. A name may also be
// "[extension]" or "[type/url]".
//
// The whole struct can also be an expanded Any message, like:
// [type/url] < ... struct contents ... >
// "[extension]".
for {
tok := p.next()
if tok.err != nil {
@ -467,74 +461,33 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
break
}
if tok.value == "[" {
// Looks like an extension or an Any.
// Looks like an extension.
//
// TODO: Check whether we need to handle
// namespace rooted names (e.g. ".something.Foo").
extName, err := p.consumeExtName()
if err != nil {
return err
tok = p.next()
if tok.err != nil {
return tok.err
}
if s := strings.LastIndex(extName, "/"); s >= 0 {
// If it contains a slash, it's an Any type URL.
messageName := extName[s+1:]
mt := MessageType(messageName)
if mt == nil {
return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
}
tok = p.next()
if tok.err != nil {
return tok.err
}
// consume an optional colon
if tok.value == ":" {
tok = p.next()
if tok.err != nil {
return tok.err
}
}
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
v := reflect.New(mt.Elem())
if pe := p.readStruct(v.Elem(), terminator); pe != nil {
return pe
}
b, err := Marshal(v.Interface().(Message))
if err != nil {
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
}
if fieldSet["type_url"] {
return p.errorf(anyRepeatedlyUnpacked, "type_url")
}
if fieldSet["value"] {
return p.errorf(anyRepeatedlyUnpacked, "value")
}
sv.FieldByName("TypeUrl").SetString(extName)
sv.FieldByName("Value").SetBytes(b)
fieldSet["type_url"] = true
fieldSet["value"] = true
continue
}
var desc *ExtensionDesc
// This could be faster, but it's functional.
// TODO: Do something smarter than a linear scan.
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
if d.Name == extName {
if d.Name == tok.value {
desc = d
break
}
}
if desc == nil {
return p.errorf("unrecognized extension %q", extName)
return p.errorf("unrecognized extension %q", tok.value)
}
// Check the extension terminator.
tok = p.next()
if tok.err != nil {
return tok.err
}
if tok.value != "]" {
return p.errorf("unrecognized extension terminator %q", tok.value)
}
props := &Properties{}
@ -561,7 +514,7 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
}
reqFieldErr = err
}
ep := sv.Addr().Interface().(Message)
ep := sv.Addr().Interface().(extendableProto)
if !rep {
SetExtension(ep, desc, ext.Interface())
} else {
@ -613,9 +566,8 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
// The map entry should be this sequence of tokens:
// < key : KEY value : VALUE >
// However, implementations may omit key or value, and technically
// we should support them in any order. See b/28924776 for a time
// this went wrong.
// Technically the "key" and "value" could come in any order,
// but in practice they won't.
tok := p.next()
var terminator string
@ -627,39 +579,32 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
for {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == terminator {
break
}
switch tok.value {
case "key":
if err := p.consumeToken(":"); err != nil {
return err
}
if err := p.readAny(key, props.mkeyprop); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
case "value":
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
return err
}
if err := p.readAny(val, props.mvalprop); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
default:
p.back()
return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
}
if err := p.consumeToken("key"); err != nil {
return err
}
if err := p.consumeToken(":"); err != nil {
return err
}
if err := p.readAny(key, props.mkeyprop); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
if err := p.consumeToken("value"); err != nil {
return err
}
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
return err
}
if err := p.readAny(val, props.mvalprop); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
if err := p.consumeToken(terminator); err != nil {
return err
}
dst.SetMapIndex(key, val)
@ -682,8 +627,7 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
return err
}
reqFieldErr = err
}
if props.Required {
} else if props.Required {
reqCount--
}
@ -699,35 +643,6 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
return reqFieldErr
}
// consumeExtName consumes extension name or expanded Any type URL and the
// following ']'. It returns the name or URL consumed.
func (p *textParser) consumeExtName() (string, error) {
tok := p.next()
if tok.err != nil {
return "", tok.err
}
// If extension name or type url is quoted, it's a single token.
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
if err != nil {
return "", err
}
return name, p.consumeToken("]")
}
// Consume everything up to "]"
var parts []string
for tok.value != "]" {
parts = append(parts, tok.value)
tok = p.next()
if tok.err != nil {
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
}
}
return strings.Join(parts, ""), nil
}
// consumeOptionalSeparator consumes an optional semicolon or comma.
// It is used in readStruct to provide backward compatibility.
func (p *textParser) consumeOptionalSeparator() error {
@ -792,12 +707,12 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
return p.readAny(fv.Index(fv.Len()-1), props)
case reflect.Bool:
// true/1/t/True or false/f/0/False.
// Either "true", "false", 1 or 0.
switch tok.value {
case "true", "1", "t", "True":
case "true", "1":
fv.SetBool(true)
return nil
case "false", "0", "f", "False":
case "false", "0":
fv.SetBool(false)
return nil
}
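
One user-visible effect of the parser hunks above is that text-format booleans are again limited to "true", "false", 1 and 0; the t/True/f/False spellings accepted by the newer parser are gone. A hedged sketch, again with a hypothetical generated message that has a bool field named enabled:

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"

    pb "example.com/project/gen/mypb" // hypothetical generated package
)

func main() {
    msg := &pb.MyMessage{} // hypothetical message with a bool field "enabled"

    if err := proto.UnmarshalText("enabled: true", msg); err != nil {
        fmt.Println("parse error:", err)
    }
    fmt.Println(msg.GetEnabled()) // true (GetEnabled is a hypothetical generated getter)

    // With the restored parser this spelling is rejected with a parse error:
    fmt.Println(proto.UnmarshalText("enabled: True", &pb.MyMessage{}))
}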

View File

@ -3,8 +3,8 @@ go_import_path: github.com/pkg/errors
go:
- 1.4.3
- 1.5.4
- 1.6.2
- 1.7.1
- 1.6.3
- 1.7.3
- tip
script:

View File

@ -231,12 +231,8 @@ func (c *Command) Usage() error {
// HelpFunc returns either the function set by SetHelpFunc for this command
// or a parent, or it returns a function with default help behavior.
func (c *Command) HelpFunc() func(*Command, []string) {
cmd := c
for cmd != nil {
if cmd.helpFunc != nil {
return cmd.helpFunc
}
cmd = cmd.parent
if helpFunc := c.checkHelpFunc(); helpFunc != nil {
return helpFunc
}
return func(*Command, []string) {
c.mergePersistentFlags()
@ -247,6 +243,20 @@ func (c *Command) HelpFunc() func(*Command, []string) {
}
}
// checkHelpFunc checks if there is helpFunc in ancestors of c.
func (c *Command) checkHelpFunc() func(*Command, []string) {
if c == nil {
return nil
}
if c.helpFunc != nil {
return c.helpFunc
}
if c.HasParent() {
return c.parent.checkHelpFunc()
}
return nil
}
// Help puts out the help for the command.
// Used when a user calls help [command].
// Can be defined by user by overriding HelpFunc.
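
The restored cobra version walks a command's ancestors recursively via checkHelpFunc instead of the iterative loop, but the lookup result should be identical: the nearest ancestor with a custom help function wins. A hedged usage sketch with illustrative command names:

package main

import (
    "fmt"

    "github.com/spf13/cobra"
)

func main() {
    root := &cobra.Command{Use: "app"}
    sub := &cobra.Command{Use: "sub", Run: func(*cobra.Command, []string) {}}
    root.AddCommand(sub)

    // A help function set on the root is found by the child through its ancestors.
    root.SetHelpFunc(func(c *cobra.Command, args []string) {
        fmt.Println("custom help for", c.Name())
    })

    sub.HelpFunc()(sub, nil) // prints: custom help for sub
}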

View File

@ -3,9 +3,8 @@ sudo: false
language: go
go:
- 1.5.4
- 1.6.3
- 1.7
- 1.7.3
- tip
matrix:

View File

@ -514,7 +514,7 @@ func (f *FlagSet) FlagUsages() string {
if len(flag.NoOptDefVal) > 0 {
switch flag.Value.Type() {
case "string":
line += fmt.Sprintf("[=%q]", flag.NoOptDefVal)
line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal)
case "bool":
if flag.NoOptDefVal != "true" {
line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
@ -534,7 +534,7 @@ func (f *FlagSet) FlagUsages() string {
line += usage
if !flag.defaultIsZeroValue() {
if flag.Value.Type() == "string" {
line += fmt.Sprintf(" (default %q)", flag.DefValue)
line += fmt.Sprintf(" (default \"%s\")", flag.DefValue)
} else {
line += fmt.Sprintf(" (default %s)", flag.DefValue)
}
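
The only behavioural difference in the pflag hunks above is how string defaults are quoted: %q escapes embedded quotes and control characters, while the restored "%s" form wraps the raw string in plain double quotes. A small hedged sketch of the distinction:

package main

import "fmt"

func main() {
    def := `say "hi"`

    // %q escapes the embedded quotes:        (default "say \"hi\"")
    fmt.Printf(" (default %q)\n", def)

    // "%s" wraps the raw string unescaped:   (default "say "hi"")
    fmt.Printf(" (default \"%s\")\n", def)
}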

View File

@ -1,22 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe

View File

@ -1,23 +0,0 @@
objx - by Mat Ryer and Tyler Bunnell
The MIT License (MIT)
Copyright (c) 2014 Stretchr, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -1,3 +0,0 @@
# objx
* Jump into the [API Documentation](http://godoc.org/github.com/stretchr/objx)

View File

@ -1,179 +0,0 @@
package objx
import (
"fmt"
"regexp"
"strconv"
"strings"
)
// arrayAccesRegexString is the regex used to extract the array number
// from the access path
const arrayAccesRegexString = `^(.+)\[([0-9]+)\]$`
// arrayAccesRegex is the compiled arrayAccesRegexString
var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString)
// Get gets the value using the specified selector and
// returns it inside a new Obj object.
//
// If it cannot find the value, Get will return a nil
// value inside an instance of Obj.
//
// Get can only operate directly on map[string]interface{} and []interface.
//
// Example
//
// To access the title of the third chapter of the second book, do:
//
// o.Get("books[1].chapters[2].title")
func (m Map) Get(selector string) *Value {
rawObj := access(m, selector, nil, false, false)
return &Value{data: rawObj}
}
// Set sets the value using the specified selector and
// returns the object on which Set was called.
//
// Set can only operate directly on map[string]interface{} and []interface
//
// Example
//
// To set the title of the third chapter of the second book, do:
//
// o.Set("books[1].chapters[2].title","Time to Go")
func (m Map) Set(selector string, value interface{}) Map {
access(m, selector, value, true, false)
return m
}
// access accesses the object using the selector and performs the
// appropriate action.
func access(current, selector, value interface{}, isSet, panics bool) interface{} {
switch selector.(type) {
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
if array, ok := current.([]interface{}); ok {
index := intFromInterface(selector)
if index >= len(array) {
if panics {
panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array)))
}
return nil
}
return array[index]
}
return nil
case string:
selStr := selector.(string)
selSegs := strings.SplitN(selStr, PathSeparator, 2)
thisSel := selSegs[0]
index := -1
var err error
// https://github.com/stretchr/objx/issues/12
if strings.Contains(thisSel, "[") {
arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel)
if len(arrayMatches) > 0 {
// Get the key into the map
thisSel = arrayMatches[1]
// Get the index into the array at the key
index, err = strconv.Atoi(arrayMatches[2])
if err != nil {
// This should never happen. If it does, something has gone
// seriously wrong. Panic.
panic("objx: Array index is not an integer. Must use array[int].")
}
}
}
if curMap, ok := current.(Map); ok {
current = map[string]interface{}(curMap)
}
// get the object in question
switch current.(type) {
case map[string]interface{}:
curMSI := current.(map[string]interface{})
if len(selSegs) <= 1 && isSet {
curMSI[thisSel] = value
return nil
} else {
current = curMSI[thisSel]
}
default:
current = nil
}
if current == nil && panics {
panic(fmt.Sprintf("objx: '%v' invalid on object.", selector))
}
// do we need to access the item of an array?
if index > -1 {
if array, ok := current.([]interface{}); ok {
if index < len(array) {
current = array[index]
} else {
if panics {
panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array)))
}
current = nil
}
}
}
if len(selSegs) > 1 {
current = access(current, selSegs[1], value, isSet, panics)
}
}
return current
}
// intFromInterface converts an interface{} value to an int using a
// type switch and type assertions
func intFromInterface(selector interface{}) int {
var value int
switch selector.(type) {
case int:
value = selector.(int)
case int8:
value = int(selector.(int8))
case int16:
value = int(selector.(int16))
case int32:
value = int(selector.(int32))
case int64:
value = int(selector.(int64))
case uint:
value = int(selector.(uint))
case uint8:
value = int(selector.(uint8))
case uint16:
value = int(selector.(uint16))
case uint32:
value = int(selector.(uint32))
case uint64:
value = int(selector.(uint64))
default:
panic("objx: array access argument is not an integer type (this should never happen)")
}
return value
}
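For reference, a minimal usage sketch of the selector syntax documented above; the book data and titles are illustrative, not taken from the original:
```go
package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	// Build a nested map and read a value with a dot/array-index selector,
	// as described by the Get documentation above.
	m := objx.New(map[string]interface{}{
		"books": []interface{}{
			map[string]interface{}{"title": "First"},
			map[string]interface{}{
				"chapters": []interface{}{
					map[string]interface{}{"title": "One"},
					map[string]interface{}{"title": "Two"},
					map[string]interface{}{"title": "Time to Go"},
				},
			},
		},
	})

	fmt.Println(m.Get("books[1].chapters[2].title").Str()) // "Time to Go"

	// Set walks the same selector path and assigns the value.
	m.Set("books[0].title", "Renamed")
	fmt.Println(m.Get("books[0].title").Str()) // "Renamed"
}
```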


@ -1,13 +0,0 @@
package objx
const (
// PathSeparator is the character used to separate the elements
// of the keypath.
//
// For example, `location.address.city`
PathSeparator string = "."
// SignatureSeparator is the character that is used to
// separate the Base64 string from the security signature.
SignatureSeparator = "_"
)


@ -1,117 +0,0 @@
package objx
import (
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"net/url"
)
// JSON converts the contained object to a JSON string
// representation
func (m Map) JSON() (string, error) {
result, err := json.Marshal(m)
if err != nil {
err = errors.New("objx: JSON encode failed with: " + err.Error())
}
return string(result), err
}
// MustJSON converts the contained object to a JSON string
// representation and panics if there is an error
func (m Map) MustJSON() string {
result, err := m.JSON()
if err != nil {
panic(err.Error())
}
return result
}
// Base64 converts the contained object to a Base64 string
// representation of the JSON string representation
func (m Map) Base64() (string, error) {
var buf bytes.Buffer
jsonData, err := m.JSON()
if err != nil {
return "", err
}
encoder := base64.NewEncoder(base64.StdEncoding, &buf)
encoder.Write([]byte(jsonData))
encoder.Close()
return buf.String(), nil
}
// MustBase64 converts the contained object to a Base64 string
// representation of the JSON string representation and panics
// if there is an error
func (m Map) MustBase64() string {
result, err := m.Base64()
if err != nil {
panic(err.Error())
}
return result
}
// SignedBase64 converts the contained object to a Base64 string
// representation of the JSON string representation and signs it
// using the provided key.
func (m Map) SignedBase64(key string) (string, error) {
base64, err := m.Base64()
if err != nil {
return "", err
}
sig := HashWithKey(base64, key)
return base64 + SignatureSeparator + sig, nil
}
// MustSignedBase64 converts the contained object to a Base64 string
// representation of the JSON string representation and signs it
// using the provided key and panics if there is an error
func (m Map) MustSignedBase64(key string) string {
result, err := m.SignedBase64(key)
if err != nil {
panic(err.Error())
}
return result
}
/*
URL Query
------------------------------------------------
*/
// URLValues creates a url.Values object from an Obj. This
// function requires that the wrapped object be a map[string]interface{}
func (m Map) URLValues() url.Values {
vals := make(url.Values)
for k, v := range m {
//TODO: can this be done without sprintf?
vals.Set(k, fmt.Sprintf("%v", v))
}
return vals
}
// URLQuery gets an encoded URL query representing the given
// Obj. This function requires that the wrapped object be a
// map[string]interface{}
func (m Map) URLQuery() (string, error) {
return m.URLValues().Encode(), nil
}
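A small sketch of how these conversion helpers chain together; the map contents and signing key below are illustrative:
```go
package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m := objx.New(map[string]interface{}{"name": "Mat", "age": 30})

	// JSON, then Base64 of that JSON, as documented above.
	j, _ := m.JSON()
	fmt.Println(j)

	b64 := m.MustBase64()
	fmt.Println(b64)

	// SignedBase64 appends SignatureSeparator plus HashWithKey(b64, key).
	signed, _ := m.SignedBase64("my-secret-key")
	fmt.Println(signed)

	// URLValues/URLQuery flatten the map into a query string.
	q, _ := m.URLQuery()
	fmt.Println(q) // e.g. age=30&name=Mat
}
```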


@ -1,72 +0,0 @@
// objx - Go package for dealing with maps, slices, JSON and other data.
//
// Overview
//
// Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes
// a powerful `Get` method (among others) that allows you to easily and quickly get
// access to data within the map, without having to worry too much about type assertions,
// missing data, default values etc.
//
// Pattern
//
// Objx uses a predictable pattern to make accessing data from within `map[string]interface{}`s
// easy.
//
// Call one of the `objx.` functions to create your `objx.Map` to get going:
//
// m, err := objx.FromJSON(json)
//
// NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong,
// the rest will be optimistic and try to figure things out without panicking.
//
// Use `Get` to access the value you're interested in. You can use dot and array
// notation too:
//
// m.Get("places[0].latlng")
//
// Once you have sought the `Value` you're interested in, you can use the `Is*` methods
// to determine its type.
//
// if m.Get("code").IsStr() { /* ... */ }
//
// Or you can just assume the type, and use one of the strong type methods to
// extract the real value:
//
// m.Get("code").Int()
//
// If there's no value there (or if it's the wrong type) then a default value
// will be returned, or you can be explicit about the default value.
//
// Get("code").Int(-1)
//
// If you're dealing with a slice of data as a value, Objx provides many useful
// methods for iterating, manipulating and selecting that data. You can find out more
// by exploring the index below.
//
// Reading data
//
// A simple example of how to use Objx:
//
// // use MustFromJSON to make an objx.Map from some JSON
// m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`)
//
// // get the details
// name := m.Get("name").Str()
// age := m.Get("age").Int()
//
// // get their nickname (or use their name if they
// // don't have one)
// nickname := m.Get("nickname").Str(name)
//
// Ranging
//
// Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For
// example, to `range` the data, do what you would expect:
//
// m := objx.MustFromJSON(json)
// for key, value := range m {
//
// /* ... do your magic ... */
//
// }
package objx


@ -1,222 +0,0 @@
package objx
import (
"encoding/base64"
"encoding/json"
"errors"
"io/ioutil"
"net/url"
"strings"
)
// MSIConvertable is an interface that defines methods for converting your
// custom types to a map[string]interface{} representation.
type MSIConvertable interface {
// MSI gets a map[string]interface{} (msi) representing the
// object.
MSI() map[string]interface{}
}
// Map provides extended functionality for working with
// untyped data, in particular map[string]interface (msi).
type Map map[string]interface{}
// Value returns the internal value instance
func (m Map) Value() *Value {
return &Value{data: m}
}
// Nil represents a nil Map.
var Nil Map = New(nil)
// New creates a new Map containing the map[string]interface{} in the data argument.
// If the data argument is not a map[string]interface{}, New attempts to call the
// MSI() method on the MSIConvertable interface to create one.
func New(data interface{}) Map {
if _, ok := data.(map[string]interface{}); !ok {
if converter, ok := data.(MSIConvertable); ok {
data = converter.MSI()
} else {
return nil
}
}
return Map(data.(map[string]interface{}))
}
// MSI creates a map[string]interface{} and puts it inside a new Map.
//
// The arguments follow a key, value pattern.
//
// Panics
//
// Panics if any key argument is non-string or if there is an odd number of arguments.
//
// Example
//
// To easily create Maps:
//
// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true))
//
// // creates an Map equivalent to
// m := objx.New(map[string]interface{}{"name": "Mat", "age": 29, "subobj": map[string]interface{}{"active": true}})
func MSI(keyAndValuePairs ...interface{}) Map {
newMap := make(map[string]interface{})
keyAndValuePairsLen := len(keyAndValuePairs)
if keyAndValuePairsLen%2 != 0 {
panic("objx: MSI must have an even number of arguments following the 'key, value' pattern.")
}
for i := 0; i < keyAndValuePairsLen; i = i + 2 {
key := keyAndValuePairs[i]
value := keyAndValuePairs[i+1]
// make sure the key is a string
keyString, keyStringOK := key.(string)
if !keyStringOK {
panic("objx: MSI must follow 'string, interface{}' pattern. " + keyString + " is not a valid key.")
}
newMap[keyString] = value
}
return New(newMap)
}
// ****** Conversion Constructors
// MustFromJSON creates a new Map containing the data specified in the
// jsonString.
//
// Panics if the JSON is invalid.
func MustFromJSON(jsonString string) Map {
o, err := FromJSON(jsonString)
if err != nil {
panic("objx: MustFromJSON failed with error: " + err.Error())
}
return o
}
// FromJSON creates a new Map containing the data specified in the
// jsonString.
//
// Returns an error if the JSON is invalid.
func FromJSON(jsonString string) (Map, error) {
var data interface{}
err := json.Unmarshal([]byte(jsonString), &data)
if err != nil {
return Nil, err
}
return New(data), nil
}
// FromBase64 creates a new Obj containing the data specified
// in the Base64 string.
//
// The string is an encoded JSON string returned by Base64
func FromBase64(base64String string) (Map, error) {
decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String))
decoded, err := ioutil.ReadAll(decoder)
if err != nil {
return nil, err
}
return FromJSON(string(decoded))
}
// MustFromBase64 creates a new Obj containing the data specified
// in the Base64 string and panics if there is an error.
//
// The string is an encoded JSON string returned by Base64
func MustFromBase64(base64String string) Map {
result, err := FromBase64(base64String)
if err != nil {
panic("objx: MustFromBase64 failed with error: " + err.Error())
}
return result
}
// FromSignedBase64 creates a new Obj containing the data specified
// in the Base64 string.
//
// The string is an encoded JSON string returned by SignedBase64
func FromSignedBase64(base64String, key string) (Map, error) {
parts := strings.Split(base64String, SignatureSeparator)
if len(parts) != 2 {
return nil, errors.New("objx: Signed base64 string is malformed.")
}
sig := HashWithKey(parts[0], key)
if parts[1] != sig {
return nil, errors.New("objx: Signature for base64 data does not match.")
}
return FromBase64(parts[0])
}
// MustFromSignedBase64 creates a new Obj containing the data specified
// in the Base64 string and panics if there is an error.
//
// The string is an encoded JSON string returned by SignedBase64
func MustFromSignedBase64(base64String, key string) Map {
result, err := FromSignedBase64(base64String, key)
if err != nil {
panic("objx: MustFromSignedBase64 failed with error: " + err.Error())
}
return result
}
// FromURLQuery generates a new Obj by parsing the specified
// query.
//
// For queries with multiple values, the first value is selected.
func FromURLQuery(query string) (Map, error) {
vals, err := url.ParseQuery(query)
if err != nil {
return nil, err
}
m := make(map[string]interface{})
for k, vals := range vals {
m[k] = vals[0]
}
return New(m), nil
}
// MustFromURLQuery generates a new Obj by parsing the specified
// query.
//
// For queries with multiple values, the first value is selected.
//
// Panics if it encounters an error
func MustFromURLQuery(query string) Map {
o, err := FromURLQuery(query)
if err != nil {
panic("objx: MustFromURLQuery failed with error: " + err.Error())
}
return o
}
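A brief sketch of the constructors above in use; the keys, values and signing key are illustrative:
```go
package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	// MSI builds a Map from alternating key/value arguments.
	m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true))
	fmt.Println(m.Get("subobj.active").Bool())

	// FromURLQuery keeps the first value for each key.
	q, err := objx.FromURLQuery("name=tyler&name=mat&state=UT")
	if err == nil {
		fmt.Println(q.Get("name").Str(), q.Get("state").Str()) // tyler UT
	}

	// Signed round trip: SignedBase64 on one side, FromSignedBase64 on the other.
	signed, _ := m.SignedBase64("key")
	back, err := objx.FromSignedBase64(signed, "key")
	fmt.Println(err == nil, back.Get("name").Str())
}
```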


@ -1,81 +0,0 @@
package objx
// Exclude returns a new Map with the keys in the specified []string
// excluded.
func (d Map) Exclude(exclude []string) Map {
excluded := make(Map)
for k, v := range d {
var shouldInclude bool = true
for _, toExclude := range exclude {
if k == toExclude {
shouldInclude = false
break
}
}
if shouldInclude {
excluded[k] = v
}
}
return excluded
}
// Copy creates a shallow copy of the Obj.
func (m Map) Copy() Map {
copied := make(map[string]interface{})
for k, v := range m {
copied[k] = v
}
return New(copied)
}
// Merge blends the specified map with a copy of this map and returns the result.
//
// Keys that appear in both will be selected from the specified map.
// This method requires that the wrapped object be a map[string]interface{}
func (m Map) Merge(merge Map) Map {
return m.Copy().MergeHere(merge)
}
// MergeHere blends the specified map with this map and returns the current map.
//
// Keys that appear in both will be selected from the specified map. The original map
// will be modified. This method requires that
// the wrapped object be a map[string]interface{}
func (m Map) MergeHere(merge Map) Map {
for k, v := range merge {
m[k] = v
}
return m
}
// Transform builds a new Obj giving the transformer a chance
// to change the keys and values as it goes. This method requires that
// the wrapped object be a map[string]interface{}
func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map {
newMap := make(map[string]interface{})
for k, v := range m {
modifiedKey, modifiedVal := transformer(k, v)
newMap[modifiedKey] = modifiedVal
}
return New(newMap)
}
// TransformKeys builds a new map using the specified key mapping.
//
// Unspecified keys will be unaltered.
// This method requires that the wrapped object be a map[string]interface{}
func (m Map) TransformKeys(mapping map[string]string) Map {
return m.Transform(func(key string, value interface{}) (string, interface{}) {
if newKey, ok := mapping[key]; ok {
return newKey, value
}
return key, value
})
}
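A short sketch of the mutation helpers above; the keys and values are illustrative:
```go
package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	base := objx.New(map[string]interface{}{"name": "Mat", "age": 30, "password": "x"})

	// Exclude drops the listed keys from a new Map.
	safe := base.Exclude([]string{"password"})

	// Merge copies the receiver and overlays the argument's keys on top.
	merged := safe.Merge(objx.MSI("age", 31, "city", "London"))
	fmt.Println(merged.Get("age").Int(), merged.Get("city").Str()) // 31 London

	// TransformKeys renames keys via the supplied mapping.
	renamed := merged.TransformKeys(map[string]string{"name": "fullName"})
	fmt.Println(renamed.Has("fullName"), renamed.Has("password")) // true false
}
```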


@ -1,14 +0,0 @@
package objx
import (
"crypto/sha1"
"encoding/hex"
)
// HashWithKey hashes the specified string using the security
// key.
func HashWithKey(data, key string) string {
hash := sha1.New()
hash.Write([]byte(data + ":" + key))
return hex.EncodeToString(hash.Sum(nil))
}
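A tiny sketch of the signature this produces, with illustrative inputs:
```go
package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	// hex(SHA-1("some data" + ":" + "some key")), exactly as implemented above.
	fmt.Println(objx.HashWithKey("some data", "some key"))
}
```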


@ -1,17 +0,0 @@
package objx
// Has gets whether there is something at the specified selector
// or not.
//
// If m is nil, Has will always return false.
func (m Map) Has(selector string) bool {
if m == nil {
return false
}
return !m.Get(selector).IsNil()
}
// IsNil gets whether the data is nil or not.
func (v *Value) IsNil() bool {
return v == nil || v.data == nil
}

File diff suppressed because it is too large


@ -1,56 +0,0 @@
package objx
import (
"fmt"
"strconv"
)
// Value provides methods for extracting interface{} data in various
// types.
type Value struct {
// data contains the raw data being managed by this Value
data interface{}
}
// Data returns the raw data contained by this Value
func (v *Value) Data() interface{} {
return v.data
}
// String returns the value always as a string
func (v *Value) String() string {
switch {
case v.IsStr():
return v.Str()
case v.IsBool():
return strconv.FormatBool(v.Bool())
case v.IsFloat32():
return strconv.FormatFloat(float64(v.Float32()), 'f', -1, 32)
case v.IsFloat64():
return strconv.FormatFloat(v.Float64(), 'f', -1, 64)
case v.IsInt():
return strconv.FormatInt(int64(v.Int()), 10)
case v.IsInt8():
return strconv.FormatInt(int64(v.Int8()), 10)
case v.IsInt16():
return strconv.FormatInt(int64(v.Int16()), 10)
case v.IsInt32():
return strconv.FormatInt(int64(v.Int32()), 10)
case v.IsInt64():
return strconv.FormatInt(v.Int64(), 10)
case v.IsUint():
return strconv.FormatUint(uint64(v.Uint()), 10)
case v.IsUint8():
return strconv.FormatUint(uint64(v.Uint8()), 10)
case v.IsUint16():
return strconv.FormatUint(uint64(v.Uint16()), 10)
case v.IsUint32():
return strconv.FormatUint(uint64(v.Uint32()), 10)
case v.IsUint64():
return strconv.FormatUint(v.Uint64(), 10)
}
return fmt.Sprintf("%#v", v.Data())
}
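A quick sketch of the formatting behaviour described above, with illustrative values:
```go
package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m := objx.New(map[string]interface{}{"count": 42, "enabled": true, "ratio": 0.5})

	// Each branch of the switch above picks the matching strconv formatter.
	fmt.Println(m.Get("count").String())   // "42"
	fmt.Println(m.Get("enabled").String()) // "true"
	fmt.Println(m.Get("ratio").String())   // "0.5"
}
```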


@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
.DS_Store


@ -1,16 +0,0 @@
language: go
sudo: false
go:
- 1.1
- 1.2
- 1.3
- 1.4
- 1.5
- 1.6
- 1.7
- tip
script:
- go test -v ./...


@ -1,332 +0,0 @@
Testify - Thou Shalt Write Tests
================================
[![Build Status](https://travis-ci.org/stretchr/testify.svg)](https://travis-ci.org/stretchr/testify) [![Go Report Card](https://goreportcard.com/badge/github.com/stretchr/testify)](https://goreportcard.com/report/github.com/stretchr/testify) [![GoDoc](https://godoc.org/github.com/stretchr/testify?status.svg)](https://godoc.org/github.com/stretchr/testify)
A set of Go (golang) packages that provide many tools for testifying that your code will behave as you intend.
Features include:
* [Easy assertions](#assert-package)
* [Mocking](#mock-package)
* [HTTP response trapping](#http-package)
* [Testing suite interfaces and functions](#suite-package)
Get started:
* Install testify with [one line of code](#installation), or [update it with another](#staying-up-to-date)
* For an introduction to writing test code in Go, see http://golang.org/doc/code.html#Testing
* Check out the API Documentation http://godoc.org/github.com/stretchr/testify
* To make your testing life easier, check out our other project, [gorc](http://github.com/stretchr/gorc)
* A little about [Test-Driven Development (TDD)](http://en.wikipedia.org/wiki/Test-driven_development)
[`assert`](http://godoc.org/github.com/stretchr/testify/assert "API documentation") package
-------------------------------------------------------------------------------------------
The `assert` package provides some helpful methods that allow you to write better test code in Go.
* Prints friendly, easy to read failure descriptions
* Allows for very readable code
* Optionally annotate each assertion with a message
See it in action:
```go
package yours
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestSomething(t *testing.T) {
// assert equality
assert.Equal(t, 123, 123, "they should be equal")
// assert inequality
assert.NotEqual(t, 123, 456, "they should not be equal")
// assert for nil (good for errors)
assert.Nil(t, object)
// assert for not nil (good when you expect something)
if assert.NotNil(t, object) {
// now we know that object isn't nil, we are safe to make
// further assertions without causing any errors
assert.Equal(t, "Something", object.Value)
}
}
```
* Every assert func takes the `testing.T` object as the first argument. This is how it writes the errors out through the normal `go test` capabilities.
* Every assert func returns a bool indicating whether the assertion was successful or not; this is useful if you want to go on making further assertions under certain conditions.
If you assert many times, use the form below:
```go
package yours
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestSomething(t *testing.T) {
assert := assert.New(t)
// assert equality
assert.Equal(123, 123, "they should be equal")
// assert inequality
assert.NotEqual(123, 456, "they should not be equal")
// assert for nil (good for errors)
assert.Nil(object)
// assert for not nil (good when you expect something)
if assert.NotNil(object) {
// now we know that object isn't nil, we are safe to make
// further assertions without causing any errors
assert.Equal("Something", object.Value)
}
}
```
[`require`](http://godoc.org/github.com/stretchr/testify/require "API documentation") package
---------------------------------------------------------------------------------------------
The `require` package provides the same global functions as the `assert` package, but instead of returning a boolean result they terminate the current test.
See [t.FailNow](http://golang.org/pkg/testing/#T.FailNow) for details.
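A minimal sketch of the difference; `someFunction` here is a hypothetical stand-in for code under test:
```go
package yours

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// someFunction is a hypothetical stand-in for the code under test.
func someFunction() (string, error) {
	return "result", nil
}

func TestSomethingCritical(t *testing.T) {
	result, err := someFunction()

	// Unlike assert, require stops the test immediately on failure
	// (via t.FailNow), so the next line can safely rely on result.
	require.NoError(t, err)
	require.Equal(t, "result", result)
}
```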
[`http`](http://godoc.org/github.com/stretchr/testify/http "API documentation") package
---------------------------------------------------------------------------------------
The `http` package contains test objects useful for testing code that relies on the `net/http` package. Check out the [(deprecated) API documentation for the `http` package](http://godoc.org/github.com/stretchr/testify/http).
We recommend you use [httptest](http://golang.org/pkg/net/http/httptest) instead.
[`mock`](http://godoc.org/github.com/stretchr/testify/mock "API documentation") package
----------------------------------------------------------------------------------------
The `mock` package provides a mechanism for easily writing mock objects that can be used in place of real objects when writing test code.
An example test function that tests a piece of code relying on an external object `testObj` can set up expectations (testify) and assert that they indeed happened:
```go
package yours
import (
"testing"
"github.com/stretchr/testify/mock"
)
/*
Test objects
*/
// MyMockedObject is a mocked object that implements an interface
// that describes an object that the code I am testing relies on.
type MyMockedObject struct{
mock.Mock
}
// DoSomething is a method on MyMockedObject that implements some interface
// and just records the activity, and returns what the Mock object tells it to.
//
// In the real object, this method would do something useful, but since this
// is a mocked object - we're just going to stub it out.
//
// NOTE: This method is not being tested here, code that uses this object is.
func (m *MyMockedObject) DoSomething(number int) (bool, error) {
args := m.Called(number)
return args.Bool(0), args.Error(1)
}
/*
Actual test functions
*/
// TestSomething is an example of how to use our test object to
// make assertions about some target code we are testing.
func TestSomething(t *testing.T) {
// create an instance of our test object
testObj := new(MyMockedObject)
// setup expectations
testObj.On("DoSomething", 123).Return(true, nil)
// call the code we are testing
targetFuncThatDoesSomethingWithObj(testObj)
// assert that the expectations were met
testObj.AssertExpectations(t)
}
```
For more information on how to write mock code, check out the [API documentation for the `mock` package](http://godoc.org/github.com/stretchr/testify/mock).
You can use the [mockery tool](http://github.com/vektra/mockery) to autogenerate the mock code against an interface as well, making using mocks much quicker.
[`suite`](http://godoc.org/github.com/stretchr/testify/suite "API documentation") package
-----------------------------------------------------------------------------------------
The `suite` package provides functionality that you might be used to from more common object-oriented languages. With it, you can build a testing suite as a struct, build setup/teardown methods and testing methods on your struct, and run them with `go test` as normal.
An example suite is shown below:
```go
// Basic imports
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
)
// Define the suite, and absorb the built-in basic suite
// functionality from testify - including a T() method which
// returns the current testing context
type ExampleTestSuite struct {
suite.Suite
VariableThatShouldStartAtFive int
}
// Make sure that VariableThatShouldStartAtFive is set to five
// before each test
func (suite *ExampleTestSuite) SetupTest() {
suite.VariableThatShouldStartAtFive = 5
}
// All methods that begin with "Test" are run as tests within a
// suite.
func (suite *ExampleTestSuite) TestExample() {
assert.Equal(suite.T(), 5, suite.VariableThatShouldStartAtFive)
}
// In order for 'go test' to run this suite, we need to create
// a normal test function and pass our suite to suite.Run
func TestExampleTestSuite(t *testing.T) {
suite.Run(t, new(ExampleTestSuite))
}
```
For a more complete example, using all of the functionality provided by the suite package, look at our [example testing suite](https://github.com/stretchr/testify/blob/master/suite/suite_test.go)
For more information on writing suites, check out the [API documentation for the `suite` package](http://godoc.org/github.com/stretchr/testify/suite).
The `Suite` object has assertion methods:
```go
// Basic imports
import (
"testing"
"github.com/stretchr/testify/suite"
)
// Define the suite, and absorb the built-in basic suite
// functionality from testify - including assertion methods.
type ExampleTestSuite struct {
suite.Suite
VariableThatShouldStartAtFive int
}
// Make sure that VariableThatShouldStartAtFive is set to five
// before each test
func (suite *ExampleTestSuite) SetupTest() {
suite.VariableThatShouldStartAtFive = 5
}
// All methods that begin with "Test" are run as tests within a
// suite.
func (suite *ExampleTestSuite) TestExample() {
suite.Equal(suite.VariableThatShouldStartAtFive, 5)
}
// In order for 'go test' to run this suite, we need to create
// a normal test function and pass our suite to suite.Run
func TestExampleTestSuite(t *testing.T) {
suite.Run(t, new(ExampleTestSuite))
}
```
------
Installation
============
To install Testify, use `go get`:
* Latest version: go get github.com/stretchr/testify
* Specific version: go get gopkg.in/stretchr/testify.v1
This will then make the following packages available to you:
github.com/stretchr/testify/assert
github.com/stretchr/testify/mock
github.com/stretchr/testify/http
Import the `testify/assert` package into your code using this template:
```go
package yours
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestSomething(t *testing.T) {
assert.True(t, true, "True is true!")
}
```
------
Staying up to date
==================
To update Testify to the latest version, use `go get -u github.com/stretchr/testify`.
------
Version History
===============
* 1.0 - New package versioning strategy adopted.
------
Contributing
============
Please feel free to submit issues, fork the repository and send pull requests!
When submitting an issue, we ask that you please include a complete test function that demonstrates the issue. Extra credit for those using Testify to write the test code that demonstrates it.
------
Licence
=======
Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell
Please consider promoting this project if you find it useful.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@ -1,345 +1,386 @@
/*
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
* THIS FILE MUST NOT BE EDITED BY HAND
*/
*/
package assert
import (
http "net/http"
url "net/url"
time "time"
)
// Condition uses a Comparison to assert a complex condition.
func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {
return Condition(a.t, comp, msgAndArgs...)
}
// Contains asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
//
//
// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'")
// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
return Contains(a.t, s, contains, msgAndArgs...)
}
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
//
// a.Empty(obj)
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
return Empty(a.t, object, msgAndArgs...)
}
// Equal asserts that two objects are equal.
//
//
// a.Equal(123, 123, "123 and 123 should be equal")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return Equal(a.t, expected, actual, msgAndArgs...)
}
// EqualError asserts that a function returned an error (i.e. not `nil`)
// and that it is equal to the provided error.
//
//
// actualObj, err := SomeFunction()
// a.EqualError(err, expectedErrorString, "An error was expected")
//
// if assert.Error(t, err, "An error was expected") {
// assert.Equal(t, err, expectedError)
// }
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
return EqualError(a.t, theError, errString, msgAndArgs...)
}
// EqualValues asserts that two objects are equal or convertible to the same types
// and equal.
//
//
// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return EqualValues(a.t, expected, actual, msgAndArgs...)
}
// Error asserts that a function returned an error (i.e. not `nil`).
//
//
// actualObj, err := SomeFunction()
// if a.Error(err, "An error was expected") {
// assert.Equal(t, err, expectedError)
// }
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
return Error(a.t, err, msgAndArgs...)
}
// Exactly asserts that two objects are equal in value and type.
//
//
// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return Exactly(a.t, expected, actual, msgAndArgs...)
}
// Fail reports a failure through
func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
return Fail(a.t, failureMessage, msgAndArgs...)
}
// FailNow fails test
func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool {
return FailNow(a.t, failureMessage, msgAndArgs...)
}
// False asserts that the specified value is false.
//
//
// a.False(myBool, "myBool should be false")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
return False(a.t, value, msgAndArgs...)
}
// HTTPBodyContains asserts that a specified handler returns a
// body that contains a string.
//
//
// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {
return HTTPBodyContains(a.t, handler, method, url, values, str)
}
// HTTPBodyNotContains asserts that a specified handler returns a
// body that does not contain a string.
//
//
// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {
return HTTPBodyNotContains(a.t, handler, method, url, values, str)
}
// HTTPError asserts that a specified handler returns an error status code.
//
//
// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) bool {
return HTTPError(a.t, handler, method, url, values)
}
// HTTPRedirect asserts that a specified handler returns a redirect status code.
//
//
// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) bool {
return HTTPRedirect(a.t, handler, method, url, values)
}
// HTTPSuccess asserts that a specified handler returns a success status code.
//
//
// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) bool {
return HTTPSuccess(a.t, handler, method, url, values)
}
// Implements asserts that an object is implemented by the specified interface.
//
//
// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject")
func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
return Implements(a.t, interfaceObject, object, msgAndArgs...)
}
// InDelta asserts that the two numerals are within delta of each other.
//
//
// a.InDelta(math.Pi, (22 / 7.0), 0.01)
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
return InDelta(a.t, expected, actual, delta, msgAndArgs...)
}
// InDeltaSlice is the same as InDelta, except it compares two slices.
func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
}
// InEpsilon asserts that expected and actual have a relative error less than epsilon
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
}
// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
// InEpsilonSlice is the same as InEpsilon, except it compares two slices.
func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
return InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...)
}
// IsType asserts that the specified objects are of the same type.
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
return IsType(a.t, expectedType, object, msgAndArgs...)
}
// JSONEq asserts that two JSON strings are equivalent.
//
//
// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool {
return JSONEq(a.t, expected, actual, msgAndArgs...)
}
// Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() does not accept.
//
//
// a.Len(mySlice, 3, "The size of slice is not 3")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
return Len(a.t, object, length, msgAndArgs...)
}
// Nil asserts that the specified object is nil.
//
//
// a.Nil(err, "err should be nothing")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
return Nil(a.t, object, msgAndArgs...)
}
// NoError asserts that a function returned no error (i.e. `nil`).
//
//
// actualObj, err := SomeFunction()
// if a.NoError(err) {
// assert.Equal(t, actualObj, expectedObj)
// }
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {
return NoError(a.t, err, msgAndArgs...)
}
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
//
// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
return NotContains(a.t, s, contains, msgAndArgs...)
}
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
//
// if a.NotEmpty(obj) {
// assert.Equal(t, "two", obj[1])
// }
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {
return NotEmpty(a.t, object, msgAndArgs...)
}
// NotEqual asserts that the specified values are NOT equal.
//
//
// a.NotEqual(obj1, obj2, "two objects shouldn't be equal")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return NotEqual(a.t, expected, actual, msgAndArgs...)
}
// NotNil asserts that the specified object is not nil.
//
//
// a.NotNil(err, "err should be something")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
return NotNil(a.t, object, msgAndArgs...)
}
// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
//
//
// a.NotPanics(func(){
// RemainCalm()
// }, "Calling RemainCalm() should NOT panic")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
return NotPanics(a.t, f, msgAndArgs...)
}
// NotRegexp asserts that a specified regexp does not match a string.
//
//
// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
// a.NotRegexp("^start", "it's not starting")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
return NotRegexp(a.t, rx, str, msgAndArgs...)
}
// NotZero asserts that i is not the zero value for its type and returns the truth.
func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool {
return NotZero(a.t, i, msgAndArgs...)
}
// Panics asserts that the code inside the specified PanicTestFunc panics.
//
//
// a.Panics(func(){
// GoCrazy()
// }, "Calling GoCrazy() should panic")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
return Panics(a.t, f, msgAndArgs...)
}
// Regexp asserts that a specified regexp matches a string.
//
//
// a.Regexp(regexp.MustCompile("start"), "it's starting")
// a.Regexp("start...$", "it's not starting")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
return Regexp(a.t, rx, str, msgAndArgs...)
}
// True asserts that the specified value is true.
//
//
// a.True(myBool, "myBool should be true")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
return True(a.t, value, msgAndArgs...)
}
// WithinDuration asserts that the two times are within duration delta of each other.
//
//
// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
return WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
}
// Zero asserts that i is the zero value for its type and returns the truth.
func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {
return Zero(a.t, i, msgAndArgs...)


@ -18,10 +18,6 @@ import (
"github.com/pmezard/go-difflib/difflib"
)
func init() {
spew.Config.SortKeys = true
}
// TestingT is an interface wrapper around *testing.T
type TestingT interface {
Errorf(format string, args ...interface{})
@ -69,7 +65,7 @@ func ObjectsAreEqualValues(expected, actual interface{}) bool {
/* CallerInfo is necessary because the assert functions use the testing object
internally, causing it to print the file:line of the assert method, rather than where
the problem actually occurred in calling code.*/
the problem actually occured in calling code.*/
// CallerInfo returns an array of strings containing the file and line number
// of each stack frame leading from the current test to the assert call that
@ -86,9 +82,7 @@ func CallerInfo() []string {
for i := 0; ; i++ {
pc, file, line, ok = runtime.Caller(i)
if !ok {
// The breaks below failed to terminate the loop, and we ran off the
// end of the call stack.
break
return nil
}
// This is a huge edge case, but it will panic if this is the case, see #180
@ -96,21 +90,6 @@ func CallerInfo() []string {
break
}
f := runtime.FuncForPC(pc)
if f == nil {
break
}
name = f.Name()
// testing.tRunner is the standard library function that calls
// tests. Subtests are called directly by tRunner, without going through
// the Test/Benchmark/Example function that contains the t.Run calls, so
// with subtests we should break when we hit tRunner, without adding it
// to the list of callers.
if name == "testing.tRunner" {
break
}
parts := strings.Split(file, "/")
dir := parts[len(parts)-2]
file = parts[len(parts)-1]
@ -118,6 +97,11 @@ func CallerInfo() []string {
callers = append(callers, fmt.Sprintf("%s:%d", file, line))
}
f := runtime.FuncForPC(pc)
if f == nil {
break
}
name = f.Name()
// Drop the package
segments := strings.Split(name, ".")
name = segments[len(segments)-1]
@ -278,48 +262,14 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{})
if !ObjectsAreEqual(expected, actual) {
diff := diff(expected, actual)
expected, actual = formatUnequalValues(expected, actual)
return Fail(t, fmt.Sprintf("Not equal: \n"+
"expected: %s\n"+
"received: %s%s", expected, actual, diff), msgAndArgs...)
return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+
" != %#v (actual)%s", expected, actual, diff), msgAndArgs...)
}
return true
}
// formatUnequalValues takes two values of arbitrary types and returns string
// representations appropriate to be presented to the user.
//
// If the values are not of like type, the returned strings will be prefixed
// with the type name, and the value will be enclosed in parenthesis similar
// to a type conversion in the Go grammar.
func formatUnequalValues(expected, actual interface{}) (e string, a string) {
aType := reflect.TypeOf(expected)
bType := reflect.TypeOf(actual)
if aType != bType && isNumericType(aType) && isNumericType(bType) {
return fmt.Sprintf("%v(%#v)", aType, expected),
fmt.Sprintf("%v(%#v)", bType, actual)
}
return fmt.Sprintf("%#v", expected),
fmt.Sprintf("%#v", actual)
}
func isNumericType(t reflect.Type) bool {
switch t.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return true
case reflect.Float32, reflect.Float64:
return true
}
return false
}
// EqualValues asserts that two objects are equal or convertable to the same types
// and equal.
//
@ -329,11 +279,8 @@ func isNumericType(t reflect.Type) bool {
func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
if !ObjectsAreEqualValues(expected, actual) {
diff := diff(expected, actual)
expected, actual = formatUnequalValues(expected, actual)
return Fail(t, fmt.Sprintf("Not equal: \n"+
"expected: %s\n"+
"received: %s%s", expected, actual, diff), msgAndArgs...)
return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+
" != %#v (actual)", expected, actual), msgAndArgs...)
}
return true
@ -885,11 +832,11 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m
//
// Returns whether the assertion was successful (true) or not (false).
func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
if err != nil {
return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...)
if isNil(err) {
return true
}
return true
return Fail(t, fmt.Sprintf("Received unexpected error %q", err), msgAndArgs...)
}
// Error asserts that a function returned an error (i.e. not `nil`).
@ -902,33 +849,29 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
// Returns whether the assertion was successful (true) or not (false).
func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
if err == nil {
return Fail(t, "An error is expected but got nil.", msgAndArgs...)
}
message := messageFromMsgAndArgs(msgAndArgs...)
return NotNil(t, err, "An error is expected but got nil. %s", message)
return true
}
// EqualError asserts that a function returned an error (i.e. not `nil`)
// and that it is equal to the provided error.
//
// actualObj, err := SomeFunction()
// assert.EqualError(t, err, expectedErrorString, "An error was expected")
// if assert.Error(t, err, "An error was expected") {
// assert.Equal(t, err, expectedError)
// }
//
// Returns whether the assertion was successful (true) or not (false).
func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {
if !Error(t, theError, msgAndArgs...) {
message := messageFromMsgAndArgs(msgAndArgs...)
if !NotNil(t, theError, "An error is expected but got nil. %s", message) {
return false
}
expected := errString
actual := theError.Error()
// don't need to use deep equals here, we know they are both strings
if expected != actual {
return Fail(t, fmt.Sprintf("Error message not equal:\n"+
"expected: %q\n"+
"received: %q", expected, actual), msgAndArgs...)
}
return true
s := "An error with value \"%s\" is expected but got \"%s\". %s"
return Equal(t, errString, theError.Error(),
s, errString, theError.Error(), message)
}
// matchRegexp return true if a specified regexp matches a string.
@ -1043,6 +986,7 @@ func diff(expected interface{}, actual interface{}) string {
return ""
}
spew.Config.SortKeys = true
e := spew.Sdump(expected)
a := spew.Sdump(actual)


@ -99,7 +99,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin
contains := strings.Contains(body, fmt.Sprint(str))
if contains {
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
Fail(t, "Expected response body for %s to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)
}
return !contains


@ -1,22 +0,0 @@
// Package testify is a set of packages that provide many tools for testifying that your code will behave as you intend.
//
// testify contains the following packages:
//
// The assert package provides a comprehensive set of assertion functions that tie in to the Go testing system.
//
// The http package contains tools to make it easier to test http activity using the Go testing system.
//
// The mock package provides a system by which it is possible to mock your objects and verify calls are happening as expected.
//
// The suite package provides a basic structure for using structs as testing suites, and methods on those structs as tests. It includes setup/teardown functionality in the way of interfaces.
package testify
// blank imports help docs.
import (
// assert package
_ "github.com/stretchr/testify/assert"
// http package
_ "github.com/stretchr/testify/http"
// mock package
_ "github.com/stretchr/testify/mock"
)


@ -1,2 +0,0 @@
// Package http DEPRECATED USE net/http/httptest
package http


@ -1,49 +0,0 @@
package http
import (
"net/http"
)
// TestResponseWriter DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead.
type TestResponseWriter struct {
// StatusCode is the last int written by the call to WriteHeader(int)
StatusCode int
// Output is a string containing the written bytes using the Write([]byte) func.
Output string
// header is the internal storage of the http.Header object
header http.Header
}
// Header DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead.
func (rw *TestResponseWriter) Header() http.Header {
if rw.header == nil {
rw.header = make(http.Header)
}
return rw.header
}
// Write DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead.
func (rw *TestResponseWriter) Write(bytes []byte) (int, error) {
// assume 200 success if no header has been set
if rw.StatusCode == 0 {
rw.WriteHeader(200)
}
// add these bytes to the output string
rw.Output = rw.Output + string(bytes)
// return normal values
return 0, nil
}
// WriteHeader DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead.
func (rw *TestResponseWriter) WriteHeader(i int) {
rw.StatusCode = i
}


@ -1,17 +0,0 @@
package http
import (
"github.com/stretchr/testify/mock"
"net/http"
)
// TestRoundTripper DEPRECATED USE net/http/httptest
type TestRoundTripper struct {
mock.Mock
}
// RoundTrip DEPRECATED USE net/http/httptest
func (t *TestRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
args := t.Called(req)
return args.Get(0).(*http.Response), args.Error(1)
}


@ -1,44 +0,0 @@
// Package mock provides a system by which it is possible to mock your objects
// and verify calls are happening as expected.
//
// Example Usage
//
// The mock package provides an object, Mock, that tracks activity on another object. It is usually
// embedded into a test object as shown below:
//
// type MyTestObject struct {
// // add a Mock object instance
// mock.Mock
//
// // other fields go here as normal
// }
//
// When implementing the methods of an interface, you wire your functions up
// to call the Mock.Called(args...) method, and return the appropriate values.
//
// For example, to mock a method that saves the name and age of a person and returns
// the year of their birth or an error, you might write this:
//
// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) {
// args := o.Called(firstname, lastname, age)
// return args.Int(0), args.Error(1)
// }
//
// The Int, Error and Bool methods are examples of strongly typed getters that take the argument
// index position. Given this argument list:
//
// (12, true, "Something")
//
// You could read them out strongly typed like this:
//
// args.Int(0)
// args.Bool(1)
// args.String(2)
//
// For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion:
//
// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine)
//
// This may cause a panic if the object you are getting is nil (the type assertion will fail), in those
// cases you should check for nil first.
package mock


@ -1,763 +0,0 @@
package mock
import (
"fmt"
"reflect"
"regexp"
"runtime"
"strings"
"sync"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/pmezard/go-difflib/difflib"
"github.com/stretchr/objx"
"github.com/stretchr/testify/assert"
)
func init() {
spew.Config.SortKeys = true
}
// TestingT is an interface wrapper around *testing.T
type TestingT interface {
Logf(format string, args ...interface{})
Errorf(format string, args ...interface{})
FailNow()
}
/*
Call
*/
// Call represents a method call and is used for setting expectations,
// as well as recording activity.
type Call struct {
Parent *Mock
// The name of the method that was or will be called.
Method string
// Holds the arguments of the method.
Arguments Arguments
// Holds the arguments that should be returned when
// this method is called.
ReturnArguments Arguments
// The number of times to return the return arguments when setting
// expectations. 0 means to always return the value.
Repeatability int
// Amount of times this call has been called
totalCalls int
// Holds a channel that will be used to block the Return until it either
// receives a message or is closed. nil means it returns immediately.
WaitFor <-chan time.Time
// Holds a handler used to manipulate arguments content that are passed by
// reference. It's useful when mocking methods such as unmarshalers or
// decoders.
RunFn func(Arguments)
}
func newCall(parent *Mock, methodName string, methodArguments ...interface{}) *Call {
return &Call{
Parent: parent,
Method: methodName,
Arguments: methodArguments,
ReturnArguments: make([]interface{}, 0),
Repeatability: 0,
WaitFor: nil,
RunFn: nil,
}
}
func (c *Call) lock() {
c.Parent.mutex.Lock()
}
func (c *Call) unlock() {
c.Parent.mutex.Unlock()
}
// Return specifies the return arguments for the expectation.
//
// Mock.On("DoSomething").Return(errors.New("failed"))
func (c *Call) Return(returnArguments ...interface{}) *Call {
c.lock()
defer c.unlock()
c.ReturnArguments = returnArguments
return c
}
// Once indicates that the mock should only return the value once.
//
// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once()
func (c *Call) Once() *Call {
return c.Times(1)
}
// Twice indicates that the mock should only return the value twice.
//
// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice()
func (c *Call) Twice() *Call {
return c.Times(2)
}
// Times indicates that the mock should only return the indicated number
// of times.
//
// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5)
func (c *Call) Times(i int) *Call {
c.lock()
defer c.unlock()
c.Repeatability = i
return c
}
// WaitUntil sets the channel that will block the mock's return until it is closed
// or a message is received.
//
// Mock.On("MyMethod", arg1, arg2).WaitUntil(time.After(time.Second))
func (c *Call) WaitUntil(w <-chan time.Time) *Call {
c.lock()
defer c.unlock()
c.WaitFor = w
return c
}
// After sets how long to block until the call returns
//
// Mock.On("MyMethod", arg1, arg2).After(time.Second)
func (c *Call) After(d time.Duration) *Call {
return c.WaitUntil(time.After(d))
}
// Run sets a handler to be called before returning. It can be used when
// mocking a method (such as an unmarshaler) that takes a pointer to a struct and
// sets properties on that struct
//
// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}").Return().Run(func(args Arguments) {
// arg := args.Get(0).(*map[string]interface{})
// arg["foo"] = "bar"
// })
func (c *Call) Run(fn func(Arguments)) *Call {
c.lock()
defer c.unlock()
c.RunFn = fn
return c
}
// On chains a new expectation description onto the mocked interface. This
// allows syntax like:
//
// Mock.
// On("MyMethod", 1).Return(nil).
// On("MyOtherMethod", 'a', 'b', 'c').Return(errors.New("Some Error"))
func (c *Call) On(methodName string, arguments ...interface{}) *Call {
return c.Parent.On(methodName, arguments...)
}
// Mock is the workhorse used to track activity on another object.
// For an example of its usage, refer to the "Example Usage" section at the top
// of this document.
type Mock struct {
// Represents the calls that are expected of
// an object.
ExpectedCalls []*Call
// Holds the calls that were made to this mocked object.
Calls []Call
// TestData holds any data that might be useful for testing. Testify ignores
// this data completely allowing you to do whatever you like with it.
testData objx.Map
mutex sync.Mutex
}
// TestData holds any data that might be useful for testing. Testify ignores
// this data completely allowing you to do whatever you like with it.
func (m *Mock) TestData() objx.Map {
if m.testData == nil {
m.testData = make(objx.Map)
}
return m.testData
}
/*
Setting expectations
*/
// On starts a description of an expectation of the specified method
// being called.
//
// Mock.On("MyMethod", arg1, arg2)
func (m *Mock) On(methodName string, arguments ...interface{}) *Call {
for _, arg := range arguments {
if v := reflect.ValueOf(arg); v.Kind() == reflect.Func {
panic(fmt.Sprintf("cannot use Func in expectations. Use mock.AnythingOfType(\"%T\")", arg))
}
}
m.mutex.Lock()
defer m.mutex.Unlock()
c := newCall(m, methodName, arguments...)
m.ExpectedCalls = append(m.ExpectedCalls, c)
return c
}
/*
Recording and responding to activity
*/
func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) {
m.mutex.Lock()
defer m.mutex.Unlock()
for i, call := range m.ExpectedCalls {
if call.Method == method && call.Repeatability > -1 {
_, diffCount := call.Arguments.Diff(arguments)
if diffCount == 0 {
return i, call
}
}
}
return -1, nil
}
func (m *Mock) findClosestCall(method string, arguments ...interface{}) (bool, *Call) {
diffCount := 0
var closestCall *Call
for _, call := range m.expectedCalls() {
if call.Method == method {
_, tempDiffCount := call.Arguments.Diff(arguments)
if tempDiffCount < diffCount || diffCount == 0 {
diffCount = tempDiffCount
closestCall = call
}
}
}
if closestCall == nil {
return false, nil
}
return true, closestCall
}
func callString(method string, arguments Arguments, includeArgumentValues bool) string {
var argValsString string
if includeArgumentValues {
var argVals []string
for argIndex, arg := range arguments {
argVals = append(argVals, fmt.Sprintf("%d: %#v", argIndex, arg))
}
argValsString = fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t"))
}
return fmt.Sprintf("%s(%s)%s", method, arguments.String(), argValsString)
}
// Called tells the mock object that a method has been called, and gets an array
// of arguments to return. Panics if the call is unexpected (i.e. not preceded by
// appropriate .On(...).Return(...) calls).
// If Call.WaitFor is set, blocks until the channel is closed or receives a message.
func (m *Mock) Called(arguments ...interface{}) Arguments {
// get the calling function's name
pc, _, _, ok := runtime.Caller(1)
if !ok {
panic("Couldn't get the caller information")
}
functionPath := runtime.FuncForPC(pc).Name()
// The next four lines are required to handle GCCGO function naming conventions.
// For example: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock
// uses interface information, unlike the standard Go name github.com/docker/libkv/store/mock.(*Mock).WatchTree.
// With GCCGO we need to remove the interface information starting from pN<dd>.
re := regexp.MustCompile("\\.pN\\d+_")
if re.MatchString(functionPath) {
functionPath = re.Split(functionPath, -1)[0]
}
parts := strings.Split(functionPath, ".")
functionName := parts[len(parts)-1]
found, call := m.findExpectedCall(functionName, arguments...)
if found < 0 {
// we have to fail here - because we don't know what to return
// for this call. This is because:
//
// a) this is a totally unexpected call to this method,
// b) the arguments are not what was expected, or
// c) the developer has forgotten to add an accompanying On...Return pair.
closestFound, closestCall := m.findClosestCall(functionName, arguments...)
if closestFound {
panic(fmt.Sprintf("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n\n%s\n", callString(functionName, arguments, true), callString(functionName, closestCall.Arguments, true), diffArguments(arguments, closestCall.Arguments)))
} else {
panic(fmt.Sprintf("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", functionName, functionName, callString(functionName, arguments, true), assert.CallerInfo()))
}
} else {
m.mutex.Lock()
switch {
case call.Repeatability == 1:
call.Repeatability = -1
call.totalCalls++
case call.Repeatability > 1:
call.Repeatability--
call.totalCalls++
case call.Repeatability == 0:
call.totalCalls++
}
m.mutex.Unlock()
}
// add the call
m.mutex.Lock()
m.Calls = append(m.Calls, *newCall(m, functionName, arguments...))
m.mutex.Unlock()
// block if specified
if call.WaitFor != nil {
<-call.WaitFor
}
if call.RunFn != nil {
call.RunFn(arguments)
}
return call.ReturnArguments
}
/*
Assertions
*/
type assertExpectationser interface {
AssertExpectations(TestingT) bool
}
// AssertExpectationsForObjects asserts that everything specified with On and Return
// of the specified objects was in fact called as expected.
//
// Calls may have occurred in any order.
func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool {
for _, obj := range testObjects {
if m, ok := obj.(Mock); ok {
t.Logf("Deprecated mock.AssertExpectationsForObjects(myMock.Mock) use mock.AssertExpectationsForObjects(myMock)")
obj = &m
}
m := obj.(assertExpectationser)
if !m.AssertExpectations(t) {
return false
}
}
return true
}
// AssertExpectations asserts that everything specified with On and Return was
// in fact called as expected. Calls may have occurred in any order.
func (m *Mock) AssertExpectations(t TestingT) bool {
var somethingMissing bool
var failedExpectations int
// iterate through each expectation
expectedCalls := m.expectedCalls()
for _, expectedCall := range expectedCalls {
if !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments) && expectedCall.totalCalls == 0 {
somethingMissing = true
failedExpectations++
t.Logf("\u274C\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String())
} else {
m.mutex.Lock()
if expectedCall.Repeatability > 0 {
somethingMissing = true
failedExpectations++
} else {
t.Logf("\u2705\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String())
}
m.mutex.Unlock()
}
}
if somethingMissing {
t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(expectedCalls)-failedExpectations, len(expectedCalls), failedExpectations, assert.CallerInfo())
}
return !somethingMissing
}
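// Illustrative sketch (not part of the vendored source): a test that sets an
// expectation, exercises the mock, and then verifies it. It reuses the
// hypothetical MessageSender type sketched above.
//
//    func TestSend(t *testing.T) {
//        m := new(MessageSender)
//        m.On("Send", "alice", "hello").Return(nil).Once()
//
//        err := m.Send("alice", "hello")
//
//        assert.NoError(t, err)
//        m.AssertExpectations(t)
//        m.AssertNumberOfCalls(t, "Send", 1)
//    }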
// AssertNumberOfCalls asserts that the method was called expectedCalls times.
func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool {
var actualCalls int
for _, call := range m.calls() {
if call.Method == methodName {
actualCalls++
}
}
return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls))
}
// AssertCalled asserts that the method was called.
// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method.
func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool {
if !assert.True(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method should have been called with %d argument(s), but was not.", methodName, len(arguments))) {
t.Logf("%v", m.expectedCalls())
return false
}
return true
}
// AssertNotCalled asserts that the method was not called.
// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method.
func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool {
if !assert.False(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method was called with %d argument(s), but should NOT have been.", methodName, len(arguments))) {
t.Logf("%v", m.expectedCalls())
return false
}
return true
}
func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool {
for _, call := range m.calls() {
if call.Method == methodName {
_, differences := Arguments(expected).Diff(call.Arguments)
if differences == 0 {
// found the expected call
return true
}
}
}
// we didn't find the expected call
return false
}
func (m *Mock) expectedCalls() []*Call {
m.mutex.Lock()
defer m.mutex.Unlock()
return append([]*Call{}, m.ExpectedCalls...)
}
func (m *Mock) calls() []Call {
m.mutex.Lock()
defer m.mutex.Unlock()
return append([]Call{}, m.Calls...)
}
/*
Arguments
*/
// Arguments holds an array of method arguments or return values.
type Arguments []interface{}
const (
// Anything is used in Diff and Assert when the argument being tested
// shouldn't be taken into consideration.
Anything string = "mock.Anything"
)
// AnythingOfTypeArgument is a string that contains the type of an argument
// for use when type checking. Used in Diff and Assert.
type AnythingOfTypeArgument string
// AnythingOfType returns an AnythingOfTypeArgument object containing the
// name of the type to check for. Used in Diff and Assert.
//
// For example:
// Assert(t, AnythingOfType("string"), AnythingOfType("int"))
func AnythingOfType(t string) AnythingOfTypeArgument {
return AnythingOfTypeArgument(t)
}
// argumentMatcher performs custom argument matching, returning whether or
// not the argument is matched by the expectation fixture function.
type argumentMatcher struct {
// fn is a function which accepts one argument, and returns a bool.
fn reflect.Value
}
func (f argumentMatcher) Matches(argument interface{}) bool {
expectType := f.fn.Type().In(0)
if reflect.TypeOf(argument).AssignableTo(expectType) {
result := f.fn.Call([]reflect.Value{reflect.ValueOf(argument)})
return result[0].Bool()
}
return false
}
func (f argumentMatcher) String() string {
return fmt.Sprintf("func(%s) bool", f.fn.Type().In(0).Name())
}
// MatchedBy can be used to match a mock call based on only certain properties
// from a complex struct or some calculation. It takes a function that will be
// evaluated with the called argument and will return true when there's a match
// and false otherwise.
//
// Example:
// m.On("Do", MatchedBy(func(req *http.Request) bool { return req.Host == "example.com" }))
//
// |fn| must be a function accepting a single argument (of the expected type)
// which returns a bool. If |fn| doesn't match the required signature,
// MatchedBy() panics.
func MatchedBy(fn interface{}) argumentMatcher {
fnType := reflect.TypeOf(fn)
if fnType.Kind() != reflect.Func {
panic(fmt.Sprintf("assert: arguments: %s is not a func", fn))
}
if fnType.NumIn() != 1 {
panic(fmt.Sprintf("assert: arguments: %s does not take exactly one argument", fn))
}
if fnType.NumOut() != 1 || fnType.Out(0).Kind() != reflect.Bool {
panic(fmt.Sprintf("assert: arguments: %s does not return a bool", fn))
}
return argumentMatcher{fn: reflect.ValueOf(fn)}
}
// Get returns the argument at the specified index.
func (args Arguments) Get(index int) interface{} {
if index+1 > len(args) {
panic(fmt.Sprintf("assert: arguments: Cannot call Get(%d) because there are %d argument(s).", index, len(args)))
}
return args[index]
}
// Is gets whether the objects match the arguments specified.
func (args Arguments) Is(objects ...interface{}) bool {
for i, obj := range args {
if obj != objects[i] {
return false
}
}
return true
}
// Diff gets a string describing the differences between the arguments
// and the specified objects.
//
// Returns the diff string and number of differences found.
func (args Arguments) Diff(objects []interface{}) (string, int) {
var output = "\n"
var differences int
var maxArgCount = len(args)
if len(objects) > maxArgCount {
maxArgCount = len(objects)
}
for i := 0; i < maxArgCount; i++ {
var actual, expected interface{}
if len(objects) <= i {
actual = "(Missing)"
} else {
actual = objects[i]
}
if len(args) <= i {
expected = "(Missing)"
} else {
expected = args[i]
}
if matcher, ok := expected.(argumentMatcher); ok {
if matcher.Matches(actual) {
output = fmt.Sprintf("%s\t%d: \u2705 %s matched by %s\n", output, i, actual, matcher)
} else {
differences++
output = fmt.Sprintf("%s\t%d: \u2705 %s not matched by %s\n", output, i, actual, matcher)
}
} else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() {
// type checking
if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) {
// not match
differences++
output = fmt.Sprintf("%s\t%d: \u274C type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actual)
}
} else {
// normal checking
if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) {
// match
output = fmt.Sprintf("%s\t%d: \u2705 %s == %s\n", output, i, actual, expected)
} else {
// not match
differences++
output = fmt.Sprintf("%s\t%d: \u274C %s != %s\n", output, i, actual, expected)
}
}
}
if differences == 0 {
return "No differences.", differences
}
return output, differences
}
// Assert compares the arguments with the specified objects and fails if
// they do not exactly match.
func (args Arguments) Assert(t TestingT, objects ...interface{}) bool {
// get the differences
diff, diffCount := args.Diff(objects)
if diffCount == 0 {
return true
}
// there are differences... report them...
t.Logf(diff)
t.Errorf("%sArguments do not match.", assert.CallerInfo())
return false
}
// String gets the argument at the specified index. Panics if there is no argument, or
// if the argument is of the wrong type.
//
// If no index is provided, String() returns a complete string representation
// of the arguments.
func (args Arguments) String(indexOrNil ...int) string {
if len(indexOrNil) == 0 {
// normal String() method - return a string representation of the args
var argsStr []string
for _, arg := range args {
argsStr = append(argsStr, fmt.Sprintf("%s", reflect.TypeOf(arg)))
}
return strings.Join(argsStr, ",")
} else if len(indexOrNil) == 1 {
// Index has been specified - get the argument at that index
var index = indexOrNil[0]
var s string
var ok bool
if s, ok = args.Get(index).(string); !ok {
panic(fmt.Sprintf("assert: arguments: String(%d) failed because object wasn't correct type: %s", index, args.Get(index)))
}
return s
}
panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String. Must be 0 or 1, not %d", len(indexOrNil)))
}
// Int gets the argument at the specified index. Panics if there is no argument, or
// if the argument is of the wrong type.
func (args Arguments) Int(index int) int {
var s int
var ok bool
if s, ok = args.Get(index).(int); !ok {
panic(fmt.Sprintf("assert: arguments: Int(%d) failed because object wasn't correct type: %v", index, args.Get(index)))
}
return s
}
// Error gets the argument at the specified index. Panics if there is no argument, or
// if the argument is of the wrong type.
func (args Arguments) Error(index int) error {
obj := args.Get(index)
var s error
var ok bool
if obj == nil {
return nil
}
if s, ok = obj.(error); !ok {
panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index)))
}
return s
}
// Bool gets the argument at the specified index. Panics if there is no argument, or
// if the argument is of the wrong type.
func (args Arguments) Bool(index int) bool {
var s bool
var ok bool
if s, ok = args.Get(index).(bool); !ok {
panic(fmt.Sprintf("assert: arguments: Bool(%d) failed because object wasn't correct type: %v", index, args.Get(index)))
}
return s
}
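// Illustrative sketch (not part of the vendored source): the typed accessors
// are convenient inside a Run handler when inspecting the arguments a mocked
// method was called with. The "Lookup" method name below is hypothetical.
//
//    m.On("Lookup", Anything).Return("value", nil).Run(func(args Arguments) {
//        key := args.String(0) // panics if argument 0 is not a string
//        fmt.Println("looked up", key)
//    })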
func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {
t := reflect.TypeOf(v)
k := t.Kind()
if k == reflect.Ptr {
t = t.Elem()
k = t.Kind()
}
return t, k
}
func diffArguments(expected Arguments, actual Arguments) string {
for x := range expected {
if diffString := diff(expected[x], actual[x]); diffString != "" {
return fmt.Sprintf("Difference found in argument %v:\n\n%s", x, diffString)
}
}
return ""
}
// diff returns a diff of both values as long as both are of the same type and
// are a struct, map, slice or array. Otherwise it returns an empty string.
func diff(expected interface{}, actual interface{}) string {
if expected == nil || actual == nil {
return ""
}
et, ek := typeAndKind(expected)
at, _ := typeAndKind(actual)
if et != at {
return ""
}
if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array {
return ""
}
e := spew.Sdump(expected)
a := spew.Sdump(actual)
diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
A: difflib.SplitLines(e),
B: difflib.SplitLines(a),
FromFile: "Expected",
FromDate: "",
ToFile: "Actual",
ToDate: "",
Context: 1,
})
return diff
}

View File

@ -1,423 +1,464 @@
/*
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
* THIS FILE MUST NOT BE EDITED BY HAND
*/
package require
import (
assert "github.com/stretchr/testify/assert"
http "net/http"
url "net/url"
time "time"
)
// Condition uses a Comparison to assert a complex condition.
func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) {
if !assert.Condition(t, comp, msgAndArgs...) {
t.FailNow()
}
}
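// Illustrative sketch (not part of the generated source): each require
// function wraps the corresponding assert function and calls t.FailNow() on
// failure, so the test aborts immediately instead of continuing. The Load
// helper below is hypothetical.
//
//    func TestLoadConfig(t *testing.T) {
//        cfg, err := Load("testdata/config.json")
//        require.NoError(t, err)               // stops the test here on error
//        assert.Equal(t, "expected", cfg.Name) // would keep going on failure
//    }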
// Contains asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
//
//
// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'")
// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
// assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
//
//
// Returns whether the assertion was successful (true) or not (false).
func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) {
if !assert.Contains(t, s, contains, msgAndArgs...) {
t.FailNow()
}
}
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
//
// assert.Empty(t, obj)
//
//
// Returns whether the assertion was successful (true) or not (false).
func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if !assert.Empty(t, object, msgAndArgs...) {
t.FailNow()
}
}
// Equal asserts that two objects are equal.
//
//
// assert.Equal(t, 123, 123, "123 and 123 should be equal")
//
//
// Returns whether the assertion was successful (true) or not (false).
func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
if !assert.Equal(t, expected, actual, msgAndArgs...) {
t.FailNow()
}
}
// EqualError asserts that a function returned an error (i.e. not `nil`)
// and that it is equal to the provided error.
//
// actualObj, err := SomeFunction()
// assert.EqualError(t, err, expectedErrorString, "An error was expected")
//
// Returns whether the assertion was successful (true) or not (false).
func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) {
if !assert.EqualError(t, theError, errString, msgAndArgs...) {
t.FailNow()
}
}
// EqualValues asserts that two objects are equal or convertible to the same types
// and equal.
//
// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal")
//
// Returns whether the assertion was successful (true) or not (false).
func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
if !assert.EqualValues(t, expected, actual, msgAndArgs...) {
t.FailNow()
}
}
// Error asserts that a function returned an error (i.e. not `nil`).
//
//
// actualObj, err := SomeFunction()
// if assert.Error(t, err, "An error was expected") {
// assert.Equal(t, err, expectedError)
// }
//
//
// Returns whether the assertion was successful (true) or not (false).
func Error(t TestingT, err error, msgAndArgs ...interface{}) {
if !assert.Error(t, err, msgAndArgs...) {
t.FailNow()
}
}
// Exactly asserts that two objects are equal in value and type.
//
//
// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal")
//
//
// Returns whether the assertion was successful (true) or not (false).
func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
if !assert.Exactly(t, expected, actual, msgAndArgs...) {
t.FailNow()
}
}
// Fail reports a failure through
func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
if !assert.Fail(t, failureMessage, msgAndArgs...) {
t.FailNow()
}
}
// FailNow fails test
func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
if !assert.FailNow(t, failureMessage, msgAndArgs...) {
t.FailNow()
}
}
// False asserts that the specified value is false.
//
//
// assert.False(t, myBool, "myBool should be false")
//
//
// Returns whether the assertion was successful (true) or not (false).
func False(t TestingT, value bool, msgAndArgs ...interface{}) {
if !assert.False(t, value, msgAndArgs...) {
t.FailNow()
}
}
// HTTPBodyContains asserts that a specified handler returns a
// body that contains a string.
//
//
// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
//
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
if !assert.HTTPBodyContains(t, handler, method, url, values, str) {
t.FailNow()
}
}
// HTTPBodyNotContains asserts that a specified handler returns a
// body that does not contain a string.
//
//
// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
//
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
if !assert.HTTPBodyNotContains(t, handler, method, url, values, str) {
t.FailNow()
}
}
// HTTPError asserts that a specified handler returns an error status code.
//
//
// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) {
if !assert.HTTPError(t, handler, method, url, values) {
t.FailNow()
}
}
// HTTPRedirect asserts that a specified handler returns a redirect status code.
//
//
// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) {
if !assert.HTTPRedirect(t, handler, method, url, values) {
t.FailNow()
}
}
// HTTPSuccess asserts that a specified handler returns a success status code.
//
//
// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
//
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) {
if !assert.HTTPSuccess(t, handler, method, url, values) {
t.FailNow()
}
}
// Implements asserts that an object is implemented by the specified interface.
//
//
// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject")
func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
if !assert.Implements(t, interfaceObject, object, msgAndArgs...) {
t.FailNow()
}
}
// InDelta asserts that the two numerals are within delta of each other.
//
//
// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)
//
//
// Returns whether the assertion was successful (true) or not (false).
func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
if !assert.InDelta(t, expected, actual, delta, msgAndArgs...) {
t.FailNow()
}
}
// InDeltaSlice is the same as InDelta, except it compares two slices.
func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
if !assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) {
t.FailNow()
}
}
// InEpsilon asserts that expected and actual have a relative error less than epsilon
//
//
// Returns whether the assertion was successful (true) or not (false).
func InEpsilon(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
if !assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) {
t.FailNow()
}
}
// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
if !assert.InEpsilonSlice(t, expected, actual, epsilon, msgAndArgs...) {
t.FailNow()
}
// InEpsilonSlice is the same as InEpsilon, except it compares two slices.
func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
if !assert.InEpsilonSlice(t, expected, actual, delta, msgAndArgs...) {
t.FailNow()
}
}
// IsType asserts that the specified objects are of the same type.
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
if !assert.IsType(t, expectedType, object, msgAndArgs...) {
t.FailNow()
}
}
// JSONEq asserts that two JSON strings are equivalent.
//
//
// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
//
//
// Returns whether the assertion was successful (true) or not (false).
func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) {
if !assert.JSONEq(t, expected, actual, msgAndArgs...) {
t.FailNow()
}
}
// Len asserts that the specified object has the specified length.
// Len also fails if the object has a type that len() does not accept.
//
//
// assert.Len(t, mySlice, 3, "The size of slice is not 3")
//
//
// Returns whether the assertion was successful (true) or not (false).
func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) {
if !assert.Len(t, object, length, msgAndArgs...) {
t.FailNow()
}
}
// Nil asserts that the specified object is nil.
//
//
// assert.Nil(t, err, "err should be nothing")
//
//
// Returns whether the assertion was successful (true) or not (false).
func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if !assert.Nil(t, object, msgAndArgs...) {
t.FailNow()
}
}
// NoError asserts that a function returned no error (i.e. `nil`).
//
//
// actualObj, err := SomeFunction()
// if assert.NoError(t, err) {
// assert.Equal(t, actualObj, expectedObj)
// }
//
//
// Returns whether the assertion was successful (true) or not (false).
func NoError(t TestingT, err error, msgAndArgs ...interface{}) {
if !assert.NoError(t, err, msgAndArgs...) {
t.FailNow()
}
}
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
//
// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
// assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'")
//
//
// Returns whether the assertion was successful (true) or not (false).
func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) {
if !assert.NotContains(t, s, contains, msgAndArgs...) {
t.FailNow()
}
}
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
//
// if assert.NotEmpty(t, obj) {
// assert.Equal(t, "two", obj[1])
// }
//
//
// Returns whether the assertion was successful (true) or not (false).
func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if !assert.NotEmpty(t, object, msgAndArgs...) {
t.FailNow()
}
}
// NotEqual asserts that the specified values are NOT equal.
//
//
// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal")
//
//
// Returns whether the assertion was successful (true) or not (false).
func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
if !assert.NotEqual(t, expected, actual, msgAndArgs...) {
t.FailNow()
}
}
// NotNil asserts that the specified object is not nil.
//
//
// assert.NotNil(t, err, "err should be something")
//
//
// Returns whether the assertion was successful (true) or not (false).
func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if !assert.NotNil(t, object, msgAndArgs...) {
t.FailNow()
}
}
// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
//
//
// assert.NotPanics(t, func(){
// RemainCalm()
// }, "Calling RemainCalm() should NOT panic")
//
//
// Returns whether the assertion was successful (true) or not (false).
func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
if !assert.NotPanics(t, f, msgAndArgs...) {
t.FailNow()
}
}
// NotRegexp asserts that a specified regexp does not match a string.
//
//
// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
// assert.NotRegexp(t, "^start", "it's not starting")
//
//
// Returns whether the assertion was successful (true) or not (false).
func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) {
if !assert.NotRegexp(t, rx, str, msgAndArgs...) {
t.FailNow()
}
}
// NotZero asserts that i is not the zero value for its type and returns the truth.
func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
if !assert.NotZero(t, i, msgAndArgs...) {
t.FailNow()
}
}
// Panics asserts that the code inside the specified PanicTestFunc panics.
//
//
// assert.Panics(t, func(){
// GoCrazy()
// }, "Calling GoCrazy() should panic")
//
//
// Returns whether the assertion was successful (true) or not (false).
func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) {
if !assert.Panics(t, f, msgAndArgs...) {
t.FailNow()
}
}
// Regexp asserts that a specified regexp matches a string.
//
//
// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
// assert.Regexp(t, "start...$", "it's not starting")
//
//
// Returns whether the assertion was successful (true) or not (false).
func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) {
if !assert.Regexp(t, rx, str, msgAndArgs...) {
t.FailNow()
}
}
// True asserts that the specified value is true.
//
//
// assert.True(t, myBool, "myBool should be true")
//
//
// Returns whether the assertion was successful (true) or not (false).
func True(t TestingT, value bool, msgAndArgs ...interface{}) {
if !assert.True(t, value, msgAndArgs...) {
t.FailNow()
}
}
// WithinDuration asserts that the two times are within duration delta of each other.
//
//
// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
//
//
// Returns whether the assertion was successful (true) or not (false).
func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) {
if !assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) {
t.FailNow()
}
}
// Zero asserts that i is the zero value for its type and returns the truth.
func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) {
if !assert.Zero(t, i, msgAndArgs...) {
t.FailNow()
}
}

View File

@ -1,6 +1,6 @@
{{.Comment}}
func {{.DocInfo.Name}}(t TestingT, {{.Params}}) {
if !assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) {
t.FailNow()
}
}

View File

@ -1,346 +1,387 @@
/*
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
* THIS FILE MUST NOT BE EDITED BY HAND
*/
package require
import (
assert "github.com/stretchr/testify/assert"
http "net/http"
url "net/url"
time "time"
)
// Condition uses a Comparison to assert a complex condition.
func (a *Assertions) Condition(comp assert.Comparison, msgAndArgs ...interface{}) {
Condition(a.t, comp, msgAndArgs...)
}
// Contains asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
//
//
// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'")
// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) {
Contains(a.t, s, contains, msgAndArgs...)
}
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
//
// a.Empty(obj)
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) {
Empty(a.t, object, msgAndArgs...)
}
// Equal asserts that two objects are equal.
//
//
// a.Equal(123, 123, "123 and 123 should be equal")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
Equal(a.t, expected, actual, msgAndArgs...)
}
// EqualError asserts that a function returned an error (i.e. not `nil`)
// and that it is equal to the provided error.
//
//
// actualObj, err := SomeFunction()
// a.EqualError(err, expectedErrorString, "An error was expected")
//
// if assert.Error(t, err, "An error was expected") {
// assert.Equal(t, err, expectedError)
// }
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) {
EqualError(a.t, theError, errString, msgAndArgs...)
}
// EqualValues asserts that two objects are equal or convertible to the same types
// and equal.
//
//
// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
EqualValues(a.t, expected, actual, msgAndArgs...)
}
// Error asserts that a function returned an error (i.e. not `nil`).
//
//
// actualObj, err := SomeFunction()
// if a.Error(err, "An error was expected") {
// assert.Equal(t, err, expectedError)
// }
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Error(err error, msgAndArgs ...interface{}) {
Error(a.t, err, msgAndArgs...)
}
// Exactly asserts that two objects are equal in value and type.
//
//
// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
Exactly(a.t, expected, actual, msgAndArgs...)
}
// Fail reports a failure through
func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) {
Fail(a.t, failureMessage, msgAndArgs...)
}
// FailNow fails test
func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) {
FailNow(a.t, failureMessage, msgAndArgs...)
}
// False asserts that the specified value is false.
//
//
// a.False(myBool, "myBool should be false")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) False(value bool, msgAndArgs ...interface{}) {
False(a.t, value, msgAndArgs...)
}
// HTTPBodyContains asserts that a specified handler returns a
// body that contains a string.
//
//
// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
HTTPBodyContains(a.t, handler, method, url, values, str)
}
// HTTPBodyNotContains asserts that a specified handler returns a
// body that does not contain a string.
//
//
// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) {
HTTPBodyNotContains(a.t, handler, method, url, values, str)
}
// HTTPError asserts that a specified handler returns an error status code.
//
//
// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) {
HTTPError(a.t, handler, method, url, values)
}
// HTTPRedirect asserts that a specified handler returns a redirect status code.
//
//
// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) {
HTTPRedirect(a.t, handler, method, url, values)
}
// HTTPSuccess asserts that a specified handler returns a success status code.
//
//
// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) {
HTTPSuccess(a.t, handler, method, url, values)
}
// Implements asserts that an object is implemented by the specified interface.
//
//
// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject")
func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
Implements(a.t, interfaceObject, object, msgAndArgs...)
}
// InDelta asserts that the two numerals are within delta of each other.
//
//
// a.InDelta(math.Pi, (22 / 7.0), 0.01)
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
InDelta(a.t, expected, actual, delta, msgAndArgs...)
}
// InDeltaSlice is the same as InDelta, except it compares two slices.
func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
}
// InEpsilon asserts that expected and actual have a relative error less than epsilon
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
}
// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
// InEpsilonSlice is the same as InEpsilon, except it compares two slices.
func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) {
InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...)
}
// IsType asserts that the specified objects are of the same type.
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
IsType(a.t, expectedType, object, msgAndArgs...)
}
// JSONEq asserts that two JSON strings are equivalent.
//
//
// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) {
JSONEq(a.t, expected, actual, msgAndArgs...)
}
// Len asserts that the specified object has the specified length.
// Len also fails if the object has a type that len() does not accept.
//
//
// a.Len(mySlice, 3, "The size of slice is not 3")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) {
Len(a.t, object, length, msgAndArgs...)
}
// Nil asserts that the specified object is nil.
//
//
// a.Nil(err, "err should be nothing")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) {
Nil(a.t, object, msgAndArgs...)
}
// NoError asserts that a function returned no error (i.e. `nil`).
//
//
// actualObj, err := SomeFunction()
// if a.NoError(err) {
// assert.Equal(t, actualObj, expectedObj)
// }
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) {
NoError(a.t, err, msgAndArgs...)
}
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
//
// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) {
NotContains(a.t, s, contains, msgAndArgs...)
}
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
//
// if a.NotEmpty(obj) {
// assert.Equal(t, "two", obj[1])
// }
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) {
NotEmpty(a.t, object, msgAndArgs...)
}
// NotEqual asserts that the specified values are NOT equal.
//
//
// a.NotEqual(obj1, obj2, "two objects shouldn't be equal")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
NotEqual(a.t, expected, actual, msgAndArgs...)
}
// NotNil asserts that the specified object is not nil.
//
//
// a.NotNil(err, "err should be something")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) {
NotNil(a.t, object, msgAndArgs...)
}
// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
//
//
// a.NotPanics(func(){
// RemainCalm()
// }, "Calling RemainCalm() should NOT panic")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{}) {
NotPanics(a.t, f, msgAndArgs...)
}
// NotRegexp asserts that a specified regexp does not match a string.
//
//
// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
// a.NotRegexp("^start", "it's not starting")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) {
NotRegexp(a.t, rx, str, msgAndArgs...)
}
// NotZero asserts that i is not the zero value for its type and returns the truth.
func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) {
NotZero(a.t, i, msgAndArgs...)
}
// Panics asserts that the code inside the specified PanicTestFunc panics.
//
//
// a.Panics(func(){
// GoCrazy()
// }, "Calling GoCrazy() should panic")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) {
Panics(a.t, f, msgAndArgs...)
}
// Regexp asserts that a specified regexp matches a string.
//
//
// a.Regexp(regexp.MustCompile("start"), "it's starting")
// a.Regexp("start...$", "it's not starting")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) {
Regexp(a.t, rx, str, msgAndArgs...)
}
// True asserts that the specified value is true.
//
//
// a.True(myBool, "myBool should be true")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) True(value bool, msgAndArgs ...interface{}) {
True(a.t, value, msgAndArgs...)
}
// WithinDuration asserts that the two times are within duration delta of each other.
//
//
// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
//
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) {
WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
}
// Zero asserts that i is the zero value for its type and returns the truth.
func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) {
Zero(a.t, i, msgAndArgs...)

View File

@ -13,7 +13,7 @@ example, by using nonce 1 for the first message, nonce 2 for the second
message, etc. Nonces are long enough that randomly generated nonces have
negligible risk of collision.
This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html.
This package is interoperable with NaCl: http://nacl.cr.yp.to/secretbox.html.
*/
package secretbox
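// Illustrative sketch (not part of the vendored source), assuming the
// golang.org/x/crypto/nacl/secretbox API and crypto/rand: seal a message
// under a random 24-byte nonce and prepend the nonce to the ciphertext so
// the receiver can find it when calling Open.
//
//    var key [32]byte
//    var nonce [24]byte
//    if _, err := rand.Read(key[:]); err != nil {
//        panic(err)
//    }
//    if _, err := rand.Read(nonce[:]); err != nil {
//        panic(err)
//    }
//    sealed := secretbox.Seal(nonce[:], []byte("hello"), &nonce, &key)
//    opened, ok := secretbox.Open(nil, sealed[24:], &nonce, &key)
//    if !ok {
//        panic("decryption failed")
//    }
//    _ = opened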

View File

@ -2,12 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,!gccgo,!appengine,go1.7
// +build amd64,!gccgo,!appengine
package poly1305
// This function is implemented in sum_amd64.s
// This function is implemented in poly1305_amd64.s
//go:noescape
func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
// Sum generates an authenticator for m using a one-time key and puts the

Some files were not shown because too many files have changed in this diff