Vendor update for AWS SDK
Updated to the latest version of the Go AWS SDK. Use the sub-packages vendored within the AWS SDK. Adds the missing vendor packages for letsencrypt.

Fixes #1832

Signed-off-by: Derek McGowan <derek@mcgstyle.net> (github: dmcgowan)
This commit is contained in:
parent 2052f29be6
commit acae5dcfff

64 changed files with 5659 additions and 1382 deletions
Godeps/Godeps.json (generated), 136 lines changed

@@ -1,7 +1,7 @@
 {
 	"ImportPath": "github.com/docker/distribution",
 	"GoVersion": "go1.6",
-	"GodepVersion": "v70",
+	"GodepVersion": "v74",
 	"Packages": [
 		"./..."
 	],
@@ -23,118 +23,128 @@
 	},
 	{
 		"ImportPath": "github.com/aws/aws-sdk-go/aws",
-		"Comment": "v1.1.0-14-g49c3892",
-		"Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee"
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
 	},
 	{
 		"ImportPath": "github.com/aws/aws-sdk-go/aws/awserr",
-		"Comment": "v1.1.0-14-g49c3892",
-		"Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee"
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
 	},
 	{
 		"ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil",
-		"Comment": "v1.1.0-14-g49c3892",
-		"Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee"
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
 	},
 	{
 		"ImportPath": "github.com/aws/aws-sdk-go/aws/client",
-		"Comment": "v1.1.0-14-g49c3892",
-		"Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee"
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
 	},
 	{
 		"ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata",
-		"Comment": "v1.1.0-14-g49c3892",
-		"Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee"
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
 	},
 	{
 		"ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers",
-		"Comment": "v1.1.0-14-g49c3892",
-		"Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee"
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
 	},
 	{
 		"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials",
-		"Comment": "v1.1.0-14-g49c3892",
-		"Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee"
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
 	},
 	{
 		"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
-		"Comment": "v1.1.0-14-g49c3892",
-		"Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee"
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
 	},
 	{
 		"ImportPath": "github.com/aws/aws-sdk-go/aws/defaults",
-		"Comment": "v1.1.0-14-g49c3892",
-		"Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee"
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
 	},
 	{
 		"ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata",
-		"Comment": "v1.1.0-14-g49c3892",
-		"Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee"
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
 	},
 	{
 		"ImportPath": "github.com/aws/aws-sdk-go/aws/request",
-		"Comment": "v1.1.0-14-g49c3892",
-		"Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee"
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
 	},
 	{
 		"ImportPath": "github.com/aws/aws-sdk-go/aws/session",
-		"Comment": "v1.1.0-14-g49c3892",
-		"Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee"
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
+	},
+	{
+		"ImportPath": "github.com/aws/aws-sdk-go/aws/signer/v4",
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
 	},
 	{
 		"ImportPath": "github.com/aws/aws-sdk-go/private/endpoints",
-		"Comment": "v1.1.0-14-g49c3892",
-		"Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee"
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
 	},
 	{
 		"ImportPath": "github.com/aws/aws-sdk-go/private/protocol",
-		"Comment": "v1.1.0-14-g49c3892",
-		"Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee"
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
 	},
 	{
 		"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query",
-		"Comment": "v1.1.0-14-g49c3892",
-		"Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee"
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
 	},
 	{
 		"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
-		"Comment": "v1.1.0-14-g49c3892",
-		"Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee"
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
 	},
 	{
 		"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest",
-		"Comment": "v1.1.0-14-g49c3892",
-		"Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee"
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
 	},
 	{
 		"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml",
-		"Comment": "v1.1.0-14-g49c3892",
-		"Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee"
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
 	},
 	{
 		"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
-		"Comment": "v1.1.0-14-g49c3892",
-		"Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee"
-	},
-	{
-		"ImportPath": "github.com/aws/aws-sdk-go/private/signer/v4",
-		"Comment": "v1.1.0-14-g49c3892",
-		"Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee"
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
 	},
 	{
 		"ImportPath": "github.com/aws/aws-sdk-go/private/waiter",
-		"Comment": "v1.1.0-14-g49c3892",
-		"Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee"
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
 	},
 	{
 		"ImportPath": "github.com/aws/aws-sdk-go/service/cloudfront/sign",
-		"Comment": "v1.1.0-14-g49c3892",
-		"Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee"
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
 	},
 	{
 		"ImportPath": "github.com/aws/aws-sdk-go/service/s3",
-		"Comment": "v1.1.0-14-g49c3892",
-		"Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee"
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
+	},
+	{
+		"ImportPath": "github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini",
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
+	},
+	{
+		"ImportPath": "github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath",
+		"Comment": "v1.2.4",
+		"Rev": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6"
 	},
 	{
 		"ImportPath": "github.com/bugsnag/bugsnag-go",
@@ -187,11 +197,6 @@
 		"ImportPath": "github.com/garyburd/redigo/redis",
 		"Rev": "535138d7bcd717d6531c701ef5933d98b1866257"
 	},
-	{
-		"ImportPath": "github.com/go-ini/ini",
-		"Comment": "v1.8.6",
-		"Rev": "afbd495e5aaea13597b5e14fe514ddeaa4d76fc3"
-	},
 	{
 		"ImportPath": "github.com/golang/protobuf/proto",
 		"Rev": "8d92cf5fc15a4382f8964b08e1f42a75c0591aa3"
@@ -212,11 +217,6 @@
 		"ImportPath": "github.com/inconshreveable/mousetrap",
 		"Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
 	},
-	{
-		"ImportPath": "github.com/jmespath/go-jmespath",
-		"Comment": "0.2.2-12-g0b12d6b",
-		"Rev": "0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74"
-	},
 	{
 		"ImportPath": "github.com/mitchellh/mapstructure",
 		"Rev": "482a9fd5fa83e8c4e7817413b80f3eb8feec03ef"
@@ -437,6 +437,22 @@
 		{
 			"ImportPath": "rsc.io/letsencrypt",
 			"Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c"
+		},
+		{
+			"ImportPath": "rsc.io/letsencrypt/vendor/github.com/xenolf/lego/acme",
+			"Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c"
+		},
+		{
+			"ImportPath": "rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1",
+			"Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c"
+		},
+		{
+			"ImportPath": "rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/cipher",
+			"Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c"
+		},
+		{
+			"ImportPath": "rsc.io/letsencrypt/vendor/gopkg.in/square/go-jose.v1/json",
+			"Rev": "a019c9e6fce0c7132679dea13bd8df7c86ffe26c"
 		}
 	]
 }
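Illustrative usage (not part of this commit): rsc.io/letsencrypt carries its own vendor/ tree, which is why the xenolf/lego/acme and square/go-jose.v1 packages above now get Godeps entries of their own. A minimal sketch of the consumer side, assuming the letsencrypt.Manager API; the cache file name is a placeholder.

package main

import (
	"fmt"

	"rsc.io/letsencrypt"
)

func main() {
	// The acme and go-jose packages recorded above are pulled in
	// transitively by this import; without their Godeps entries a
	// godep restore/save cycle would drop them from the vendor tree.
	var m letsencrypt.Manager
	if err := m.CacheFile("letsencrypt.cache"); err != nil {
		fmt.Println("letsencrypt cache:", err)
	}
}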
vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go (generated, vendored), 40 lines changed

@@ -42,9 +42,12 @@ type Error interface {
 	OrigErr() error
 }
 
-// BatchError is a batch of errors which also wraps lower level errors with code, message,
-// and original errors. Calling Error() will only return the error that is at the end
-// of the list.
+// BatchError is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occured in the batch.
+//
+// Deprecated: Replaced with BatchedErrors. Only defined for backwards
+// compatibility.
 type BatchError interface {
 	// Satisfy the generic error interface.
 	error
@@ -59,20 +62,35 @@ type BatchError interface {
 	OrigErrs() []error
 }
 
+// BatchedErrors is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occured in the batch.
+//
+// Replaces BatchError
+type BatchedErrors interface {
+	// Satisfy the base Error interface.
+	Error
+
+	// Returns the original error if one was set. Nil is returned if not set.
+	OrigErrs() []error
+}
+
 // New returns an Error object described by the code, message, and origErr.
 //
 // If origErr satisfies the Error interface it will not be wrapped within a new
 // Error object and will instead be returned.
 func New(code, message string, origErr error) Error {
-	if e, ok := origErr.(Error); ok && e != nil {
-		return e
+	var errs []error
+	if origErr != nil {
+		errs = append(errs, origErr)
 	}
-	return newBaseError(code, message, origErr)
+	return newBaseError(code, message, errs)
 }
 
-// NewBatchError returns an baseError with an expectation of an array of errors
-func NewBatchError(code, message string, errs []error) BatchError {
-	return newBaseErrors(code, message, errs)
+// NewBatchError returns an BatchedErrors with a collection of errors as an
+// array of errors.
+func NewBatchError(code, message string, errs []error) BatchedErrors {
+	return newBaseError(code, message, errs)
 }
 
 // A RequestFailure is an interface to extract request failure information from
@@ -85,9 +103,9 @@ func NewBatchError(code, message string, errs []error) BatchError {
 //     output, err := s3manage.Upload(svc, input, opts)
 //     if err != nil {
 //         if reqerr, ok := err.(RequestFailure); ok {
-//             log.Printf("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
+//             log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
 //         } else {
-//             log.Printf("Error:", err.Error()
+//             log.Println("Error:", err.Error())
 //         }
 //     }
 //
vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go (generated, vendored), 69 lines changed

@@ -34,36 +34,17 @@ type baseError struct {
 	errs []error
 }
 
-// newBaseError returns an error object for the code, message, and err.
+// newBaseError returns an error object for the code, message, and errors.
 //
 // code is a short no whitespace phrase depicting the classification of
 // the error that is being created.
 //
-// message is the free flow string containing detailed information about the error.
+// message is the free flow string containing detailed information about the
+// error.
 //
-// origErr is the error object which will be nested under the new error to be returned.
-func newBaseError(code, message string, origErr error) *baseError {
-	b := &baseError{
-		code:    code,
-		message: message,
-	}
-
-	if origErr != nil {
-		b.errs = append(b.errs, origErr)
-	}
-
-	return b
-}
-
-// newBaseErrors returns an error object for the code, message, and errors.
-//
-// code is a short no whitespace phrase depicting the classification of
-// the error that is being created.
-//
-// message is the free flow string containing detailed information about the error.
-//
-// origErrs is the error objects which will be nested under the new errors to be returned.
-func newBaseErrors(code, message string, origErrs []error) *baseError {
+// origErrs is the error objects which will be nested under the new errors to
+// be returned.
+func newBaseError(code, message string, origErrs []error) *baseError {
 	b := &baseError{
 		code:    code,
 		message: message,
@@ -103,19 +84,26 @@ func (b baseError) Message() string {
 	return b.message
 }
 
-// OrigErr returns the original error if one was set. Nil is returned if no error
-// was set. This only returns the first element in the list. If the full list is
-// needed, use BatchError
+// OrigErr returns the original error if one was set. Nil is returned if no
+// error was set. This only returns the first element in the list. If the full
+// list is needed, use BatchedErrors.
 func (b baseError) OrigErr() error {
-	if size := len(b.errs); size > 0 {
-		return b.errs[0]
-	}
-
-	return nil
+	switch len(b.errs) {
+	case 0:
+		return nil
+	case 1:
+		return b.errs[0]
+	default:
+		if err, ok := b.errs[0].(Error); ok {
+			return NewBatchError(err.Code(), err.Message(), b.errs[1:])
+		}
+		return NewBatchError("BatchedErrors",
+			"multiple errors occured", b.errs)
+	}
 }
 
-// OrigErrs returns the original errors if one was set. An empty slice is returned if
-// no error was set:w
+// OrigErrs returns the original errors if one was set. An empty slice is
+// returned if no error was set.
 func (b baseError) OrigErrs() []error {
 	return b.errs
 }
@@ -133,8 +121,8 @@ type requestError struct {
 	requestID string
 }
 
-// newRequestError returns a wrapped error with additional information for request
-// status code, and service requestID.
+// newRequestError returns a wrapped error with additional information for
+// request status code, and service requestID.
 //
 // Should be used to wrap all request which involve service requests. Even if
 // the request failed without a service response, but had an HTTP status code
@@ -173,6 +161,15 @@ func (r requestError) RequestID() string {
 	return r.requestID
 }
 
+// OrigErrs returns the original errors if one was set. An empty slice is
+// returned if no error was set.
+func (r requestError) OrigErrs() []error {
+	if b, ok := r.awsError.(BatchedErrors); ok {
+		return b.OrigErrs()
+	}
+	return []error{r.OrigErr()}
+}
+
 // An error list that satisfies the golang interface
 type errorList []error
 
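Illustrative usage (not part of this commit): the awserr changes above replace BatchError with BatchedErrors and route both New and NewBatchError through a single newBaseError. A minimal sketch of how the batched API reads from calling code; the error strings are made up.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
	// Wrap two lower-level failures into a single batched error, as the
	// updated NewBatchError (now returning BatchedErrors) allows.
	errs := []error{
		fmt.Errorf("dial tcp: timeout"),
		fmt.Errorf("dial tcp: connection refused"),
	}
	batch := awserr.NewBatchError("RequestError", "send failed", errs)

	fmt.Println(batch.Code(), batch.Message())
	for i, e := range batch.OrigErrs() {
		fmt.Printf("  original error %d: %v\n", i, e)
	}
}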
vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go (generated, vendored), 4 lines changed

@@ -91,6 +91,10 @@ func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
 
 		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
 	default:
+		if !v.IsValid() {
+			fmt.Fprint(buf, "<invalid value>")
+			return
+		}
 		format := "%v"
 		switch v.Interface().(type) {
 		case string:
vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go (generated, vendored), 53 lines changed

@@ -1,8 +1,8 @@
 package client
 
 import (
-	"math"
 	"math/rand"
+	"sync"
 	"time"
 
 	"github.com/aws/aws-sdk-go/aws/request"
@@ -30,16 +30,61 @@ func (d DefaultRetryer) MaxRetries() int {
 	return d.NumMaxRetries
 }
 
+var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
+
 // RetryRules returns the delay duration before retrying this request again
 func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
-	delay := int(math.Pow(2, float64(r.RetryCount))) * (rand.Intn(30) + 30)
+	// Set the upper limit of delay in retrying at ~five minutes
+	minTime := 30
+	throttle := d.shouldThrottle(r)
+	if throttle {
+		minTime = 500
+	}
+
+	retryCount := r.RetryCount
+	if retryCount > 13 {
+		retryCount = 13
+	} else if throttle && retryCount > 8 {
+		retryCount = 8
+	}
+
+	delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
 	return time.Duration(delay) * time.Millisecond
 }
 
-// ShouldRetry returns if the request should be retried.
+// ShouldRetry returns true if the request should be retried.
 func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
 	if r.HTTPResponse.StatusCode >= 500 {
 		return true
 	}
-	return r.IsErrorRetryable()
+	return r.IsErrorRetryable() || d.shouldThrottle(r)
+}
+
+// ShouldThrottle returns true if the request should be throttled.
+func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
+	if r.HTTPResponse.StatusCode == 502 ||
+		r.HTTPResponse.StatusCode == 503 ||
+		r.HTTPResponse.StatusCode == 504 {
+		return true
+	}
+	return r.IsErrorThrottle()
+}
+
+// lockedSource is a thread-safe implementation of rand.Source
+type lockedSource struct {
+	lk  sync.Mutex
+	src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+	r.lk.Lock()
+	n = r.src.Int63()
+	r.lk.Unlock()
+	return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+	r.lk.Lock()
+	r.src.Seed(seed)
+	r.lk.Unlock()
 }
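Illustrative usage (not part of this commit): the retry delay above is now a capped exponential backoff with a larger base when the response looks like throttling (502/503/504 or a throttle error). A standalone sketch of the same formula outside the SDK, for reference only.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff mirrors the formula introduced in DefaultRetryer.RetryRules:
// delay = 2^retryCount * (rand(minTime) + minTime) milliseconds, where
// minTime is 30ms normally and 500ms when throttled, and retryCount is
// capped at 13 (8 when throttled) so the delay tops out near five minutes.
func backoff(retryCount int, throttled bool) time.Duration {
	minTime := 30
	if throttled {
		minTime = 500
	}
	if retryCount > 13 {
		retryCount = 13
	} else if throttled && retryCount > 8 {
		retryCount = 8
	}
	delay := (1 << uint(retryCount)) * (rand.Intn(minTime) + minTime)
	return time.Duration(delay) * time.Millisecond
}

func main() {
	for i := 0; i < 4; i++ {
		fmt.Println(i, backoff(i, false), backoff(i, true))
	}
}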
vendor/github.com/aws/aws-sdk-go/aws/config.go (generated, vendored), 54 lines changed

@@ -100,6 +100,31 @@ type Config struct {
 	// Amazon S3: Virtual Hosting of Buckets
 	S3ForcePathStyle *bool
 
+	// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
+	// header to PUT requests over 2MB of content. 100-Continue instructs the
+	// HTTP client not to send the body until the service responds with a
+	// `continue` status. This is useful to prevent sending the request body
+	// until after the request is authenticated, and validated.
+	//
+	// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
+	//
+	// 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
+	// `ExpectContinueTimeout` for information on adjusting the continue wait timeout.
+	// https://golang.org/pkg/net/http/#Transport
+	//
+	// You should use this flag to disble 100-Continue if you experiance issues
+	// with proxies or thrid party S3 compatible services.
+	S3Disable100Continue *bool
+
+	// Set this to `true` to enable S3 Accelerate feature. For all operations compatible
+	// with S3 Accelerate will use the accelerate endpoint for requests. Requests not compatible
+	// will fall back to normal S3 requests.
+	//
+	// The bucket must be enable for accelerate to be used with S3 client with accelerate
+	// enabled. If the bucket is not enabled for accelerate an error will be returned.
+	// The bucket name must be DNS compatible to also work with accelerate.
+	S3UseAccelerate *bool
+
 	// Set this to `true` to disable the EC2Metadata client from overriding the
 	// default http.Client's Timeout. This is helpful if you do not want the EC2Metadata
 	// client to create a new http.Client. This options is only meaningful if you're not
@@ -114,13 +139,18 @@ type Config struct {
 	//
 	EC2MetadataDisableTimeoutOverride *bool
 
+	// SleepDelay is an override for the func the SDK will call when sleeping
+	// during the lifecycle of a request. Specifically this will be used for
+	// request delays. This value should only be used for testing. To adjust
+	// the delay of a request see the aws/client.DefaultRetryer and
+	// aws/request.Retryer.
 	SleepDelay func(time.Duration)
 }
 
 // NewConfig returns a new Config pointer that can be chained with builder methods to
 // set multiple configuration values inline without using pointers.
 //
-//     svc := s3.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10))
+//     sess := session.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10))
 //
 func NewConfig() *Config {
 	return &Config{}
@@ -210,6 +240,20 @@ func (c *Config) WithS3ForcePathStyle(force bool) *Config {
 	return c
 }
 
+// WithS3Disable100Continue sets a config S3Disable100Continue value returning
+// a Config pointer for chaining.
+func (c *Config) WithS3Disable100Continue(disable bool) *Config {
+	c.S3Disable100Continue = &disable
+	return c
+}
+
+// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3UseAccelerate(enable bool) *Config {
+	c.S3UseAccelerate = &enable
+	return c
+}
+
 // WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
 // returning a Config pointer for chaining.
 func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
@@ -288,6 +332,14 @@ func mergeInConfig(dst *Config, other *Config) {
 		dst.S3ForcePathStyle = other.S3ForcePathStyle
 	}
 
+	if other.S3Disable100Continue != nil {
+		dst.S3Disable100Continue = other.S3Disable100Continue
+	}
+
+	if other.S3UseAccelerate != nil {
+		dst.S3UseAccelerate = other.S3UseAccelerate
+	}
+
 	if other.EC2MetadataDisableTimeoutOverride != nil {
 		dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
 	}
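Illustrative usage (not part of this commit): the new S3Disable100Continue and S3UseAccelerate options are plain *bool fields with chainable setters. A minimal sketch of how a caller could set them; the region and the ListBuckets call are illustrative only.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Chain the builder methods added in this SDK version.
	cfg := aws.NewConfig().
		WithRegion("us-west-2").
		WithS3Disable100Continue(true). // skip Expect: 100-Continue on PUTs
		WithS3UseAccelerate(false)      // keep the normal S3 endpoint

	svc := s3.New(session.New(cfg))
	out, err := svc.ListBuckets(&s3.ListBucketsInput{})
	if err != nil {
		fmt.Println("list buckets failed:", err)
		return
	}
	fmt.Println("buckets:", len(out.Buckets))
}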
vendor/github.com/aws/aws-sdk-go/aws/convert_types.go (generated, vendored), 24 lines changed

@@ -2,7 +2,7 @@ package aws
 
 import "time"
 
-// String returns a pointer to of the string value passed in.
+// String returns a pointer to the string value passed in.
 func String(v string) *string {
 	return &v
 }
@@ -61,7 +61,7 @@ func StringValueMap(src map[string]*string) map[string]string {
 	return dst
 }
 
-// Bool returns a pointer to of the bool value passed in.
+// Bool returns a pointer to the bool value passed in.
 func Bool(v bool) *bool {
 	return &v
 }
@@ -120,7 +120,7 @@ func BoolValueMap(src map[string]*bool) map[string]bool {
 	return dst
 }
 
-// Int returns a pointer to of the int value passed in.
+// Int returns a pointer to the int value passed in.
 func Int(v int) *int {
 	return &v
 }
@@ -179,7 +179,7 @@ func IntValueMap(src map[string]*int) map[string]int {
 	return dst
 }
 
-// Int64 returns a pointer to of the int64 value passed in.
+// Int64 returns a pointer to the int64 value passed in.
 func Int64(v int64) *int64 {
 	return &v
 }
@@ -238,7 +238,7 @@ func Int64ValueMap(src map[string]*int64) map[string]int64 {
 	return dst
 }
 
-// Float64 returns a pointer to of the float64 value passed in.
+// Float64 returns a pointer to the float64 value passed in.
 func Float64(v float64) *float64 {
 	return &v
 }
@@ -297,7 +297,7 @@ func Float64ValueMap(src map[string]*float64) map[string]float64 {
 	return dst
 }
 
-// Time returns a pointer to of the time.Time value passed in.
+// Time returns a pointer to the time.Time value passed in.
 func Time(v time.Time) *time.Time {
 	return &v
 }
@@ -311,6 +311,18 @@ func TimeValue(v *time.Time) time.Time {
 	return time.Time{}
 }
 
+// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
+// The result is undefined if the Unix time cannot be represented by an int64.
+// Which includes calling TimeUnixMilli on a zero Time is undefined.
+//
+// This utility is useful for service API's such as CloudWatch Logs which require
+// their unix time values to be in milliseconds.
+//
+// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
+func TimeUnixMilli(t time.Time) int64 {
+	return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
+}
+
 // TimeSlice converts a slice of time.Time values into a slice of
 // time.Time pointers
 func TimeSlice(src []time.Time) []*time.Time {
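Illustrative usage (not part of this commit): a short sketch of the pointer helpers whose doc comments are corrected above, plus the new TimeUnixMilli; the bucket name is a placeholder.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Pointer helpers used throughout SDK request structs.
	name := aws.String("example-bucket")
	fmt.Println(*name, aws.StringValue(name))

	// New in this version: millisecond Unix timestamps, aimed at APIs
	// such as CloudWatch Logs that expect milliseconds.
	fmt.Println(aws.TimeUnixMilli(time.Now().UTC()))
}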
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go (generated, vendored), 25 lines changed

@@ -24,14 +24,16 @@ type lener interface {
 // BuildContentLengthHandler builds the content length of a request based on the body,
 // or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
 // to determine request body length and no "Content-Length" was specified it will panic.
+//
+// The Content-Length will only be aded to the request if the length of the body
+// is greater than 0. If the body is empty or the current `Content-Length`
+// header is <= 0, the header will also be stripped.
 var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
-	if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
-		length, _ := strconv.ParseInt(slength, 10, 64)
-		r.HTTPRequest.ContentLength = length
-		return
-	}
-
 	var length int64
 
+	if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
+		length, _ = strconv.ParseInt(slength, 10, 64)
+	} else {
 		switch body := r.Body.(type) {
 		case nil:
 			length = 0
@@ -45,9 +47,15 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen
 		default:
 			panic("Cannot get length of body, must provide `ContentLength`")
 		}
+	}
 
+	if length > 0 {
 		r.HTTPRequest.ContentLength = length
 		r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
+	} else {
+		r.HTTPRequest.ContentLength = 0
+		r.HTTPRequest.Header.Del("Content-Length")
+	}
 }}
 
 // SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent.
@@ -64,6 +72,11 @@ var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *req
 	var err error
 	r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest)
 	if err != nil {
+		// Prevent leaking if an HTTPResponse was returned. Clean up
+		// the body.
+		if r.HTTPResponse != nil {
+			r.HTTPResponse.Body.Close()
+		}
 		// Capture the case where url.Error is returned for error processing
 		// response. e.g. 301 without location header comes back as string
 		// error and r.HTTPResponse is nil. Other url redirect errors will
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go (generated, vendored), 139 lines changed

@@ -1,144 +1,17 @@
 package corehandlers
 
-import (
-	"fmt"
-	"reflect"
-	"strconv"
-	"strings"
-
-	"github.com/aws/aws-sdk-go/aws/awserr"
-	"github.com/aws/aws-sdk-go/aws/request"
-)
+import "github.com/aws/aws-sdk-go/aws/request"
 
 // ValidateParametersHandler is a request handler to validate the input parameters.
 // Validating parameters only has meaning if done prior to the request being sent.
 var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
-	if r.ParamsFilled() {
-		v := validator{errors: []string{}}
-		v.validateAny(reflect.ValueOf(r.Params), "")
-
-		if count := len(v.errors); count > 0 {
-			format := "%d validation errors:\n- %s"
-			msg := fmt.Sprintf(format, count, strings.Join(v.errors, "\n- "))
-			r.Error = awserr.New("InvalidParameter", msg, nil)
-		}
-	}
-}}
-
-// A validator validates values. Collects validations errors which occurs.
-type validator struct {
-	errors []string
-}
-
-// validateAny will validate any struct, slice or map type. All validations
-// are also performed recursively for nested types.
-func (v *validator) validateAny(value reflect.Value, path string) {
-	value = reflect.Indirect(value)
-	if !value.IsValid() {
+	if !r.ParamsFilled() {
 		return
 	}
 
-	switch value.Kind() {
-	case reflect.Struct:
-		v.validateStruct(value, path)
-	case reflect.Slice:
-		for i := 0; i < value.Len(); i++ {
-			v.validateAny(value.Index(i), path+fmt.Sprintf("[%d]", i))
-		}
-	case reflect.Map:
-		for _, n := range value.MapKeys() {
-			v.validateAny(value.MapIndex(n), path+fmt.Sprintf("[%q]", n.String()))
-		}
-	}
-}
-
-// validateStruct will validate the struct value's fields. If the structure has
-// nested types those types will be validated also.
-func (v *validator) validateStruct(value reflect.Value, path string) {
-	prefix := "."
-	if path == "" {
-		prefix = ""
-	}
-
-	for i := 0; i < value.Type().NumField(); i++ {
-		f := value.Type().Field(i)
-		if strings.ToLower(f.Name[0:1]) == f.Name[0:1] {
-			continue
-		}
-		fvalue := value.FieldByName(f.Name)
-
-		err := validateField(f, fvalue, validateFieldRequired, validateFieldMin)
-		if err != nil {
-			v.errors = append(v.errors, fmt.Sprintf("%s: %s", err.Error(), path+prefix+f.Name))
-			continue
-		}
-
-		v.validateAny(fvalue, path+prefix+f.Name)
-	}
-}
-
-type validatorFunc func(f reflect.StructField, fvalue reflect.Value) error
-
-func validateField(f reflect.StructField, fvalue reflect.Value, funcs ...validatorFunc) error {
-	for _, fn := range funcs {
-		if err := fn(f, fvalue); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// Validates that a field has a valid value provided for required fields.
-func validateFieldRequired(f reflect.StructField, fvalue reflect.Value) error {
-	if f.Tag.Get("required") == "" {
-		return nil
-	}
-
-	switch fvalue.Kind() {
-	case reflect.Ptr, reflect.Slice, reflect.Map:
-		if fvalue.IsNil() {
-			return fmt.Errorf("missing required parameter")
-		}
-	default:
-		if !fvalue.IsValid() {
-			return fmt.Errorf("missing required parameter")
-		}
-	}
-	return nil
-}
-
-// Validates that if a value is provided for a field, that value must be at
-// least a minimum length.
-func validateFieldMin(f reflect.StructField, fvalue reflect.Value) error {
-	minStr := f.Tag.Get("min")
-	if minStr == "" {
-		return nil
-	}
-	min, _ := strconv.ParseInt(minStr, 10, 64)
-
-	kind := fvalue.Kind()
-	if kind == reflect.Ptr {
-		if fvalue.IsNil() {
-			return nil
-		}
-		fvalue = fvalue.Elem()
-	}
-
-	switch fvalue.Kind() {
-	case reflect.String:
-		if int64(fvalue.Len()) < min {
-			return fmt.Errorf("field too short, minimum length %d", min)
-		}
-	case reflect.Slice, reflect.Map:
-		if fvalue.IsNil() {
-			return nil
-		}
-		if int64(fvalue.Len()) < min {
-			return fmt.Errorf("field too short, minimum length %d", min)
-		}
-
-		// TODO min can also apply to number minimum value.
-
-	}
-	return nil
-}
+	if v, ok := r.Params.(request.Validator); ok {
+		if err := v.Validate(); err != nil {
+			r.Error = err
+		}
+	}
+}}
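Illustrative usage (not part of this commit): parameter validation now delegates to the input type itself through request.Validator (assumed here to be the single-method Validate() error interface used by the handler above) instead of reflecting over struct tags. A sketch with a hypothetical input type.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

// FakeInput is a hypothetical API input type; generated SDK inputs now
// carry their own Validate method rather than being walked by reflection.
type FakeInput struct {
	Bucket *string
}

// Validate reports a missing required field, mirroring the kind of error
// the old reflection-based validator produced.
func (i *FakeInput) Validate() error {
	if i.Bucket == nil {
		return awserr.New("InvalidParameter", "missing required field, FakeInput.Bucket", nil)
	}
	return nil
}

func main() {
	var in FakeInput
	if err := in.Validate(); err != nil {
		fmt.Println(err)
	}
}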
vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go (generated, vendored), 8 lines changed

@@ -132,7 +132,7 @@ const iamSecurityCredsPath = "/iam/security-credentials"
 func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
 	resp, err := client.GetMetadata(iamSecurityCredsPath)
 	if err != nil {
-		return nil, awserr.New("EC2RoleRequestError", "failed to list EC2 Roles", err)
+		return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
 	}
 
 	credsList := []string{}
@@ -142,7 +142,7 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
 	}
 
 	if err := s.Err(); err != nil {
-		return nil, awserr.New("SerializationError", "failed to read list of EC2 Roles", err)
+		return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
 	}
 
 	return credsList, nil
@@ -157,7 +157,7 @@ func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCred
 	if err != nil {
 		return ec2RoleCredRespBody{},
 			awserr.New("EC2RoleRequestError",
-				fmt.Sprintf("failed to get %s EC2 Role credentials", credsName),
+				fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
 				err)
 	}
 
@@ -165,7 +165,7 @@ func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCred
 	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
 		return ec2RoleCredRespBody{},
 			awserr.New("SerializationError",
-				fmt.Sprintf("failed to decode %s EC2 Role credentials", credsName),
+				fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
 				err)
 	}
 
vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go (generated, vendored), 2 lines changed

@@ -14,7 +14,7 @@ var (
 	ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
 )
 
-// A StaticProvider is a set of credentials which are set pragmatically,
+// A StaticProvider is a set of credentials which are set programmatically,
 // and will never expire.
 type StaticProvider struct {
 	Value
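Illustrative usage (not part of this commit): a minimal sketch of the StaticProvider path whose doc comment is fixed above; the key values are placeholders, not real credentials.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Static credentials set programmatically; they never expire.
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "TOKEN")
	v, err := creds.Get()
	if err != nil {
		fmt.Println("retrieve failed:", err)
		return
	}
	fmt.Println("access key id:", v.AccessKeyID)
}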
vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go (generated, vendored), 2 lines changed

@@ -66,7 +66,9 @@ func Handlers() request.Handlers {
 	var handlers request.Handlers
 
 	handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+	handlers.Validate.AfterEachFn = request.HandlerListStopOnError
 	handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+	handlers.Build.AfterEachFn = request.HandlerListStopOnError
 	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
 	handlers.Send.PushBackNamed(corehandlers.SendHandler)
 	handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
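Illustrative usage (not part of this commit): the default Validate and Build chains now stop at the first failing handler via AfterEachFn. A sketch of installing a custom AfterEachFn on the same chain, using the HandlerListRunItem type added later in this diff.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/defaults"
	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	handlers := defaults.Handlers()
	handlers.Validate.AfterEachFn = func(item request.HandlerListRunItem) bool {
		fmt.Println("validate handler", item.Index, item.Handler.Name, "err:", item.Request.Error)
		// Returning false stops the chain, which is what
		// request.HandlerListStopOnError does once an error is set.
		return item.Request.Error == nil
	}
}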
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go (generated, vendored), 99 lines changed

@@ -1,12 +1,19 @@
 package ec2metadata
 
 import (
+	"encoding/json"
+	"fmt"
 	"path"
+	"strings"
+	"time"
 
+	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/request"
 )
 
-// GetMetadata uses the path provided to request
+// GetMetadata uses the path provided to request information from the EC2
+// instance metdata service. The content will be returned as a string, or
+// error if the request failed.
 func (c *EC2Metadata) GetMetadata(p string) (string, error) {
 	op := &request.Operation{
 		Name:       "GetMetadata",
@@ -20,6 +27,68 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) {
 	return output.Content, req.Send()
 }
 
+// GetDynamicData uses the path provided to request information from the EC2
+// instance metadata service for dynamic data. The content will be returned
+// as a string, or error if the request failed.
+func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
+	op := &request.Operation{
+		Name:       "GetDynamicData",
+		HTTPMethod: "GET",
+		HTTPPath:   path.Join("/", "dynamic", p),
+	}
+
+	output := &metadataOutput{}
+	req := c.NewRequest(op, nil, output)
+
+	return output.Content, req.Send()
+}
+
+// GetInstanceIdentityDocument retrieves an identity document describing an
+// instance. Error is returned if the request fails or is unable to parse
+// the response.
+func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
+	resp, err := c.GetDynamicData("instance-identity/document")
+	if err != nil {
+		return EC2InstanceIdentityDocument{},
+			awserr.New("EC2MetadataRequestError",
+				"failed to get EC2 instance identity document", err)
+	}
+
+	doc := EC2InstanceIdentityDocument{}
+	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
+		return EC2InstanceIdentityDocument{},
+			awserr.New("SerializationError",
+				"failed to decode EC2 instance identity document", err)
+	}
+
+	return doc, nil
+}
+
+// IAMInfo retrieves IAM info from the metadata API
+func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
+	resp, err := c.GetMetadata("iam/info")
+	if err != nil {
+		return EC2IAMInfo{},
+			awserr.New("EC2MetadataRequestError",
+				"failed to get EC2 IAM info", err)
+	}
+
+	info := EC2IAMInfo{}
+	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
+		return EC2IAMInfo{},
+			awserr.New("SerializationError",
+				"failed to decode EC2 IAM info", err)
+	}
+
+	if info.Code != "Success" {
+		errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
+		return EC2IAMInfo{},
+			awserr.New("EC2MetadataError", errMsg, nil)
+	}
+
+	return info, nil
+}
+
 // Region returns the region the instance is running in.
 func (c *EC2Metadata) Region() (string, error) {
 	resp, err := c.GetMetadata("placement/availability-zone")
@@ -41,3 +110,31 @@ func (c *EC2Metadata) Available() bool {
 
 	return true
 }
+
+// An EC2IAMInfo provides the shape for unmarshalling
+// an IAM info from the metadata API
+type EC2IAMInfo struct {
+	Code               string
+	LastUpdated        time.Time
+	InstanceProfileArn string
+	InstanceProfileID  string
+}
+
+// An EC2InstanceIdentityDocument provides the shape for unmarshalling
+// an instance identity document
+type EC2InstanceIdentityDocument struct {
+	DevpayProductCodes []string  `json:"devpayProductCodes"`
+	AvailabilityZone   string    `json:"availabilityZone"`
+	PrivateIP          string    `json:"privateIp"`
+	Version            string    `json:"version"`
+	Region             string    `json:"region"`
+	InstanceID         string    `json:"instanceId"`
+	BillingProducts    []string  `json:"billingProducts"`
+	InstanceType       string    `json:"instanceType"`
+	AccountID          string    `json:"accountId"`
+	PendingTime        time.Time `json:"pendingTime"`
+	ImageID            string    `json:"imageId"`
+	KernelID           string    `json:"kernelId"`
+	RamdiskID          string    `json:"ramdiskId"`
+	Architecture       string    `json:"architecture"`
+}
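Illustrative usage (not part of this commit): a sketch of the new instance identity document call. It only returns data when run on an EC2 instance, and the session-based ec2metadata.New constructor is assumed from this SDK version.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// The metadata client is only reachable from inside EC2.
	meta := ec2metadata.New(session.New())
	if !meta.Available() {
		fmt.Println("EC2 metadata service not available")
		return
	}

	doc, err := meta.GetInstanceIdentityDocument()
	if err != nil {
		fmt.Println("identity document:", err)
		return
	}
	fmt.Println("instance", doc.InstanceID, "in region", doc.Region)
}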
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go (generated, vendored), 23 lines changed

@@ -3,7 +3,9 @@
 package ec2metadata
 
 import (
-	"io/ioutil"
+	"bytes"
+	"errors"
+	"io"
 	"net/http"
 	"time"
 
@@ -91,23 +93,28 @@ type metadataOutput struct {
 
 func unmarshalHandler(r *request.Request) {
 	defer r.HTTPResponse.Body.Close()
-	b, err := ioutil.ReadAll(r.HTTPResponse.Body)
-	if err != nil {
+	b := &bytes.Buffer{}
+	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
 		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err)
+		return
 	}
 
-	data := r.Data.(*metadataOutput)
-	data.Content = string(b)
+	if data, ok := r.Data.(*metadataOutput); ok {
+		data.Content = b.String()
+	}
 }
 
func unmarshalError(r *request.Request) {
 	defer r.HTTPResponse.Body.Close()
-	_, err := ioutil.ReadAll(r.HTTPResponse.Body)
-	if err != nil {
+	b := &bytes.Buffer{}
+	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
 		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err)
+		return
 	}
 
-	// TODO extract the error...
+	// Response body format is not consistent between metadata endpoints.
+	// Grab the error message as a string and include that as the source error
+	r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
 }
 
 func validateEndpointHandler(r *request.Request) {
vendor/github.com/aws/aws-sdk-go/aws/logger.go
generated
vendored
14
vendor/github.com/aws/aws-sdk-go/aws/logger.go
generated
vendored
|
@ -79,6 +79,20 @@ type Logger interface {
|
||||||
Log(...interface{})
|
Log(...interface{})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// A LoggerFunc is a convenience type to convert a function taking a variadic
|
||||||
|
// list of arguments and wrap it so the Logger interface can be used.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
|
||||||
|
// fmt.Fprintln(os.Stdout, args...)
|
||||||
|
// })})
|
||||||
|
type LoggerFunc func(...interface{})
|
||||||
|
|
||||||
|
// Log calls the wrapped function with the arguments provided
|
||||||
|
func (f LoggerFunc) Log(args ...interface{}) {
|
||||||
|
f(args...)
|
||||||
|
}
|
||||||
|
|
||||||
// NewDefaultLogger returns a Logger which will write log messages to stdout, and
|
// NewDefaultLogger returns a Logger which will write log messages to stdout, and
|
||||||
// use same formatting runes as the stdlib log.Logger
|
// use same formatting runes as the stdlib log.Logger
|
||||||
func NewDefaultLogger() Logger {
|
func NewDefaultLogger() Logger {
|
||||||
|
|
53
vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
generated
vendored
53
vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
generated
vendored
|
@ -50,9 +50,28 @@ func (h *Handlers) Clear() {
|
||||||
h.AfterRetry.Clear()
|
h.AfterRetry.Clear()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// A HandlerListRunItem represents an entry in the HandlerList which
|
||||||
|
// is being run.
|
||||||
|
type HandlerListRunItem struct {
|
||||||
|
Index int
|
||||||
|
Handler NamedHandler
|
||||||
|
Request *Request
|
||||||
|
}
|
||||||
|
|
||||||
// A HandlerList manages zero or more handlers in a list.
|
// A HandlerList manages zero or more handlers in a list.
|
||||||
type HandlerList struct {
|
type HandlerList struct {
|
||||||
list []NamedHandler
|
list []NamedHandler
|
||||||
|
|
||||||
|
// Called after each request handler in the list is called. If set
|
||||||
|
// and the func returns true the HandlerList will continue to iterate
|
||||||
|
// over the request handlers. If false is returned the HandlerList
|
||||||
|
// will stop iterating.
|
||||||
|
//
|
||||||
|
// Should be used if extra logic to be performed between each handler
|
||||||
|
// in the list. This can be used to terminate a list's iteration
|
||||||
|
// based on a condition such as error like, HandlerListStopOnError.
|
||||||
|
// Or for logging like HandlerListLogItem.
|
||||||
|
AfterEachFn func(item HandlerListRunItem) bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// A NamedHandler is a struct that contains a name and function callback.
|
// A NamedHandler is a struct that contains a name and function callback.
|
||||||
|
@ -63,7 +82,9 @@ type NamedHandler struct {
|
||||||
|
|
||||||
// copy creates a copy of the handler list.
|
// copy creates a copy of the handler list.
|
||||||
func (l *HandlerList) copy() HandlerList {
|
func (l *HandlerList) copy() HandlerList {
|
||||||
var n HandlerList
|
n := HandlerList{
|
||||||
|
AfterEachFn: l.AfterEachFn,
|
||||||
|
}
|
||||||
n.list = append([]NamedHandler{}, l.list...)
|
n.list = append([]NamedHandler{}, l.list...)
|
||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
|
@ -111,9 +132,35 @@ func (l *HandlerList) Remove(n NamedHandler) {
|
||||||
|
|
||||||
// Run executes all handlers in the list with a given request object.
|
// Run executes all handlers in the list with a given request object.
|
||||||
func (l *HandlerList) Run(r *Request) {
|
func (l *HandlerList) Run(r *Request) {
|
||||||
for _, f := range l.list {
|
for i, h := range l.list {
|
||||||
f.Fn(r)
|
h.Fn(r)
|
||||||
|
item := HandlerListRunItem{
|
||||||
|
Index: i, Handler: h, Request: r,
|
||||||
}
|
}
|
||||||
|
if l.AfterEachFn != nil && !l.AfterEachFn(item) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandlerListLogItem logs the request handler and the state of the
|
||||||
|
// request's Error value. Always returns true to continue iterating
|
||||||
|
// request handlers in a HandlerList.
|
||||||
|
func HandlerListLogItem(item HandlerListRunItem) bool {
|
||||||
|
if item.Request.Config.Logger == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
item.Request.Config.Logger.Log("DEBUG: RequestHandler",
|
||||||
|
item.Index, item.Handler.Name, item.Request.Error)
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandlerListStopOnError returns false to stop the HandlerList iterating
|
||||||
|
// over request handlers if Request.Error is not nil. True otherwise
|
||||||
|
// to continue iterating.
|
||||||
|
func HandlerListStopOnError(item HandlerListRunItem) bool {
|
||||||
|
return item.Request.Error == nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
|
// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
|
||||||
|
|
33
vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
generated
vendored
Normal file
33
vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
generated
vendored
Normal file
|
@ -0,0 +1,33 @@
|
||||||
|
// +build go1.5
|
||||||
|
|
||||||
|
package request
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
)
|
||||||
|
|
||||||
|
func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
|
||||||
|
req := &http.Request{
|
||||||
|
URL: &url.URL{},
|
||||||
|
Header: http.Header{},
|
||||||
|
Close: r.Close,
|
||||||
|
Body: body,
|
||||||
|
Host: r.Host,
|
||||||
|
Method: r.Method,
|
||||||
|
Proto: r.Proto,
|
||||||
|
ContentLength: r.ContentLength,
|
||||||
|
// Cancel will be deprecated in 1.7 and will be replaced with Context
|
||||||
|
Cancel: r.Cancel,
|
||||||
|
}
|
||||||
|
|
||||||
|
*req.URL = *r.URL
|
||||||
|
for k, v := range r.Header {
|
||||||
|
for _, vv := range v {
|
||||||
|
req.Header.Add(k, vv)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return req
|
||||||
|
}
|
31
vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go
generated
vendored
Normal file
31
vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go
generated
vendored
Normal file
|
@ -0,0 +1,31 @@
|
||||||
|
// +build !go1.5
|
||||||
|
|
||||||
|
package request
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
)
|
||||||
|
|
||||||
|
func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
|
||||||
|
req := &http.Request{
|
||||||
|
URL: &url.URL{},
|
||||||
|
Header: http.Header{},
|
||||||
|
Close: r.Close,
|
||||||
|
Body: body,
|
||||||
|
Host: r.Host,
|
||||||
|
Method: r.Method,
|
||||||
|
Proto: r.Proto,
|
||||||
|
ContentLength: r.ContentLength,
|
||||||
|
}
|
||||||
|
|
||||||
|
*req.URL = *r.URL
|
||||||
|
for k, v := range r.Header {
|
||||||
|
for _, vv := range v {
|
||||||
|
req.Header.Add(k, vv)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return req
|
||||||
|
}
|
49
vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
generated
vendored
Normal file
49
vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
generated
vendored
Normal file
|
@ -0,0 +1,49 @@
|
||||||
|
package request
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// offsetReader is a thread-safe io.ReadCloser to prevent racing
|
||||||
|
// with retrying requests
|
||||||
|
type offsetReader struct {
|
||||||
|
buf io.ReadSeeker
|
||||||
|
lock sync.RWMutex
|
||||||
|
closed bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
|
||||||
|
reader := &offsetReader{}
|
||||||
|
buf.Seek(offset, 0)
|
||||||
|
|
||||||
|
reader.buf = buf
|
||||||
|
return reader
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close is a thread-safe close. Uses the write lock.
|
||||||
|
func (o *offsetReader) Close() error {
|
||||||
|
o.lock.Lock()
|
||||||
|
defer o.lock.Unlock()
|
||||||
|
o.closed = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read is a thread-safe read using a read lock.
|
||||||
|
func (o *offsetReader) Read(p []byte) (int, error) {
|
||||||
|
o.lock.RLock()
|
||||||
|
defer o.lock.RUnlock()
|
||||||
|
|
||||||
|
if o.closed {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
return o.buf.Read(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CloseAndCopy will return a new offsetReader with a copy of the old buffer
|
||||||
|
// and close the old buffer.
|
||||||
|
func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader {
|
||||||
|
o.Close()
|
||||||
|
return newOffsetReader(o.buf, offset)
|
||||||
|
}
|
64
vendor/github.com/aws/aws-sdk-go/aws/request/request.go
generated
vendored
64
vendor/github.com/aws/aws-sdk-go/aws/request/request.go
generated
vendored
|
@ -12,6 +12,7 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
"github.com/aws/aws-sdk-go/aws/client/metadata"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -38,6 +39,7 @@ type Request struct {
|
||||||
RetryDelay time.Duration
|
RetryDelay time.Duration
|
||||||
NotHoist bool
|
NotHoist bool
|
||||||
SignedHeaderVals http.Header
|
SignedHeaderVals http.Header
|
||||||
|
LastSignedAt time.Time
|
||||||
|
|
||||||
built bool
|
built bool
|
||||||
}
|
}
|
||||||
|
@ -77,7 +79,13 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
|
||||||
}
|
}
|
||||||
|
|
||||||
httpReq, _ := http.NewRequest(method, "", nil)
|
httpReq, _ := http.NewRequest(method, "", nil)
|
||||||
httpReq.URL, _ = url.Parse(clientInfo.Endpoint + p)
|
|
||||||
|
var err error
|
||||||
|
httpReq.URL, err = url.Parse(clientInfo.Endpoint + p)
|
||||||
|
if err != nil {
|
||||||
|
httpReq.URL = &url.URL{}
|
||||||
|
err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
|
||||||
|
}
|
||||||
|
|
||||||
r := &Request{
|
r := &Request{
|
||||||
Config: cfg,
|
Config: cfg,
|
||||||
|
@ -91,7 +99,7 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
|
||||||
HTTPRequest: httpReq,
|
HTTPRequest: httpReq,
|
||||||
Body: nil,
|
Body: nil,
|
||||||
Params: params,
|
Params: params,
|
||||||
Error: nil,
|
Error: err,
|
||||||
Data: data,
|
Data: data,
|
||||||
}
|
}
|
||||||
r.SetBufferBody([]byte{})
|
r.SetBufferBody([]byte{})
|
||||||
|
@ -131,7 +139,7 @@ func (r *Request) SetStringBody(s string) {
|
||||||
|
|
||||||
// SetReaderBody will set the request's body reader.
|
// SetReaderBody will set the request's body reader.
|
||||||
func (r *Request) SetReaderBody(reader io.ReadSeeker) {
|
func (r *Request) SetReaderBody(reader io.ReadSeeker) {
|
||||||
r.HTTPRequest.Body = ioutil.NopCloser(reader)
|
r.HTTPRequest.Body = newOffsetReader(reader, 0)
|
||||||
r.Body = reader
|
r.Body = reader
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -185,20 +193,23 @@ func debugLogReqError(r *Request, stage string, retrying bool, err error) {
|
||||||
// which occurred will be returned.
|
// which occurred will be returned.
|
||||||
func (r *Request) Build() error {
|
func (r *Request) Build() error {
|
||||||
if !r.built {
|
if !r.built {
|
||||||
r.Error = nil
|
|
||||||
r.Handlers.Validate.Run(r)
|
r.Handlers.Validate.Run(r)
|
||||||
if r.Error != nil {
|
if r.Error != nil {
|
||||||
debugLogReqError(r, "Validate Request", false, r.Error)
|
debugLogReqError(r, "Validate Request", false, r.Error)
|
||||||
return r.Error
|
return r.Error
|
||||||
}
|
}
|
||||||
r.Handlers.Build.Run(r)
|
r.Handlers.Build.Run(r)
|
||||||
|
if r.Error != nil {
|
||||||
|
debugLogReqError(r, "Build Request", false, r.Error)
|
||||||
|
return r.Error
|
||||||
|
}
|
||||||
r.built = true
|
r.built = true
|
||||||
}
|
}
|
||||||
|
|
||||||
return r.Error
|
return r.Error
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sign will sign the request retuning error if errors are encountered.
|
// Sign will sign the request returning error if errors are encountered.
|
||||||
//
|
//
|
||||||
// Send will build the request prior to signing. All Sign Handlers will
|
// Send will build the request prior to signing. All Sign Handlers will
|
||||||
// be executed in the order they were set.
|
// be executed in the order they were set.
|
||||||
|
@ -217,28 +228,53 @@ func (r *Request) Sign() error {
|
||||||
//
|
//
|
||||||
// Send will sign the request prior to sending. All Send Handlers will
|
// Send will sign the request prior to sending. All Send Handlers will
|
||||||
// be executed in the order they were set.
|
// be executed in the order they were set.
|
||||||
|
//
|
||||||
|
// Canceling a request is non-deterministic. If a request has been canceled,
|
||||||
|
// then the transport will choose, randomly, one of the state channels during
|
||||||
|
// reads or getting the connection.
|
||||||
|
//
|
||||||
|
// readLoop() and getConn(req *Request, cm connectMethod)
|
||||||
|
// https://github.com/golang/go/blob/master/src/net/http/transport.go
|
||||||
func (r *Request) Send() error {
|
func (r *Request) Send() error {
|
||||||
for {
|
for {
|
||||||
r.Sign()
|
|
||||||
if r.Error != nil {
|
|
||||||
return r.Error
|
|
||||||
}
|
|
||||||
|
|
||||||
if aws.BoolValue(r.Retryable) {
|
if aws.BoolValue(r.Retryable) {
|
||||||
if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
|
if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
|
||||||
r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
|
r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
|
||||||
r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
|
r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Re-seek the body back to the original point in for a retry so that
|
var body io.ReadCloser
|
||||||
// send will send the body's contents again in the upcoming request.
|
if reader, ok := r.HTTPRequest.Body.(*offsetReader); ok {
|
||||||
r.Body.Seek(r.BodyStart, 0)
|
body = reader.CloseAndCopy(r.BodyStart)
|
||||||
r.HTTPRequest.Body = ioutil.NopCloser(r.Body)
|
} else {
|
||||||
|
if r.Config.Logger != nil {
|
||||||
|
r.Config.Logger.Log("Request body type has been overwritten. May cause race conditions")
|
||||||
}
|
}
|
||||||
|
r.Body.Seek(r.BodyStart, 0)
|
||||||
|
body = ioutil.NopCloser(r.Body)
|
||||||
|
}
|
||||||
|
|
||||||
|
r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, body)
|
||||||
|
if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
|
||||||
|
// Closing response body. Since we are setting a new request to send off, this
|
||||||
|
// response will get squashed and leaked.
|
||||||
|
r.HTTPResponse.Body.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
r.Sign()
|
||||||
|
if r.Error != nil {
|
||||||
|
return r.Error
|
||||||
|
}
|
||||||
|
|
||||||
r.Retryable = nil
|
r.Retryable = nil
|
||||||
|
|
||||||
r.Handlers.Send.Run(r)
|
r.Handlers.Send.Run(r)
|
||||||
if r.Error != nil {
|
if r.Error != nil {
|
||||||
|
if strings.Contains(r.Error.Error(), "net/http: request canceled") {
|
||||||
|
return r.Error
|
||||||
|
}
|
||||||
|
|
||||||
err := r.Error
|
err := r.Error
|
||||||
r.Handlers.Retry.Run(r)
|
r.Handlers.Retry.Run(r)
|
||||||
r.Handlers.AfterRetry.Run(r)
|
r.Handlers.AfterRetry.Run(r)
|
||||||
|
|
19
vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
generated
vendored
19
vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
generated
vendored
|
@ -28,6 +28,9 @@ func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
|
||||||
var retryableCodes = map[string]struct{}{
|
var retryableCodes = map[string]struct{}{
|
||||||
"RequestError": {},
|
"RequestError": {},
|
||||||
"RequestTimeout": {},
|
"RequestTimeout": {},
|
||||||
|
}
|
||||||
|
|
||||||
|
var throttleCodes = map[string]struct{}{
|
||||||
"ProvisionedThroughputExceededException": {},
|
"ProvisionedThroughputExceededException": {},
|
||||||
"Throttling": {},
|
"Throttling": {},
|
||||||
"ThrottlingException": {},
|
"ThrottlingException": {},
|
||||||
|
@ -46,6 +49,11 @@ var credsExpiredCodes = map[string]struct{}{
|
||||||
"RequestExpired": {}, // EC2 Only
|
"RequestExpired": {}, // EC2 Only
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func isCodeThrottle(code string) bool {
|
||||||
|
_, ok := throttleCodes[code]
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
func isCodeRetryable(code string) bool {
|
func isCodeRetryable(code string) bool {
|
||||||
if _, ok := retryableCodes[code]; ok {
|
if _, ok := retryableCodes[code]; ok {
|
||||||
return true
|
return true
|
||||||
|
@ -70,6 +78,17 @@ func (r *Request) IsErrorRetryable() bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// IsErrorThrottle returns whether the error is to be throttled based on its code.
|
||||||
|
// Returns false if the request has no Error set
|
||||||
|
func (r *Request) IsErrorThrottle() bool {
|
||||||
|
if r.Error != nil {
|
||||||
|
if err, ok := r.Error.(awserr.Error); ok {
|
||||||
|
return isCodeThrottle(err.Code())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
// IsErrorExpired returns whether the error code is a credential expiry error.
|
// IsErrorExpired returns whether the error code is a credential expiry error.
|
||||||
// Returns false if the request has no Error set.
|
// Returns false if the request has no Error set.
|
||||||
func (r *Request) IsErrorExpired() bool {
|
func (r *Request) IsErrorExpired() bool {
|
||||||
|
|
234
vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
generated
vendored
Normal file
234
vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
generated
vendored
Normal file
|
@ -0,0 +1,234 @@
|
||||||
|
package request
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// InvalidParameterErrCode is the error code for invalid parameters errors
|
||||||
|
InvalidParameterErrCode = "InvalidParameter"
|
||||||
|
// ParamRequiredErrCode is the error code for required parameter errors
|
||||||
|
ParamRequiredErrCode = "ParamRequiredError"
|
||||||
|
// ParamMinValueErrCode is the error code for fields with too low of a
|
||||||
|
// number value.
|
||||||
|
ParamMinValueErrCode = "ParamMinValueError"
|
||||||
|
// ParamMinLenErrCode is the error code for fields without enough elements.
|
||||||
|
ParamMinLenErrCode = "ParamMinLenError"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Validator provides a way for types to perform validation logic on their
|
||||||
|
// input values that external code can use to determine if a type's values
|
||||||
|
// are valid.
|
||||||
|
type Validator interface {
|
||||||
|
Validate() error
|
||||||
|
}
|
||||||
|
|
||||||
|
// An ErrInvalidParams provides wrapping of invalid parameter errors found when
|
||||||
|
// validating API operation input parameters.
|
||||||
|
type ErrInvalidParams struct {
|
||||||
|
// Context is the base context of the invalid parameter group.
|
||||||
|
Context string
|
||||||
|
errs []ErrInvalidParam
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds a new invalid parameter error to the collection of invalid
|
||||||
|
// parameters. The context of the invalid parameter will be updated to reflect
|
||||||
|
// this collection.
|
||||||
|
func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
|
||||||
|
err.SetContext(e.Context)
|
||||||
|
e.errs = append(e.errs, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddNested adds the invalid parameter errors from another ErrInvalidParams
|
||||||
|
// value into this collection. The nested errors will have their nested context
|
||||||
|
// updated and base context to reflect the merging.
|
||||||
|
//
|
||||||
|
// Use for nested validations errors.
|
||||||
|
func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
|
||||||
|
for _, err := range nested.errs {
|
||||||
|
err.SetContext(e.Context)
|
||||||
|
err.AddNestedContext(nestedCtx)
|
||||||
|
e.errs = append(e.errs, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len returns the number of invalid parameter errors
|
||||||
|
func (e ErrInvalidParams) Len() int {
|
||||||
|
return len(e.errs)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Code returns the code of the error
|
||||||
|
func (e ErrInvalidParams) Code() string {
|
||||||
|
return InvalidParameterErrCode
|
||||||
|
}
|
||||||
|
|
||||||
|
// Message returns the message of the error
|
||||||
|
func (e ErrInvalidParams) Message() string {
|
||||||
|
return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error returns the string formatted form of the invalid parameters.
|
||||||
|
func (e ErrInvalidParams) Error() string {
|
||||||
|
w := &bytes.Buffer{}
|
||||||
|
fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
|
||||||
|
|
||||||
|
for _, err := range e.errs {
|
||||||
|
fmt.Fprintf(w, "- %s\n", err.Message())
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// OrigErr returns the invalid parameters as a awserr.BatchedErrors value
|
||||||
|
func (e ErrInvalidParams) OrigErr() error {
|
||||||
|
return awserr.NewBatchError(
|
||||||
|
InvalidParameterErrCode, e.Message(), e.OrigErrs())
|
||||||
|
}
|
||||||
|
|
||||||
|
// OrigErrs returns a slice of the invalid parameters
|
||||||
|
func (e ErrInvalidParams) OrigErrs() []error {
|
||||||
|
errs := make([]error, len(e.errs))
|
||||||
|
for i := 0; i < len(errs); i++ {
|
||||||
|
errs[i] = e.errs[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
return errs
|
||||||
|
}
|
||||||
|
|
||||||
|
// An ErrInvalidParam represents an invalid parameter error type.
|
||||||
|
type ErrInvalidParam interface {
|
||||||
|
awserr.Error
|
||||||
|
|
||||||
|
// Field name the error occurred on.
|
||||||
|
Field() string
|
||||||
|
|
||||||
|
// SetContext updates the context of the error.
|
||||||
|
SetContext(string)
|
||||||
|
|
||||||
|
// AddNestedContext updates the error's context to include a nested level.
|
||||||
|
AddNestedContext(string)
|
||||||
|
}
|
||||||
|
|
||||||
|
type errInvalidParam struct {
|
||||||
|
context string
|
||||||
|
nestedContext string
|
||||||
|
field string
|
||||||
|
code string
|
||||||
|
msg string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Code returns the error code for the type of invalid parameter.
|
||||||
|
func (e *errInvalidParam) Code() string {
|
||||||
|
return e.code
|
||||||
|
}
|
||||||
|
|
||||||
|
// Message returns the reason the parameter was invalid, and its context.
|
||||||
|
func (e *errInvalidParam) Message() string {
|
||||||
|
return fmt.Sprintf("%s, %s.", e.msg, e.Field())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error returns the string version of the invalid parameter error.
|
||||||
|
func (e *errInvalidParam) Error() string {
|
||||||
|
return fmt.Sprintf("%s: %s", e.code, e.Message())
|
||||||
|
}
|
||||||
|
|
||||||
|
// OrigErr returns nil, Implemented for awserr.Error interface.
|
||||||
|
func (e *errInvalidParam) OrigErr() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Field Returns the field and context the error occurred.
|
||||||
|
func (e *errInvalidParam) Field() string {
|
||||||
|
field := e.context
|
||||||
|
if len(field) > 0 {
|
||||||
|
field += "."
|
||||||
|
}
|
||||||
|
if len(e.nestedContext) > 0 {
|
||||||
|
field += fmt.Sprintf("%s.", e.nestedContext)
|
||||||
|
}
|
||||||
|
field += e.field
|
||||||
|
|
||||||
|
return field
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetContext updates the base context of the error.
|
||||||
|
func (e *errInvalidParam) SetContext(ctx string) {
|
||||||
|
e.context = ctx
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddNestedContext prepends a context to the field's path.
|
||||||
|
func (e *errInvalidParam) AddNestedContext(ctx string) {
|
||||||
|
if len(e.nestedContext) == 0 {
|
||||||
|
e.nestedContext = ctx
|
||||||
|
} else {
|
||||||
|
e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// An ErrParamRequired represents an required parameter error.
|
||||||
|
type ErrParamRequired struct {
|
||||||
|
errInvalidParam
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewErrParamRequired creates a new required parameter error.
|
||||||
|
func NewErrParamRequired(field string) *ErrParamRequired {
|
||||||
|
return &ErrParamRequired{
|
||||||
|
errInvalidParam{
|
||||||
|
code: ParamRequiredErrCode,
|
||||||
|
field: field,
|
||||||
|
msg: fmt.Sprintf("missing required field"),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// An ErrParamMinValue represents a minimum value parameter error.
|
||||||
|
type ErrParamMinValue struct {
|
||||||
|
errInvalidParam
|
||||||
|
min float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewErrParamMinValue creates a new minimum value parameter error.
|
||||||
|
func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
|
||||||
|
return &ErrParamMinValue{
|
||||||
|
errInvalidParam: errInvalidParam{
|
||||||
|
code: ParamMinValueErrCode,
|
||||||
|
field: field,
|
||||||
|
msg: fmt.Sprintf("minimum field value of %v", min),
|
||||||
|
},
|
||||||
|
min: min,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MinValue returns the field's require minimum value.
|
||||||
|
//
|
||||||
|
// float64 is returned for both int and float min values.
|
||||||
|
func (e *ErrParamMinValue) MinValue() float64 {
|
||||||
|
return e.min
|
||||||
|
}
|
||||||
|
|
||||||
|
// An ErrParamMinLen represents a minimum length parameter error.
|
||||||
|
type ErrParamMinLen struct {
|
||||||
|
errInvalidParam
|
||||||
|
min int
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewErrParamMinLen creates a new minimum length parameter error.
|
||||||
|
func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
|
||||||
|
return &ErrParamMinLen{
|
||||||
|
errInvalidParam: errInvalidParam{
|
||||||
|
code: ParamMinValueErrCode,
|
||||||
|
field: field,
|
||||||
|
msg: fmt.Sprintf("minimum field size of %v", min),
|
||||||
|
},
|
||||||
|
min: min,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MinLen returns the field's required minimum length.
|
||||||
|
func (e *ErrParamMinLen) MinLen() int {
|
||||||
|
return e.min
|
||||||
|
}
|
2
vendor/github.com/aws/aws-sdk-go/aws/session/session.go
generated
vendored
2
vendor/github.com/aws/aws-sdk-go/aws/session/session.go
generated
vendored
|
@ -86,7 +86,7 @@ func initHandlers(s *Session) {
|
||||||
//
|
//
|
||||||
// Example:
|
// Example:
|
||||||
// // Create a copy of the current session, configured for the us-west-2 region.
|
// // Create a copy of the current session, configured for the us-west-2 region.
|
||||||
// sess.Copy(&aws.Config{Region: aws.String("us-west-2"})
|
// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
|
||||||
func (s *Session) Copy(cfgs ...*aws.Config) *Session {
|
func (s *Session) Copy(cfgs ...*aws.Config) *Session {
|
||||||
newSession := &Session{
|
newSession := &Session{
|
||||||
Config: s.Config.Copy(cfgs...),
|
Config: s.Config.Copy(cfgs...),
|
||||||
|
|
644
vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
generated
vendored
Normal file
644
vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
generated
vendored
Normal file
|
@ -0,0 +1,644 @@
|
||||||
|
// Package v4 implements signing for AWS V4 signer
|
||||||
|
//
|
||||||
|
// Provides request signing for request that need to be signed with
|
||||||
|
// AWS V4 Signatures.
|
||||||
|
package v4
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/hmac"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/request"
|
||||||
|
"github.com/aws/aws-sdk-go/private/protocol/rest"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
authHeaderPrefix = "AWS4-HMAC-SHA256"
|
||||||
|
timeFormat = "20060102T150405Z"
|
||||||
|
shortTimeFormat = "20060102"
|
||||||
|
|
||||||
|
// emptyStringSHA256 is a SHA256 of an empty string
|
||||||
|
emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
|
||||||
|
)
|
||||||
|
|
||||||
|
var ignoredHeaders = rules{
|
||||||
|
blacklist{
|
||||||
|
mapRule{
|
||||||
|
"Authorization": struct{}{},
|
||||||
|
"User-Agent": struct{}{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// requiredSignedHeaders is a whitelist for build canonical headers.
|
||||||
|
var requiredSignedHeaders = rules{
|
||||||
|
whitelist{
|
||||||
|
mapRule{
|
||||||
|
"Cache-Control": struct{}{},
|
||||||
|
"Content-Disposition": struct{}{},
|
||||||
|
"Content-Encoding": struct{}{},
|
||||||
|
"Content-Language": struct{}{},
|
||||||
|
"Content-Md5": struct{}{},
|
||||||
|
"Content-Type": struct{}{},
|
||||||
|
"Expires": struct{}{},
|
||||||
|
"If-Match": struct{}{},
|
||||||
|
"If-Modified-Since": struct{}{},
|
||||||
|
"If-None-Match": struct{}{},
|
||||||
|
"If-Unmodified-Since": struct{}{},
|
||||||
|
"Range": struct{}{},
|
||||||
|
"X-Amz-Acl": struct{}{},
|
||||||
|
"X-Amz-Copy-Source": struct{}{},
|
||||||
|
"X-Amz-Copy-Source-If-Match": struct{}{},
|
||||||
|
"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
|
||||||
|
"X-Amz-Copy-Source-If-None-Match": struct{}{},
|
||||||
|
"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
|
||||||
|
"X-Amz-Copy-Source-Range": struct{}{},
|
||||||
|
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
|
||||||
|
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
|
||||||
|
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
|
||||||
|
"X-Amz-Grant-Full-control": struct{}{},
|
||||||
|
"X-Amz-Grant-Read": struct{}{},
|
||||||
|
"X-Amz-Grant-Read-Acp": struct{}{},
|
||||||
|
"X-Amz-Grant-Write": struct{}{},
|
||||||
|
"X-Amz-Grant-Write-Acp": struct{}{},
|
||||||
|
"X-Amz-Metadata-Directive": struct{}{},
|
||||||
|
"X-Amz-Mfa": struct{}{},
|
||||||
|
"X-Amz-Request-Payer": struct{}{},
|
||||||
|
"X-Amz-Server-Side-Encryption": struct{}{},
|
||||||
|
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
|
||||||
|
"X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
|
||||||
|
"X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
|
||||||
|
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
|
||||||
|
"X-Amz-Storage-Class": struct{}{},
|
||||||
|
"X-Amz-Website-Redirect-Location": struct{}{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
patterns{"X-Amz-Meta-"},
|
||||||
|
}
|
||||||
|
|
||||||
|
// allowedHoisting is a whitelist for build query headers. The boolean value
|
||||||
|
// represents whether or not it is a pattern.
|
||||||
|
var allowedQueryHoisting = inclusiveRules{
|
||||||
|
blacklist{requiredSignedHeaders},
|
||||||
|
patterns{"X-Amz-"},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Signer applies AWS v4 signing to given request. Use this to sign requests
|
||||||
|
// that need to be signed with AWS V4 Signatures.
|
||||||
|
type Signer struct {
|
||||||
|
// The authentication credentials the request will be signed against.
|
||||||
|
// This value must be set to sign requests.
|
||||||
|
Credentials *credentials.Credentials
|
||||||
|
|
||||||
|
// Sets the log level the signer should use when reporting information to
|
||||||
|
// the logger. If the logger is nil nothing will be logged. See
|
||||||
|
// aws.LogLevelType for more information on available logging levels
|
||||||
|
//
|
||||||
|
// By default nothing will be logged.
|
||||||
|
Debug aws.LogLevelType
|
||||||
|
|
||||||
|
// The logger loging information will be written to. If there the logger
|
||||||
|
// is nil, nothing will be logged.
|
||||||
|
Logger aws.Logger
|
||||||
|
|
||||||
|
// Disables the Signer's moving HTTP header key/value pairs from the HTTP
|
||||||
|
// request header to the request's query string. This is most commonly used
|
||||||
|
// with pre-signed requests preventing headers from being added to the
|
||||||
|
// request's query string.
|
||||||
|
DisableHeaderHoisting bool
|
||||||
|
|
||||||
|
// currentTimeFn returns the time value which represents the current time.
|
||||||
|
// This value should only be used for testing. If it is nil the default
|
||||||
|
// time.Now will be used.
|
||||||
|
currentTimeFn func() time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSigner returns a Signer pointer configured with the credentials and optional
|
||||||
|
// option values provided. If not options are provided the Signer will use its
|
||||||
|
// default configuration.
|
||||||
|
func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
|
||||||
|
v4 := &Signer{
|
||||||
|
Credentials: credentials,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, option := range options {
|
||||||
|
option(v4)
|
||||||
|
}
|
||||||
|
|
||||||
|
return v4
|
||||||
|
}
|
||||||
|
|
||||||
|
type signingCtx struct {
|
||||||
|
ServiceName string
|
||||||
|
Region string
|
||||||
|
Request *http.Request
|
||||||
|
Body io.ReadSeeker
|
||||||
|
Query url.Values
|
||||||
|
Time time.Time
|
||||||
|
ExpireTime time.Duration
|
||||||
|
SignedHeaderVals http.Header
|
||||||
|
|
||||||
|
credValues credentials.Value
|
||||||
|
isPresign bool
|
||||||
|
formattedTime string
|
||||||
|
formattedShortTime string
|
||||||
|
|
||||||
|
bodyDigest string
|
||||||
|
signedHeaders string
|
||||||
|
canonicalHeaders string
|
||||||
|
canonicalString string
|
||||||
|
credentialString string
|
||||||
|
stringToSign string
|
||||||
|
signature string
|
||||||
|
authorization string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sign signs AWS v4 requests with the provided body, service name, region the
|
||||||
|
// request is made to, and time the request is signed at. The signTime allows
|
||||||
|
// you to specify that a request is signed for the future, and cannot be
|
||||||
|
// used until then.
|
||||||
|
//
|
||||||
|
// Returns a list of HTTP headers that were included in the signature or an
|
||||||
|
// error if signing the request failed. Generally for signed requests this value
|
||||||
|
// is not needed as the full request context will be captured by the http.Request
|
||||||
|
// value. It is included for reference though.
|
||||||
|
//
|
||||||
|
// Sign differs from Presign in that it will sign the request using HTTP
|
||||||
|
// header values. This type of signing is intended for http.Request values that
|
||||||
|
// will not be shared, or are shared in a way the header values on the request
|
||||||
|
// will not be lost.
|
||||||
|
//
|
||||||
|
// The requests body is an io.ReadSeeker so the SHA256 of the body can be
|
||||||
|
// generated. To bypass the signer computing the hash you can set the
|
||||||
|
// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
|
||||||
|
// only compute the hash if the request header value is empty.
|
||||||
|
func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
|
||||||
|
return v4.signWithBody(r, body, service, region, 0, signTime)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Presign signs AWS v4 requests with the provided body, service name, region
|
||||||
|
// the request is made to, and time the request is signed at. The signTime
|
||||||
|
// allows you to specify that a request is signed for the future, and cannot
|
||||||
|
// be used until then.
|
||||||
|
//
|
||||||
|
// Returns a list of HTTP headers that were included in the signature or an
|
||||||
|
// error if signing the request failed. For presigned requests these headers
|
||||||
|
// and their values must be included on the HTTP request when it is made. This
|
||||||
|
// is helpful to know what header values need to be shared with the party the
|
||||||
|
// presigned request will be distributed to.
|
||||||
|
//
|
||||||
|
// Presign differs from Sign in that it will sign the request using query string
|
||||||
|
// instead of header values. This allows you to share the Presigned Request's
|
||||||
|
// URL with third parties, or distribute it throughout your system with minimal
|
||||||
|
// dependencies.
|
||||||
|
//
|
||||||
|
// Presign also takes an exp value which is the duration the
|
||||||
|
// signed request will be valid after the signing time. This is allows you to
|
||||||
|
// set when the request will expire.
|
||||||
|
//
|
||||||
|
// The requests body is an io.ReadSeeker so the SHA256 of the body can be
|
||||||
|
// generated. To bypass the signer computing the hash you can set the
|
||||||
|
// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
|
||||||
|
// only compute the hash if the request header value is empty.
|
||||||
|
//
|
||||||
|
// Presigning a S3 request will not compute the body's SHA256 hash by default.
|
||||||
|
// This is done due to the general use case for S3 presigned URLs is to share
|
||||||
|
// PUT/GET capabilities. If you would like to include the body's SHA256 in the
|
||||||
|
// presigned request's signature you can set the "X-Amz-Content-Sha256"
|
||||||
|
// HTTP header and that will be included in the request's signature.
|
||||||
|
func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
|
||||||
|
return v4.signWithBody(r, body, service, region, exp, signTime)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
|
||||||
|
currentTimeFn := v4.currentTimeFn
|
||||||
|
if currentTimeFn == nil {
|
||||||
|
currentTimeFn = time.Now
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := &signingCtx{
|
||||||
|
Request: r,
|
||||||
|
Body: body,
|
||||||
|
Query: r.URL.Query(),
|
||||||
|
Time: signTime,
|
||||||
|
ExpireTime: exp,
|
||||||
|
isPresign: exp != 0,
|
||||||
|
ServiceName: service,
|
||||||
|
Region: region,
|
||||||
|
}
|
||||||
|
|
||||||
|
if ctx.isRequestSigned() {
|
||||||
|
if !v4.Credentials.IsExpired() && currentTimeFn().Before(ctx.Time.Add(10*time.Minute)) {
|
||||||
|
// If the request is already signed, and the credentials have not
|
||||||
|
// expired, and the request is not too old ignore the signing request.
|
||||||
|
return ctx.SignedHeaderVals, nil
|
||||||
|
}
|
||||||
|
ctx.Time = currentTimeFn()
|
||||||
|
ctx.handlePresignRemoval()
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
ctx.credValues, err = v4.Credentials.Get()
|
||||||
|
if err != nil {
|
||||||
|
return http.Header{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx.assignAmzQueryValues()
|
||||||
|
ctx.build(v4.DisableHeaderHoisting)
|
||||||
|
|
||||||
|
if v4.Debug.Matches(aws.LogDebugWithSigning) {
|
||||||
|
v4.logSigningInfo(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ctx.SignedHeaderVals, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ctx *signingCtx) handlePresignRemoval() {
|
||||||
|
if !ctx.isPresign {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// The credentials have expired for this request. The current signing
|
||||||
|
// is invalid, and needs to be request because the request will fail.
|
||||||
|
ctx.removePresign()
|
||||||
|
|
||||||
|
// Update the request's query string to ensure the values stays in
|
||||||
|
// sync in the case retrieving the new credentials fails.
|
||||||
|
ctx.Request.URL.RawQuery = ctx.Query.Encode()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ctx *signingCtx) assignAmzQueryValues() {
|
||||||
|
if ctx.isPresign {
|
||||||
|
ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
|
||||||
|
if ctx.credValues.SessionToken != "" {
|
||||||
|
ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
|
||||||
|
} else {
|
||||||
|
ctx.Query.Del("X-Amz-Security-Token")
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if ctx.credValues.SessionToken != "" {
|
||||||
|
ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SignRequestHandler is a named request handler the SDK will use to sign
|
||||||
|
// service client request with using the V4 signature.
|
||||||
|
var SignRequestHandler = request.NamedHandler{
|
||||||
|
Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
|
||||||
|
}
|
||||||
|
|
||||||
|
// SignSDKRequest signs an AWS request with the V4 signature. This
|
||||||
|
// request handler is bested used only with the SDK's built in service client's
|
||||||
|
// API operation requests.
|
||||||
|
//
|
||||||
|
// This function should not be used on its on its own, but in conjunction with
|
||||||
|
// an AWS service client's API operation call. To sign a standalone request
|
||||||
|
// not created by a service client's API operation method use the "Sign" or
|
||||||
|
// "Presign" functions of the "Signer" type.
|
||||||
|
//
|
||||||
|
// If the credentials of the request's config are set to
|
||||||
|
// credentials.AnonymousCredentials the request will not be signed.
|
||||||
|
func SignSDKRequest(req *request.Request) {
|
||||||
|
signSDKRequestWithCurrTime(req, time.Now)
|
||||||
|
}
|
||||||
|
func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time) {
|
||||||
|
// If the request does not need to be signed ignore the signing of the
|
||||||
|
// request if the AnonymousCredentials object is used.
|
||||||
|
if req.Config.Credentials == credentials.AnonymousCredentials {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
region := req.ClientInfo.SigningRegion
|
||||||
|
if region == "" {
|
||||||
|
region = aws.StringValue(req.Config.Region)
|
||||||
|
}
|
||||||
|
|
||||||
|
name := req.ClientInfo.SigningName
|
||||||
|
if name == "" {
|
||||||
|
name = req.ClientInfo.ServiceName
|
||||||
|
}
|
||||||
|
|
||||||
|
v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
|
||||||
|
v4.Debug = req.Config.LogLevel.Value()
|
||||||
|
v4.Logger = req.Config.Logger
|
||||||
|
v4.DisableHeaderHoisting = req.NotHoist
|
||||||
|
v4.currentTimeFn = curTimeFn
|
||||||
|
})
|
||||||
|
|
||||||
|
signingTime := req.Time
|
||||||
|
if !req.LastSignedAt.IsZero() {
|
||||||
|
signingTime = req.LastSignedAt
|
||||||
|
}
|
||||||
|
|
||||||
|
signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.Body, name, region, req.ExpireTime, signingTime)
|
||||||
|
if err != nil {
|
||||||
|
req.Error = err
|
||||||
|
req.SignedHeaderVals = nil
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
req.SignedHeaderVals = signedHeaders
|
||||||
|
req.LastSignedAt = curTimeFn()
|
||||||
|
}
|
||||||
|
|
||||||
|
const logSignInfoMsg = `DEBUG: Request Signiture:
|
||||||
|
---[ CANONICAL STRING ]-----------------------------
|
||||||
|
%s
|
||||||
|
---[ STRING TO SIGN ]--------------------------------
|
||||||
|
%s%s
|
||||||
|
-----------------------------------------------------`
|
||||||
|
const logSignedURLMsg = `
|
||||||
|
---[ SIGNED URL ]------------------------------------
|
||||||
|
%s`
|
||||||
|
|
||||||
|
func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
|
||||||
|
signedURLMsg := ""
|
||||||
|
if ctx.isPresign {
|
||||||
|
signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
|
||||||
|
}
|
||||||
|
msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
|
||||||
|
v4.Logger.Log(msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ctx *signingCtx) build(disableHeaderHoisting bool) {
|
||||||
|
ctx.buildTime() // no depends
|
||||||
|
ctx.buildCredentialString() // no depends
|
||||||
|
|
||||||
|
unsignedHeaders := ctx.Request.Header
|
||||||
|
if ctx.isPresign {
|
||||||
|
if !disableHeaderHoisting {
|
||||||
|
urlValues := url.Values{}
|
||||||
|
urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
|
||||||
|
for k := range urlValues {
|
||||||
|
ctx.Query[k] = urlValues[k]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx.buildBodyDigest()
|
||||||
|
ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
|
||||||
|
ctx.buildCanonicalString() // depends on canon headers / signed headers
|
||||||
|
ctx.buildStringToSign() // depends on canon string
|
||||||
|
ctx.buildSignature() // depends on string to sign
|
||||||
|
|
||||||
|
if ctx.isPresign {
|
||||||
|
ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature
|
||||||
|
} else {
|
||||||
|
parts := []string{
|
||||||
|
authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
|
||||||
|
"SignedHeaders=" + ctx.signedHeaders,
|
||||||
|
"Signature=" + ctx.signature,
|
||||||
|
}
|
||||||
|
ctx.Request.Header.Set("Authorization", strings.Join(parts, ", "))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ctx *signingCtx) buildTime() {
|
||||||
|
ctx.formattedTime = ctx.Time.UTC().Format(timeFormat)
|
||||||
|
ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat)
|
||||||
|
|
||||||
|
if ctx.isPresign {
|
||||||
|
duration := int64(ctx.ExpireTime / time.Second)
|
||||||
|
ctx.Query.Set("X-Amz-Date", ctx.formattedTime)
|
||||||
|
ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
|
||||||
|
} else {
|
||||||
|
ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ctx *signingCtx) buildCredentialString() {
|
||||||
|
ctx.credentialString = strings.Join([]string{
|
||||||
|
ctx.formattedShortTime,
|
||||||
|
ctx.Region,
|
||||||
|
ctx.ServiceName,
|
||||||
|
"aws4_request",
|
||||||
|
}, "/")
|
||||||
|
|
||||||
|
if ctx.isPresign {
|
||||||
|
ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
|
||||||
|
query := url.Values{}
|
||||||
|
unsignedHeaders := http.Header{}
|
||||||
|
for k, h := range header {
|
||||||
|
if r.IsValid(k) {
|
||||||
|
query[k] = h
|
||||||
|
} else {
|
||||||
|
unsignedHeaders[k] = h
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return query, unsignedHeaders
|
||||||
|
}
|
||||||
|
func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
|
||||||
|
var headers []string
|
||||||
|
headers = append(headers, "host")
|
||||||
|
for k, v := range header {
|
||||||
|
canonicalKey := http.CanonicalHeaderKey(k)
|
||||||
|
if !r.IsValid(canonicalKey) {
|
||||||
|
continue // ignored header
|
||||||
|
}
|
||||||
|
if ctx.SignedHeaderVals == nil {
|
||||||
|
ctx.SignedHeaderVals = make(http.Header)
|
||||||
|
}
|
||||||
|
|
||||||
|
lowerCaseKey := strings.ToLower(k)
|
||||||
|
if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
|
||||||
|
// include additional values
|
||||||
|
ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
headers = append(headers, lowerCaseKey)
|
||||||
|
ctx.SignedHeaderVals[lowerCaseKey] = v
|
||||||
|
}
|
||||||
|
sort.Strings(headers)
|
||||||
|
|
||||||
|
ctx.signedHeaders = strings.Join(headers, ";")
|
||||||
|
|
||||||
|
if ctx.isPresign {
|
||||||
|
ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders)
|
||||||
|
}
|
||||||
|
|
||||||
|
headerValues := make([]string, len(headers))
|
||||||
|
for i, k := range headers {
|
||||||
|
if k == "host" {
|
||||||
|
headerValues[i] = "host:" + ctx.Request.URL.Host
|
||||||
|
} else {
|
||||||
|
headerValues[i] = k + ":" +
|
||||||
|
strings.Join(ctx.SignedHeaderVals[k], ",")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ctx *signingCtx) buildCanonicalString() {
|
||||||
|
ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
|
||||||
|
uri := ctx.Request.URL.Opaque
|
||||||
|
if uri != "" {
|
||||||
|
uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
|
||||||
|
} else {
|
||||||
|
uri = ctx.Request.URL.Path
|
||||||
|
}
|
||||||
|
if uri == "" {
|
||||||
|
uri = "/"
|
||||||
|
}
|
||||||
|
|
||||||
|
if ctx.ServiceName != "s3" {
|
||||||
|
uri = rest.EscapePath(uri, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx.canonicalString = strings.Join([]string{
|
||||||
|
ctx.Request.Method,
|
||||||
|
uri,
|
||||||
|
ctx.Request.URL.RawQuery,
|
||||||
|
ctx.canonicalHeaders + "\n",
|
||||||
|
ctx.signedHeaders,
|
||||||
|
ctx.bodyDigest,
|
||||||
|
}, "\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ctx *signingCtx) buildStringToSign() {
|
||||||
|
ctx.stringToSign = strings.Join([]string{
|
||||||
|
authHeaderPrefix,
|
||||||
|
ctx.formattedTime,
|
||||||
|
ctx.credentialString,
|
||||||
|
hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))),
|
||||||
|
}, "\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ctx *signingCtx) buildSignature() {
|
||||||
|
secret := ctx.credValues.SecretAccessKey
|
||||||
|
date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime))
|
||||||
|
region := makeHmac(date, []byte(ctx.Region))
|
||||||
|
service := makeHmac(region, []byte(ctx.ServiceName))
|
||||||
|
credentials := makeHmac(service, []byte("aws4_request"))
|
||||||
|
signature := makeHmac(credentials, []byte(ctx.stringToSign))
|
||||||
|
ctx.signature = hex.EncodeToString(signature)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ctx *signingCtx) buildBodyDigest() {
|
||||||
|
hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
|
||||||
|
if hash == "" {
|
||||||
|
if ctx.isPresign && ctx.ServiceName == "s3" {
|
||||||
|
hash = "UNSIGNED-PAYLOAD"
|
||||||
|
} else if ctx.Body == nil {
|
||||||
|
hash = emptyStringSHA256
|
||||||
|
} else {
|
||||||
|
hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
|
||||||
|
}
|
||||||
|
if ctx.ServiceName == "s3" {
|
||||||
|
ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ctx.bodyDigest = hash
|
||||||
|
}
|
||||||
|
|
||||||
|
// isRequestSigned returns if the request is currently signed or presigned
|
||||||
|
func (ctx *signingCtx) isRequestSigned() bool {
|
||||||
|
if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if ctx.Request.Header.Get("Authorization") != "" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// unsign removes signing flags for both signed and presigned requests.
|
||||||
|
func (ctx *signingCtx) removePresign() {
|
||||||
|
ctx.Query.Del("X-Amz-Algorithm")
|
||||||
|
ctx.Query.Del("X-Amz-Signature")
|
||||||
|
ctx.Query.Del("X-Amz-Security-Token")
|
||||||
|
ctx.Query.Del("X-Amz-Date")
|
||||||
|
ctx.Query.Del("X-Amz-Expires")
|
||||||
|
ctx.Query.Del("X-Amz-Credential")
|
||||||
|
ctx.Query.Del("X-Amz-SignedHeaders")
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeHmac(key []byte, data []byte) []byte {
|
||||||
|
hash := hmac.New(sha256.New, key)
|
||||||
|
hash.Write(data)
|
||||||
|
return hash.Sum(nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeSha256(data []byte) []byte {
|
||||||
|
hash := sha256.New()
|
||||||
|
hash.Write(data)
|
||||||
|
return hash.Sum(nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeSha256Reader(reader io.ReadSeeker) []byte {
|
||||||
|
hash := sha256.New()
|
||||||
|
start, _ := reader.Seek(0, 1)
|
||||||
|
defer reader.Seek(start, 0)
|
||||||
|
|
||||||
|
io.Copy(hash, reader)
|
||||||
|
return hash.Sum(nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
const doubleSpaces = " "
|
||||||
|
|
||||||
|
var doubleSpaceBytes = []byte(doubleSpaces)
|
||||||
|
|
||||||
|
func stripExcessSpaces(headerVals []string) []string {
|
||||||
|
vals := make([]string, len(headerVals))
|
||||||
|
for i, str := range headerVals {
|
||||||
|
// Trim leading and trailing spaces
|
||||||
|
trimmed := strings.TrimSpace(str)
|
||||||
|
|
||||||
|
idx := strings.Index(trimmed, doubleSpaces)
|
||||||
|
var buf []byte
|
||||||
|
for idx > -1 {
|
||||||
|
// Multiple adjacent spaces found
|
||||||
|
if buf == nil {
|
||||||
|
// first time create the buffer
|
||||||
|
buf = []byte(trimmed)
|
||||||
|
}
|
||||||
|
|
||||||
|
stripToIdx := -1
|
||||||
|
for j := idx + 1; j < len(buf); j++ {
|
||||||
|
if buf[j] != ' ' {
|
||||||
|
buf = append(buf[:idx+1], buf[j:]...)
|
||||||
|
stripToIdx = j
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if stripToIdx >= 0 {
|
||||||
|
idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes)
|
||||||
|
if idx >= 0 {
|
||||||
|
idx += stripToIdx
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
idx = -1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if buf != nil {
|
||||||
|
vals[i] = string(buf)
|
||||||
|
} else {
|
||||||
|
vals[i] = trimmed
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return vals
|
||||||
|
}
|
26
vendor/github.com/aws/aws-sdk-go/aws/types.go
generated
vendored
26
vendor/github.com/aws/aws-sdk-go/aws/types.go
generated
vendored
|
@ -61,23 +61,41 @@ func (r ReaderSeekerCloser) Close() error {
|
||||||
type WriteAtBuffer struct {
|
type WriteAtBuffer struct {
|
||||||
buf []byte
|
buf []byte
|
||||||
m sync.Mutex
|
m sync.Mutex
|
||||||
|
|
||||||
|
// GrowthCoeff defines the growth rate of the internal buffer. By
|
||||||
|
// default, the growth rate is 1, where expanding the internal
|
||||||
|
// buffer will allocate only enough capacity to fit the new expected
|
||||||
|
// length.
|
||||||
|
GrowthCoeff float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
|
||||||
|
// provided by buf.
|
||||||
|
func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
|
||||||
|
return &WriteAtBuffer{buf: buf}
|
||||||
}
|
}
|
||||||
|
|
||||||
// WriteAt writes a slice of bytes to a buffer starting at the position provided
|
// WriteAt writes a slice of bytes to a buffer starting at the position provided
|
||||||
// The number of bytes written will be returned, or error. Can overwrite previous
|
// The number of bytes written will be returned, or error. Can overwrite previous
|
||||||
// written slices if the write ats overlap.
|
// written slices if the write ats overlap.
|
||||||
func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
|
func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
|
||||||
|
pLen := len(p)
|
||||||
|
expLen := pos + int64(pLen)
|
||||||
b.m.Lock()
|
b.m.Lock()
|
||||||
defer b.m.Unlock()
|
defer b.m.Unlock()
|
||||||
|
|
||||||
expLen := pos + int64(len(p))
|
|
||||||
if int64(len(b.buf)) < expLen {
|
if int64(len(b.buf)) < expLen {
|
||||||
newBuf := make([]byte, expLen)
|
if int64(cap(b.buf)) < expLen {
|
||||||
|
if b.GrowthCoeff < 1 {
|
||||||
|
b.GrowthCoeff = 1
|
||||||
|
}
|
||||||
|
newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
|
||||||
copy(newBuf, b.buf)
|
copy(newBuf, b.buf)
|
||||||
b.buf = newBuf
|
b.buf = newBuf
|
||||||
}
|
}
|
||||||
|
b.buf = b.buf[:expLen]
|
||||||
|
}
|
||||||
copy(b.buf[pos:], p)
|
copy(b.buf[pos:], p)
|
||||||
return len(p), nil
|
return pLen, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Bytes returns a slice of bytes written to the buffer.
|
// Bytes returns a slice of bytes written to the buffer.
|
||||||
|
|
2
vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
2
vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
|
@ -5,4 +5,4 @@ package aws
|
||||||
const SDKName = "aws-sdk-go"
|
const SDKName = "aws-sdk-go"
|
||||||
|
|
||||||
// SDKVersion is the version of this SDK
|
// SDKVersion is the version of this SDK
|
||||||
const SDKVersion = "1.1.0"
|
const SDKVersion = "1.2.4"
|
||||||
|
|
39  vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json  generated  vendored
@@ -8,6 +8,9 @@
      "endpoint": "{service}.{region}.amazonaws.com.cn",
      "signatureVersion": "v4"
    },
    "cn-north-1/ec2metadata": {
      "endpoint": "http://169.254.169.254/latest"
    },
    "us-gov-west-1/iam": {
      "endpoint": "iam.us-gov.amazonaws.com"
    },
@@ -17,6 +20,9 @@
    "us-gov-west-1/s3": {
      "endpoint": "s3-{region}.amazonaws.com"
    },
    "us-gov-west-1/ec2metadata": {
      "endpoint": "http://169.254.169.254/latest"
    },
    "*/cloudfront": {
      "endpoint": "cloudfront.amazonaws.com",
      "signingRegion": "us-east-1"
@@ -30,8 +36,7 @@
      "signingRegion": "us-east-1"
    },
    "*/ec2metadata": {
      "endpoint": "http://169.254.169.254/latest",
      "endpoint": "http://169.254.169.254/latest"
      "signingRegion": "us-east-1"
    },
    "*/iam": {
      "endpoint": "iam.amazonaws.com",
@@ -57,36 +62,14 @@
      "endpoint": "sdb.amazonaws.com",
      "signingRegion": "us-east-1"
    },
    "*/s3": {
      "endpoint": "s3-{region}.amazonaws.com"
    },
    "us-east-1/s3": {
      "endpoint": "s3.amazonaws.com"
    },
    "us-west-1/s3": {
      "endpoint": "s3-{region}.amazonaws.com"
    },
    "us-west-2/s3": {
      "endpoint": "s3-{region}.amazonaws.com"
    },
    "eu-west-1/s3": {
      "endpoint": "s3-{region}.amazonaws.com"
    },
    "ap-southeast-1/s3": {
      "endpoint": "s3-{region}.amazonaws.com"
    },
    "ap-southeast-2/s3": {
      "endpoint": "s3-{region}.amazonaws.com"
    },
    "ap-northeast-1/s3": {
      "endpoint": "s3-{region}.amazonaws.com"
    },
    "ap-northeast-2/s3": {
      "endpoint": "s3-{region}.amazonaws.com"
    },
    "sa-east-1/s3": {
      "endpoint": "s3-{region}.amazonaws.com"
    },
    "eu-central-1/s3": {
      "endpoint": "{service}.{region}.amazonaws.com",
      "endpoint": "{service}.{region}.amazonaws.com"
      "signatureVersion": "v4"
    }
  }
}
34  vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go  generated  vendored
@@ -32,7 +32,6 @@ var endpointsMap = endpointStruct{
    },
    "*/ec2metadata": {
        Endpoint:      "http://169.254.169.254/latest",
        SigningRegion: "us-east-1",
    },
    "*/iam": {
        Endpoint:      "iam.amazonaws.com",
@@ -46,6 +45,9 @@ var endpointsMap = endpointStruct{
        Endpoint:      "route53.amazonaws.com",
        SigningRegion: "us-east-1",
    },
    "*/s3": {
        Endpoint: "s3-{region}.amazonaws.com",
    },
    "*/sts": {
        Endpoint:      "sts.amazonaws.com",
        SigningRegion: "us-east-1",
@@ -54,30 +56,15 @@ var endpointsMap = endpointStruct{
        Endpoint:      "waf.amazonaws.com",
        SigningRegion: "us-east-1",
    },
    "ap-northeast-1/s3": {
        Endpoint: "s3-{region}.amazonaws.com",
    },
    "ap-northeast-2/s3": {
        Endpoint: "s3-{region}.amazonaws.com",
    },
    "ap-southeast-1/s3": {
        Endpoint: "s3-{region}.amazonaws.com",
    },
    "ap-southeast-2/s3": {
        Endpoint: "s3-{region}.amazonaws.com",
    },
    "cn-north-1/*": {
        Endpoint: "{service}.{region}.amazonaws.com.cn",
    },
    "cn-north-1/ec2metadata": {
        Endpoint: "http://169.254.169.254/latest",
    },
    "eu-central-1/s3": {
        Endpoint: "{service}.{region}.amazonaws.com",
    },
    "eu-west-1/s3": {
        Endpoint: "s3-{region}.amazonaws.com",
    },
    "sa-east-1/s3": {
        Endpoint: "s3-{region}.amazonaws.com",
    },
    "us-east-1/s3": {
        Endpoint: "s3.amazonaws.com",
    },
@@ -85,6 +72,9 @@ var endpointsMap = endpointStruct{
        Endpoint:      "sdb.amazonaws.com",
        SigningRegion: "us-east-1",
    },
    "us-gov-west-1/ec2metadata": {
        Endpoint: "http://169.254.169.254/latest",
    },
    "us-gov-west-1/iam": {
        Endpoint: "iam.us-gov.amazonaws.com",
    },
@@ -94,11 +84,5 @@ var endpointsMap = endpointStruct{
    "us-gov-west-1/sts": {
        Endpoint: "sts.us-gov-west-1.amazonaws.com",
    },
    "us-west-1/s3": {
        Endpoint: "s3-{region}.amazonaws.com",
    },
    "us-west-2/s3": {
        Endpoint: "s3-{region}.amazonaws.com",
    },
  },
}
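A hedged sketch of what the collapsed "*/s3" rule means in practice; the region here is only an example, and an explicit aws.Config.Endpoint would still override the resolved host:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// eu-west-1 no longer needs its own "eu-west-1/s3" entry; the generic
	// "*/s3" rule resolves it to the s3-eu-west-1 host.
	sess := session.New(&aws.Config{Region: aws.String("eu-west-1")})
	svc := s3.New(sess)
	fmt.Println(svc.ClientInfo.Endpoint) // prints the resolved S3 endpoint
}
```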
38  vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go  generated  vendored
@@ -2,7 +2,7 @@ package query

import (
    "encoding/xml"
    "io"
    "io/ioutil"

    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/request"
@@ -15,6 +15,10 @@ type xmlErrorResponse struct {
    RequestID string `xml:"RequestId"`
}

type xmlServiceUnavailableResponse struct {
    XMLName xml.Name `xml:"ServiceUnavailableException"`
}

// UnmarshalErrorHandler is a name request handler to unmarshal request errors
var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}

@@ -22,11 +26,16 @@ var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalEr
func UnmarshalError(r *request.Request) {
    defer r.HTTPResponse.Body.Close()

    resp := &xmlErrorResponse{}
    bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body)
    err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
    if err != nil {
    if err != nil && err != io.EOF {
        r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err)
        r.Error = awserr.New("SerializationError", "failed to decode query XML error response", err)
        return
    } else {
    }

    // First check for specific error
    resp := xmlErrorResponse{}
    decodeErr := xml.Unmarshal(bodyBytes, &resp)
    if decodeErr == nil {
        reqID := resp.RequestID
        if reqID == "" {
            reqID = r.RequestID
@@ -36,5 +45,22 @@ func UnmarshalError(r *request.Request) {
            r.HTTPResponse.StatusCode,
            reqID,
        )
        return
    }

    // Check for unhandled error
    servUnavailResp := xmlServiceUnavailableResponse{}
    unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp)
    if unavailErr == nil {
        r.Error = awserr.NewRequestFailure(
            awserr.New("ServiceUnavailableException", "service is unavailable", nil),
            r.HTTPResponse.StatusCode,
            r.RequestID,
        )
        return
    }

    // Failed to retrieve any error message from the response body
    r.Error = awserr.New("SerializationError",
        "failed to decode query XML error response", decodeErr)
}
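For context, a hedged sketch of how SDK callers usually inspect the errors this handler produces; awserr.Error and awserr.RequestFailure are the existing error interfaces in the vendored SDK:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

// describeError prints the structured fields carried by SDK errors such as
// the ones built by UnmarshalError above.
func describeError(err error) {
	if reqErr, ok := err.(awserr.RequestFailure); ok {
		fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID())
		return
	}
	if aerr, ok := err.(awserr.Error); ok {
		fmt.Println(aerr.Code(), aerr.Message())
	}
}

func main() {
	describeError(awserr.NewRequestFailure(
		awserr.New("ServiceUnavailableException", "service is unavailable", nil), 503, "example-request-id"))
}
```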
3  vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go  generated  vendored
@@ -222,8 +222,7 @@ func EscapePath(path string, encodeSep bool) string {
    if noEscape[c] || (c == '/' && !encodeSep) {
        buf.WriteByte(c)
    } else {
        buf.WriteByte('%')
        fmt.Fprintf(&buf, "%%%02X", c)
        buf.WriteString(strings.ToUpper(strconv.FormatUint(uint64(c), 16)))
    }
  }
  return buf.String()
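A hedged illustration of why the EscapePath change matters: the old FormatUint-based escaping drops the leading zero for bytes below 0x10, while fmt.Fprintf("%%%02X", c) always emits two hex digits:

```go
package main

import (
	"bytes"
	"fmt"
	"strconv"
	"strings"
)

func main() {
	c := byte('\n') // 0x0A

	// Old form: strconv.FormatUint produces "a", upper-cased to "A".
	var oldBuf bytes.Buffer
	oldBuf.WriteByte('%')
	oldBuf.WriteString(strings.ToUpper(strconv.FormatUint(uint64(c), 16)))

	// New form: always two hex digits.
	var newBuf bytes.Buffer
	fmt.Fprintf(&newBuf, "%%%02X", c)

	fmt.Println(oldBuf.String(), newBuf.String()) // %A %0A
}
```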
5  vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go  generated  vendored
@@ -3,6 +3,7 @@ package rest
import (
    "encoding/base64"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"
    "reflect"
@@ -51,6 +52,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
    if payload.IsValid() {
        switch payload.Interface().(type) {
        case []byte:
            defer r.HTTPResponse.Body.Close()
            b, err := ioutil.ReadAll(r.HTTPResponse.Body)
            if err != nil {
                r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
@@ -58,6 +60,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
            payload.Set(reflect.ValueOf(b))
        }
        case *string:
            defer r.HTTPResponse.Body.Close()
            b, err := ioutil.ReadAll(r.HTTPResponse.Body)
            if err != nil {
                r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
@@ -72,6 +75,8 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
            case "aws.ReadSeekCloser", "io.ReadCloser":
                payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
            default:
                io.Copy(ioutil.Discard, r.HTTPResponse.Body)
                defer r.HTTPResponse.Body.Close()
                r.Error = awserr.New("SerializationError",
                    "failed to decode REST response",
                    fmt.Errorf("unknown payload type %s", payload.Type()))
438  vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go  generated  vendored
@@ -1,438 +0,0 @@
// Package v4 implements signing for AWS V4 signer
package v4

import (
    "crypto/hmac"
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "sort"
    "strconv"
    "strings"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/private/protocol/rest"
)

const (
    authHeaderPrefix = "AWS4-HMAC-SHA256"
    timeFormat       = "20060102T150405Z"
    shortTimeFormat  = "20060102"
)

var ignoredHeaders = rules{
    blacklist{
        mapRule{
            "Content-Length": struct{}{},
            "User-Agent":     struct{}{},
        },
    },
}

// requiredSignedHeaders is a whitelist for build canonical headers.
var requiredSignedHeaders = rules{
    whitelist{
        mapRule{
            "Cache-Control":                         struct{}{},
            "Content-Disposition":                   struct{}{},
            "Content-Encoding":                      struct{}{},
            "Content-Language":                      struct{}{},
            "Content-Md5":                           struct{}{},
            "Content-Type":                          struct{}{},
            "Expires":                               struct{}{},
            "If-Match":                              struct{}{},
            "If-Modified-Since":                     struct{}{},
            "If-None-Match":                         struct{}{},
            "If-Unmodified-Since":                   struct{}{},
            "Range":                                 struct{}{},
            "X-Amz-Acl":                             struct{}{},
            "X-Amz-Copy-Source":                     struct{}{},
            "X-Amz-Copy-Source-If-Match":            struct{}{},
            "X-Amz-Copy-Source-If-Modified-Since":   struct{}{},
            "X-Amz-Copy-Source-If-None-Match":       struct{}{},
            "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
            "X-Amz-Copy-Source-Range":               struct{}{},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       struct{}{},
            "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
            "X-Amz-Grant-Full-control":                         struct{}{},
            "X-Amz-Grant-Read":                                 struct{}{},
            "X-Amz-Grant-Read-Acp":                             struct{}{},
            "X-Amz-Grant-Write":                                struct{}{},
            "X-Amz-Grant-Write-Acp":                            struct{}{},
            "X-Amz-Metadata-Directive":                         struct{}{},
            "X-Amz-Mfa":                                        struct{}{},
            "X-Amz-Request-Payer":                              struct{}{},
            "X-Amz-Server-Side-Encryption":                     struct{}{},
            "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id":      struct{}{},
            "X-Amz-Server-Side-Encryption-Customer-Algorithm":  struct{}{},
            "X-Amz-Server-Side-Encryption-Customer-Key":        struct{}{},
            "X-Amz-Server-Side-Encryption-Customer-Key-Md5":    struct{}{},
            "X-Amz-Storage-Class":                              struct{}{},
            "X-Amz-Website-Redirect-Location":                  struct{}{},
        },
    },
    patterns{"X-Amz-Meta-"},
}

// allowedHoisting is a whitelist for build query headers. The boolean value
// represents whether or not it is a pattern.
var allowedQueryHoisting = inclusiveRules{
    blacklist{requiredSignedHeaders},
    patterns{"X-Amz-"},
}

type signer struct {
    Request     *http.Request
    Time        time.Time
    ExpireTime  time.Duration
    ServiceName string
    Region      string
    CredValues  credentials.Value
    Credentials *credentials.Credentials
    Query       url.Values
    Body        io.ReadSeeker
    Debug       aws.LogLevelType
    Logger      aws.Logger

    isPresign          bool
    formattedTime      string
    formattedShortTime string

    signedHeaders    string
    canonicalHeaders string
    canonicalString  string
    credentialString string
    stringToSign     string
    signature        string
    authorization    string
    notHoist         bool
    signedHeaderVals http.Header
}

// Sign requests with signature version 4.
//
// Will sign the requests with the service config's Credentials object
// Signing is skipped if the credentials is the credentials.AnonymousCredentials
// object.
func Sign(req *request.Request) {
    // If the request does not need to be signed ignore the signing of the
    // request if the AnonymousCredentials object is used.
    if req.Config.Credentials == credentials.AnonymousCredentials {
        return
    }

    region := req.ClientInfo.SigningRegion
    if region == "" {
        region = aws.StringValue(req.Config.Region)
    }

    name := req.ClientInfo.SigningName
    if name == "" {
        name = req.ClientInfo.ServiceName
    }

    s := signer{
        Request:     req.HTTPRequest,
        Time:        req.Time,
        ExpireTime:  req.ExpireTime,
        Query:       req.HTTPRequest.URL.Query(),
        Body:        req.Body,
        ServiceName: name,
        Region:      region,
        Credentials: req.Config.Credentials,
        Debug:       req.Config.LogLevel.Value(),
        Logger:      req.Config.Logger,
        notHoist:    req.NotHoist,
    }

    req.Error = s.sign()
    req.SignedHeaderVals = s.signedHeaderVals
}

func (v4 *signer) sign() error {
    if v4.ExpireTime != 0 {
        v4.isPresign = true
    }

    if v4.isRequestSigned() {
        if !v4.Credentials.IsExpired() {
            // If the request is already signed, and the credentials have not
            // expired yet ignore the signing request.
            return nil
        }

        // The credentials have expired for this request. The current signing
        // is invalid, and needs to be request because the request will fail.
        if v4.isPresign {
            v4.removePresign()
            // Update the request's query string to ensure the values stays in
            // sync in the case retrieving the new credentials fails.
            v4.Request.URL.RawQuery = v4.Query.Encode()
        }
    }

    var err error
    v4.CredValues, err = v4.Credentials.Get()
    if err != nil {
        return err
    }

    if v4.isPresign {
        v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
        if v4.CredValues.SessionToken != "" {
            v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
        } else {
            v4.Query.Del("X-Amz-Security-Token")
        }
    } else if v4.CredValues.SessionToken != "" {
        v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
    }

    v4.build()

    if v4.Debug.Matches(aws.LogDebugWithSigning) {
        v4.logSigningInfo()
    }

    return nil
}

const logSignInfoMsg = `DEBUG: Request Signiture:
---[ CANONICAL STRING ]-----------------------------
%s
---[ STRING TO SIGN ]--------------------------------
%s%s
-----------------------------------------------------`
const logSignedURLMsg = `
---[ SIGNED URL ]------------------------------------
%s`

func (v4 *signer) logSigningInfo() {
    signedURLMsg := ""
    if v4.isPresign {
        signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String())
    }
    msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg)
    v4.Logger.Log(msg)
}

func (v4 *signer) build() {

    v4.buildTime()             // no depends
    v4.buildCredentialString() // no depends

    unsignedHeaders := v4.Request.Header
    if v4.isPresign {
        if !v4.notHoist {
            urlValues := url.Values{}
            urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
            for k := range urlValues {
                v4.Query[k] = urlValues[k]
            }
        }
    }

    v4.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
    v4.buildCanonicalString() // depends on canon headers / signed headers
    v4.buildStringToSign()    // depends on canon string
    v4.buildSignature()       // depends on string to sign

    if v4.isPresign {
        v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature
    } else {
        parts := []string{
            authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString,
            "SignedHeaders=" + v4.signedHeaders,
            "Signature=" + v4.signature,
        }
        v4.Request.Header.Set("Authorization", strings.Join(parts, ", "))
    }
}

func (v4 *signer) buildTime() {
    v4.formattedTime = v4.Time.UTC().Format(timeFormat)
    v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat)

    if v4.isPresign {
        duration := int64(v4.ExpireTime / time.Second)
        v4.Query.Set("X-Amz-Date", v4.formattedTime)
        v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
    } else {
        v4.Request.Header.Set("X-Amz-Date", v4.formattedTime)
    }
}

func (v4 *signer) buildCredentialString() {
    v4.credentialString = strings.Join([]string{
        v4.formattedShortTime,
        v4.Region,
        v4.ServiceName,
        "aws4_request",
    }, "/")

    if v4.isPresign {
        v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString)
    }
}

func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
    query := url.Values{}
    unsignedHeaders := http.Header{}
    for k, h := range header {
        if r.IsValid(k) {
            query[k] = h
        } else {
            unsignedHeaders[k] = h
        }
    }

    return query, unsignedHeaders
}
func (v4 *signer) buildCanonicalHeaders(r rule, header http.Header) {
    var headers []string
    headers = append(headers, "host")
    for k, v := range header {
        canonicalKey := http.CanonicalHeaderKey(k)
        if !r.IsValid(canonicalKey) {
            continue // ignored header
        }

        lowerCaseKey := strings.ToLower(k)
        headers = append(headers, lowerCaseKey)

        if v4.signedHeaderVals == nil {
            v4.signedHeaderVals = make(http.Header)
        }
        v4.signedHeaderVals[lowerCaseKey] = v
    }
    sort.Strings(headers)

    v4.signedHeaders = strings.Join(headers, ";")

    if v4.isPresign {
        v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders)
    }

    headerValues := make([]string, len(headers))
    for i, k := range headers {
        if k == "host" {
            headerValues[i] = "host:" + v4.Request.URL.Host
        } else {
            headerValues[i] = k + ":" +
                strings.Join(v4.Request.Header[http.CanonicalHeaderKey(k)], ",")
        }
    }

    v4.canonicalHeaders = strings.Join(headerValues, "\n")
}

func (v4 *signer) buildCanonicalString() {
    v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1)
    uri := v4.Request.URL.Opaque
    if uri != "" {
        uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
    } else {
        uri = v4.Request.URL.Path
    }
    if uri == "" {
        uri = "/"
    }

    if v4.ServiceName != "s3" {
        uri = rest.EscapePath(uri, false)
    }

    v4.canonicalString = strings.Join([]string{
        v4.Request.Method,
        uri,
        v4.Request.URL.RawQuery,
        v4.canonicalHeaders + "\n",
        v4.signedHeaders,
        v4.bodyDigest(),
    }, "\n")
}

func (v4 *signer) buildStringToSign() {
    v4.stringToSign = strings.Join([]string{
        authHeaderPrefix,
        v4.formattedTime,
        v4.credentialString,
        hex.EncodeToString(makeSha256([]byte(v4.canonicalString))),
    }, "\n")
}

func (v4 *signer) buildSignature() {
    secret := v4.CredValues.SecretAccessKey
    date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime))
    region := makeHmac(date, []byte(v4.Region))
    service := makeHmac(region, []byte(v4.ServiceName))
    credentials := makeHmac(service, []byte("aws4_request"))
    signature := makeHmac(credentials, []byte(v4.stringToSign))
    v4.signature = hex.EncodeToString(signature)
}

func (v4 *signer) bodyDigest() string {
    hash := v4.Request.Header.Get("X-Amz-Content-Sha256")
    if hash == "" {
        if v4.isPresign && v4.ServiceName == "s3" {
            hash = "UNSIGNED-PAYLOAD"
        } else if v4.Body == nil {
            hash = hex.EncodeToString(makeSha256([]byte{}))
        } else {
            hash = hex.EncodeToString(makeSha256Reader(v4.Body))
        }
        v4.Request.Header.Add("X-Amz-Content-Sha256", hash)
    }
    return hash
}

// isRequestSigned returns if the request is currently signed or presigned
func (v4 *signer) isRequestSigned() bool {
    if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" {
        return true
    }
    if v4.Request.Header.Get("Authorization") != "" {
        return true
    }

    return false
}

// unsign removes signing flags for both signed and presigned requests.
func (v4 *signer) removePresign() {
    v4.Query.Del("X-Amz-Algorithm")
    v4.Query.Del("X-Amz-Signature")
    v4.Query.Del("X-Amz-Security-Token")
    v4.Query.Del("X-Amz-Date")
    v4.Query.Del("X-Amz-Expires")
    v4.Query.Del("X-Amz-Credential")
    v4.Query.Del("X-Amz-SignedHeaders")
}

func makeHmac(key []byte, data []byte) []byte {
    hash := hmac.New(sha256.New, key)
    hash.Write(data)
    return hash.Sum(nil)
}

func makeSha256(data []byte) []byte {
    hash := sha256.New()
    hash.Write(data)
    return hash.Sum(nil)
}

func makeSha256Reader(reader io.ReadSeeker) []byte {
    hash := sha256.New()
    start, _ := reader.Seek(0, 1)
    defer reader.Seek(start, 0)

    io.Copy(hash, reader)
    return hash.Sum(nil)
}
18  vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy.go  generated  vendored
@@ -13,6 +13,7 @@ import (
    "net/url"
    "strings"
    "time"
    "unicode"
)

// An AWSEpochTime wraps a time value providing JSON serialization needed for
@@ -110,6 +111,12 @@ func (p *Policy) Validate() error {
        if s.Resource == "" {
            return fmt.Errorf("statement at index %d does not have a resource", i)
        }
        if !isASCII(s.Resource) {
            return fmt.Errorf("unable to sign resource, [%s]. "+
                "Resources must only contain ascii characters. "+
                "Hostnames with unicode should be encoded as Punycode, (e.g. golang.org/x/net/idna), "+
                "and URL unicode path/query characters should be escaped.", s.Resource)
        }
    }

    return nil
@@ -120,7 +127,7 @@ func (p *Policy) Validate() error {
func CreateResource(scheme, u string) (string, error) {
    scheme = strings.ToLower(scheme)

    if scheme == "http" || scheme == "https" {
    if scheme == "http" || scheme == "https" || scheme == "http*" || scheme == "*" {
        return u, nil
    }

@@ -208,3 +215,12 @@ func awsEscapeEncoded(b []byte) {
        }
    }
}

func isASCII(u string) bool {
    for _, c := range u {
        if c > unicode.MaxASCII {
            return false
        }
    }
    return true
}
241  vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_cookie.go  generated  vendored  Normal file
@@ -0,0 +1,241 @@
package sign

import (
    "crypto/rsa"
    "fmt"
    "net/http"
    "strings"
    "time"
)

const (
    // CookiePolicyName name of the policy cookie
    CookiePolicyName = "CloudFront-Policy"
    // CookieSignatureName name of the signature cookie
    CookieSignatureName = "CloudFront-Signature"
    // CookieKeyIDName name of the signing Key ID cookie
    CookieKeyIDName = "CloudFront-Key-Pair-Id"
)

// A CookieOptions optional additonal options that can be applied to the signed
// cookies.
type CookieOptions struct {
    Path   string
    Domain string
    Secure bool
}

// apply will integration the options provided into the base cookie options
// a new copy will be returned. The base CookieOption will not be modified.
func (o CookieOptions) apply(opts ...func(*CookieOptions)) CookieOptions {
    if len(opts) == 0 {
        return o
    }

    for _, opt := range opts {
        opt(&o)
    }

    return o
}

// A CookieSigner provides signing utilities to sign Cookies for Amazon CloudFront
// resources. Using a private key and Credential Key Pair key ID the CookieSigner
// only needs to be created once per Credential Key Pair key ID and private key.
//
// More information about signed Cookies and their structure can be found at:
// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-setting-signed-cookie-custom-policy.html
//
// To sign a Cookie, create a CookieSigner with your private key and credential
// pair key ID. Once you have a CookieSigner instance you can call Sign or
// SignWithPolicy to sign the URLs.
//
// The signer is safe to use concurrently, but the optional cookies options
// are not safe to modify concurrently.
type CookieSigner struct {
    keyID   string
    privKey *rsa.PrivateKey

    Opts CookieOptions
}

// NewCookieSigner constructs and returns a new CookieSigner to be used to for
// signing Amazon CloudFront URL resources with.
func NewCookieSigner(keyID string, privKey *rsa.PrivateKey, opts ...func(*CookieOptions)) *CookieSigner {
    signer := &CookieSigner{
        keyID:   keyID,
        privKey: privKey,
        Opts:    CookieOptions{}.apply(opts...),
    }

    return signer
}

// Sign returns the cookies needed to allow user agents to make arbetrary
// requests to cloudfront for the resource(s) defined by the policy.
//
// Sign will create a CloudFront policy with only a resource and condition of
// DateLessThan equal to the expires time provided.
//
// The returned slice cookies should all be added to the Client's cookies or
// server's response.
//
// Example:
//   s := NewCookieSigner(keyID, privKey)
//
//   // Get Signed cookies for a resource that will expire in 1 hour
//   cookies, err := s.Sign("*", time.Now().Add(1 * time.Hour))
//   if err != nil {
//     fmt.Println("failed to create signed cookies", err)
//     return
//   }
//
//   // Or get Signed cookies for a resource that will expire in 1 hour
//   // and set path and domain of cookies
//   cookies, err := s.Sign("*", time.Now().Add(1 * time.Hour), func(o *sign.CookieOptions) {
//     o.Path = "/"
//     o.Domain = ".example.com"
//   })
//   if err != nil {
//     fmt.Println("failed to create signed cookies", err)
//     return
//   }
//
//   // Server Response via http.ResponseWriter
//   for _, c := range cookies {
//     http.SetCookie(w, c)
//   }
//
//   // Client request via the cookie jar
//   if client.CookieJar != nil {
//     for _, c := range cookies {
//        client.Cookie(w, c)
//     }
//   }
func (s CookieSigner) Sign(u string, expires time.Time, opts ...func(*CookieOptions)) ([]*http.Cookie, error) {
    scheme, err := cookieURLScheme(u)
    if err != nil {
        return nil, err
    }

    resource, err := CreateResource(scheme, u)
    if err != nil {
        return nil, err
    }

    p := NewCannedPolicy(resource, expires)
    return createCookies(p, s.keyID, s.privKey, s.Opts.apply(opts...))
}

// Returns and validates the URL's scheme.
// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-setting-signed-cookie-custom-policy.html#private-content-custom-policy-statement-cookies
func cookieURLScheme(u string) (string, error) {
    parts := strings.SplitN(u, "://", 2)
    if len(parts) != 2 {
        return "", fmt.Errorf("invalid cookie URL, missing scheme")
    }

    scheme := strings.ToLower(parts[0])
    if scheme != "http" && scheme != "https" && scheme != "http*" {
        return "", fmt.Errorf("invalid cookie URL scheme. Expect http, https, or http*. Go, %s", scheme)
    }

    return scheme, nil
}

// SignWithPolicy returns the cookies needed to allow user agents to make
// arbetrairy requets to cloudfront for the resource(s) defined by the policy.
//
// The returned slice cookies should all be added to the Client's cookies or
// server's response.
//
// Example:
//   s := NewCookieSigner(keyID, privKey)
//
//   policy := &sign.Policy{
//     Statements: []sign.Statement{
//       {
//         // Read the provided documentation on how to set this
//         // correctly, you'll probably want to use wildcards.
//         Resource: RawCloudFrontURL,
//         Condition: sign.Condition{
//           // Optional IP source address range
//           IPAddress: &sign.IPAddress{SourceIP: "192.0.2.0/24"},
//           // Optional date URL is not valid until
//           DateGreaterThan: &sign.AWSEpochTime{time.Now().Add(30 * time.Minute)},
//           // Required date the URL will expire after
//           DateLessThan: &sign.AWSEpochTime{time.Now().Add(1 * time.Hour)},
//         },
//       },
//     },
//   }
//
//   // Get Signed cookies for a resource that will expire in 1 hour
//   cookies, err := s.SignWithPolicy(policy)
//   if err != nil {
//     fmt.Println("failed to create signed cookies", err)
//     return
//   }
//
//   // Or get Signed cookies for a resource that will expire in 1 hour
//   // and set path and domain of cookies
//   cookies, err := s.Sign(policy, func(o *sign.CookieOptions) {
//     o.Path = "/"
//     o.Domain = ".example.com"
//   })
//   if err != nil {
//     fmt.Println("failed to create signed cookies", err)
//     return
//   }
//
//   // Server Response via http.ResponseWriter
//   for _, c := range cookies {
//     http.SetCookie(w, c)
//   }
//
//   // Client request via the cookie jar
//   if client.CookieJar != nil {
//     for _, c := range cookies {
//        client.Cookie(w, c)
//     }
//   }
func (s CookieSigner) SignWithPolicy(p *Policy, opts ...func(*CookieOptions)) ([]*http.Cookie, error) {
    return createCookies(p, s.keyID, s.privKey, s.Opts.apply(opts...))
}

// Prepares the cookies to be attached to the header. An (optional) options
// struct is provided in case people don't want to manually edit their cookies.
func createCookies(p *Policy, keyID string, privKey *rsa.PrivateKey, opt CookieOptions) ([]*http.Cookie, error) {
    b64Sig, b64Policy, err := p.Sign(privKey)
    if err != nil {
        return nil, err
    }

    // Creates proper cookies
    cPolicy := &http.Cookie{
        Name:     CookiePolicyName,
        Value:    string(b64Policy),
        HttpOnly: true,
    }
    cSignature := &http.Cookie{
        Name:     CookieSignatureName,
        Value:    string(b64Sig),
        HttpOnly: true,
    }
    cKey := &http.Cookie{
        Name:     CookieKeyIDName,
        Value:    keyID,
        HttpOnly: true,
    }

    cookies := []*http.Cookie{cPolicy, cSignature, cKey}

    // Applie the cookie options
    for _, c := range cookies {
        c.Path = opt.Path
        c.Domain = opt.Domain
        c.Secure = opt.Secure
    }

    return cookies, nil
}
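A hedged, hypothetical usage sketch of the new CookieSigner added above; the key path, key-pair ID, and distribution URL are placeholders, and sign.LoadPEMPrivKeyFile is the existing key loader in this package:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/service/cloudfront/sign"
)

func main() {
	privKey, err := sign.LoadPEMPrivKeyFile("cloudfront-private-key.pem") // placeholder path
	if err != nil {
		fmt.Println("failed to load private key", err)
		return
	}

	signer := sign.NewCookieSigner("EXAMPLEKEYPAIRID", privKey) // placeholder key-pair ID
	cookies, err := signer.Sign("http*://d111111abcdef8.cloudfront.net/private/*", time.Now().Add(1*time.Hour))
	if err != nil {
		fmt.Println("failed to create signed cookies", err)
		return
	}
	fmt.Println("got", len(cookies), "cookies to attach to the response")
}
```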
16  vendor/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url.go  generated  vendored
@@ -90,19 +90,19 @@ func (s URLSigner) Sign(url string, expires time.Time) (string, error) {
//   // Sign URL to be valid for 30 minutes from now, expires one hour from now, and
//   // restricted to the 192.0.2.0/24 IP address range.
//   policy := &sign.Policy{
//     Statements: []Statement{
//     Statements: []sign.Statement{
//       {
//         Resource: rawURL,
//         Condition: Condition{
//         Condition: sign.Condition{
//           // Optional IP source address range
//           IPAddress: &IPAddress{SourceIP: "192.0.2.0/24"},
//           IPAddress: &sign.IPAddress{SourceIP: "192.0.2.0/24"},
//           // Optional date URL is not valid until
//           DateGreaterThan: &AWSEpochTime{time.Now().Add(30 * time.Minute)},
//           DateGreaterThan: &sign.AWSEpochTime{time.Now().Add(30 * time.Minute)},
//           // Required date the URL will expire after
//           DateLessThan: &AWSEpochTime{time.Now().Add(1 * time.Hour)},
//           DateLessThan: &sign.AWSEpochTime{time.Now().Add(1 * time.Hour)},
//         }
//         },
//       }
//       },
//     }
//     },
//   }
//
//   signer := sign.NewURLSigner(keyID, privKey)
3352  vendor/github.com/aws/aws-sdk-go/service/s3/api.go  generated  vendored
File diff suppressed because it is too large.
21  vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go  generated  vendored
@@ -6,9 +6,13 @@ import (
)

func init() {
    initClient = func(c *client.Client) {
        // Support building custom host-style bucket endpoints
        c.Handlers.Build.PushFront(updateHostWithBucket)
    initClient = defaultInitClientFn
    initRequest = defaultInitRequestFn
}

func defaultInitClientFn(c *client.Client) {
    // Support building custom endpoints based on config
    c.Handlers.Build.PushFront(updateEndpointForS3Config)

    // Require SSL when using SSE keys
    c.Handlers.Validate.PushBack(validateSSERequiresSSL)
@@ -19,9 +23,15 @@ func init() {
    c.Handlers.UnmarshalError.PushBack(unmarshalError)
}

    initRequest = func(r *request.Request) {
func defaultInitRequestFn(r *request.Request) {
    // Add reuest handlers for specific platforms.
    // e.g. 100-continue support for PUT requests using Go 1.6
    platformRequestHandlers(r)

    switch r.Operation.Name {
    case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration:
    case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy,
        opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration,
        opPutBucketReplication:
        // These S3 operations require Content-MD5 to be set
        r.Handlers.Build.PushBack(contentMD5)
    case opGetBucketLocation:
@@ -34,4 +44,3 @@ func init() {
        r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
    }
}
}
163  vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go  generated  vendored
@@ -1,14 +1,124 @@
package s3

import (
    "fmt"
    "net/url"
    "regexp"
    "strings"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/awsutil"
    "github.com/aws/aws-sdk-go/aws/request"
)

// an operationBlacklist is a list of operation names that should a
// request handler should not be executed with.
type operationBlacklist []string

// Continue will return true of the Request's operation name is not
// in the blacklist. False otherwise.
func (b operationBlacklist) Continue(r *request.Request) bool {
    for i := 0; i < len(b); i++ {
        if b[i] == r.Operation.Name {
            return false
        }
    }
    return true
}

var accelerateOpBlacklist = operationBlacklist{
    opListBuckets, opCreateBucket, opDeleteBucket,
}

// Request handler to automatically add the bucket name to the endpoint domain
// if possible. This style of bucket is valid for all bucket names which are
// DNS compatible and do not contain "."
func updateEndpointForS3Config(r *request.Request) {
    forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
    accelerate := aws.BoolValue(r.Config.S3UseAccelerate)

    if accelerate && accelerateOpBlacklist.Continue(r) {
        if forceHostStyle {
            if r.Config.Logger != nil {
                r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.")
            }
        }
        updateEndpointForAccelerate(r)
    } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation {
        updateEndpointForHostStyle(r)
    }
}

func updateEndpointForHostStyle(r *request.Request) {
    bucket, ok := bucketNameFromReqParams(r.Params)
    if !ok {
        // Ignore operation requests if the bucketname was not provided
        // if this is an input validation error the validation handler
        // will report it.
        return
    }

    if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
        // bucket name must be valid to put into the host
        return
    }

    moveBucketToHost(r.HTTPRequest.URL, bucket)
}

func updateEndpointForAccelerate(r *request.Request) {
    bucket, ok := bucketNameFromReqParams(r.Params)
    if !ok {
        // Ignore operation requests if the bucketname was not provided
        // if this is an input validation error the validation handler
        // will report it.
        return
    }

    if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
        r.Error = awserr.New("InvalidParameterException",
            fmt.Sprintf("bucket name %s is not compatibile with S3 Accelerate", bucket),
            nil)
        return
    }

    // Change endpoint from s3(-[a-z0-1-])?.amazonaws.com to s3-accelerate.amazonaws.com
    r.HTTPRequest.URL.Host = replaceHostRegion(r.HTTPRequest.URL.Host, "accelerate")
    moveBucketToHost(r.HTTPRequest.URL, bucket)
}

// Attempts to retrieve the bucket name from the request input parameters.
// If no bucket is found, or the field is empty "", false will be returned.
func bucketNameFromReqParams(params interface{}) (string, bool) {
    b, _ := awsutil.ValuesAtPath(params, "Bucket")
    if len(b) == 0 {
        return "", false
    }

    if bucket, ok := b[0].(*string); ok {
        if bucketStr := aws.StringValue(bucket); bucketStr != "" {
            return bucketStr, true
        }
    }

    return "", false
}

// hostCompatibleBucketName returns true if the request should
// put the bucket in the host. This is false if S3ForcePathStyle is
// explicitly set or if the bucket is not DNS compatible.
func hostCompatibleBucketName(u *url.URL, bucket string) bool {
    // Bucket might be DNS compatible but dots in the hostname will fail
    // certificate validation, so do not use host-style.
    if u.Scheme == "https" && strings.Contains(bucket, ".") {
        return false
    }

    // if the bucket is DNS compatible
    return dnsCompatibleBucketName(bucket)
}

var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
@@ -20,41 +130,36 @@ func dnsCompatibleBucketName(bucket string) bool {
    !strings.Contains(bucket, "..")
}

// hostStyleBucketName returns true if the request should put the bucket in
// the host. This is false if S3ForcePathStyle is explicitly set or if the
// bucket is not DNS compatible.
func hostStyleBucketName(r *request.Request, bucket string) bool {
    if aws.BoolValue(r.Config.S3ForcePathStyle) {
        return false
    }

    // Bucket might be DNS compatible but dots in the hostname will fail
    // certificate validation, so do not use host-style.
    if r.HTTPRequest.URL.Scheme == "https" && strings.Contains(bucket, ".") {
        return false
    }

    // GetBucketLocation should be able to be called from any region within
    // a partition, and return the associated region of the bucket.
    if r.Operation.Name == opGetBucketLocation {
        return false
    }

    // Use host-style if the bucket is DNS compatible
    return dnsCompatibleBucketName(bucket)
}

func updateHostWithBucket(r *request.Request) {
    b, _ := awsutil.ValuesAtPath(r.Params, "Bucket")
    if len(b) == 0 {
        return
    }

    if bucket := b[0].(*string); aws.StringValue(bucket) != "" && hostStyleBucketName(r, *bucket) {
        r.HTTPRequest.URL.Host = *bucket + "." + r.HTTPRequest.URL.Host
        r.HTTPRequest.URL.Path = strings.Replace(r.HTTPRequest.URL.Path, "/{Bucket}", "", -1)
        if r.HTTPRequest.URL.Path == "" {
            r.HTTPRequest.URL.Path = "/"
        }
    }
}

// moveBucketToHost moves the bucket name from the URI path to URL host.
func moveBucketToHost(u *url.URL, bucket string) {
    u.Host = bucket + "." + u.Host
    u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
    if u.Path == "" {
        u.Path = "/"
    }
}

const s3HostPrefix = "s3"

// replaceHostRegion replaces the S3 region string in the host with the
// value provided. If v is empty the host prefix returned will be s3.
func replaceHostRegion(host, v string) string {
    if !strings.HasPrefix(host, s3HostPrefix) {
        return host
    }

    suffix := host[len(s3HostPrefix):]
    for i := len(s3HostPrefix); i < len(host); i++ {
        if host[i] == '.' {
            // Trim until '.' leave the it in place.
            suffix = host[i:]
            break
        }
    }

    if len(v) == 0 {
        return fmt.Sprintf("s3%s", suffix)
    }

    return fmt.Sprintf("s3-%s%s", v, suffix)
}
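A hedged sketch of the aws.Config options that feed updateEndpointForS3Config above; the bucket name and region are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.New(&aws.Config{
		Region:           aws.String("us-west-2"),
		S3ForcePathStyle: aws.Bool(false), // keep virtual-host style addressing
		S3UseAccelerate:  aws.Bool(true),  // rewrite the host to s3-accelerate.amazonaws.com
	})
	svc := s3.New(sess)

	out, err := svc.ListObjects(&s3.ListObjectsInput{
		Bucket: aws.String("my-dns-compatible-bucket"), // must be DNS compatible for accelerate
	})
	fmt.Println(out, err)
}
```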
8  vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go  generated  vendored  Normal file
@@ -0,0 +1,8 @@
// +build !go1.6

package s3

import "github.com/aws/aws-sdk-go/aws/request"

func platformRequestHandlers(r *request.Request) {
}
28  vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go  generated  vendored  Normal file
@@ -0,0 +1,28 @@
// +build go1.6

package s3

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/request"
)

func platformRequestHandlers(r *request.Request) {
    if r.Operation.HTTPMethod == "PUT" {
        // 100-Continue should only be used on put requests.
        r.Handlers.Sign.PushBack(add100Continue)
    }
}

func add100Continue(r *request.Request) {
    if aws.BoolValue(r.Config.S3Disable100Continue) {
        return
    }
    if r.HTTPRequest.ContentLength < 1024*1024*2 {
        // Ignore requests smaller than 2MB. This helps prevent delaying
        // requests unnecessarily.
        return
    }

    r.HTTPRequest.Header.Set("Expect", "100-Continue")
}
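A hedged sketch of opting out of the Expect: 100-Continue behaviour added by the go1.6 handler above:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// By default, PUTs over 2MB get an "Expect: 100-Continue" header on Go 1.6+.
	// S3Disable100Continue turns that off for clients built from this config.
	sess := session.New(&aws.Config{
		Region:               aws.String("us-east-1"),
		S3Disable100Continue: aws.Bool(true),
	})
	_ = s3.New(sess)
}
```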
4  vendor/github.com/aws/aws-sdk-go/service/s3/service.go  generated  vendored
@@ -7,8 +7,8 @@ import (
    "github.com/aws/aws-sdk-go/aws/client"
    "github.com/aws/aws-sdk-go/aws/client/metadata"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/aws/signer/v4"
    "github.com/aws/aws-sdk-go/private/protocol/restxml"
    "github.com/aws/aws-sdk-go/private/signer/v4"
)

// S3 is a client for Amazon S3.
@@ -58,7 +58,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
    }

    // Handlers
    svc.Handlers.Sign.PushBack(v4.Sign)
    svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
    svc.Handlers.Build.PushBackNamed(restxml.BuildHandler)
    svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
    svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
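The handler swap above is the visible part of the move from private/signer/v4 to aws/signer/v4. A hedged sketch of referencing the same named handler on a constructed client; the extra registration here is purely illustrative:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/aws/signer/v4"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	// s3.New already registers v4.SignRequestHandler; re-adding it by name is
	// shown here only to point at the handler the diff switches to.
	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
}
```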
2  vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go  generated  vendored
@@ -4,6 +4,7 @@ import (
    "encoding/xml"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"
    "strings"

@@ -20,6 +21,7 @@ type xmlErrorResponse struct {

func unmarshalError(r *request.Request) {
    defer r.HTTPResponse.Body.Close()
    defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)

    if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
        r.Error = awserr.NewRequestFailure(
6  vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go  generated  vendored
@@ -18,6 +18,12 @@ func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
            Argument: "",
            Expected: 200,
        },
        {
            State:    "success",
            Matcher:  "status",
            Argument: "",
            Expected: 301,
        },
        {
            State:    "success",
            Matcher:  "status",
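A hedged usage sketch of the waiter whose accepted-status list gains 301 above; the bucket name is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	// A 301 (bucket owned/visible in another region) now also counts as success.
	err := svc.WaitUntilBucketExists(&s3.HeadBucketInput{
		Bucket: aws.String("my-existing-bucket"),
	})
	fmt.Println(err)
}
```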
vendor/github.com/go-ini/ini/README.md vendored
@@ -22,20 +22,8 @@ Package ini provides INI file read and write functionality in Go.
 
 ## Installation
 
-To use a tagged revision:
-
 	go get gopkg.in/ini.v1
 
-To use with latest changes:
-
-	go get github.com/go-ini/ini
-
-### Testing
-
-If you want to test on your machine, please apply `-t` flag:
-
-	go get -t gopkg.in/ini.v1
-
 ## Getting Started
 
 ### Loading from data sources
@@ -107,12 +95,6 @@ Same rule applies to key operations:
 key := cfg.Section("").Key("key name")
 ```
 
-To check if a key exists:
-
-```go
-yes := cfg.Section("").HasKey("key name")
-```
-
 To create a new key:
 
 ```go
@@ -151,24 +133,12 @@ val := cfg.Section("").Key("key name").Validate(func(in string) string {
 })
 ```
 
-If you do not want any auto-transformation (such as recursive read) for the values, you can get raw value directly (this way you get much better performance):
-
-```go
-val := cfg.Section("").Key("key name").Value()
-```
-
-To check if raw value exists:
-
-```go
-yes := cfg.Section("").HasValue("test value")
-```
-
 To get value with types:
 
 ```go
 // For boolean values:
-// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
-// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
+// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On
+// false when value is: 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off
 v, err = cfg.Section("").Key("BOOL").Bool()
 v, err = cfg.Section("").Key("FLOAT64").Float64()
 v, err = cfg.Section("").Key("INT").Int()
@@ -494,7 +464,7 @@ type Info struct {
 }
 
 func main() {
-	err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
+	err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini"))
 	// ...
 
 	cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
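For orientation only (not part of the README diff), a small sketch of the typed accessors the README shows, using the vendored go-ini API; the key names and values are invented:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	cfg, err := ini.Load([]byte("enabled = true\nport = 5000\n"))
	if err != nil {
		panic(err)
	}
	// Typed reads as in the README: Bool() and Int() return the parsed value
	// plus an error when the raw string does not convert.
	enabled, _ := cfg.Section("").Key("enabled").Bool()
	port, _ := cfg.Section("").Key("port").Int()
	fmt.Println(enabled, port)
}
```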
vendor/github.com/go-ini/ini/README_ZH.md vendored
@@ -15,20 +15,8 @@
 
 ## 下载安装
 
-使用一个特定版本:
-
 	go get gopkg.in/ini.v1
 
-使用最新版:
-
-	go get github.com/go-ini/ini
-
-### 测试安装
-
-如果您想要在自己的机器上运行测试,请使用 `-t` 标记:
-
-	go get -t gopkg.in/ini.v1
-
 ## 开始使用
 
 ### 从数据源加载
@@ -100,12 +88,6 @@ key, err := cfg.Section("").GetKey("key name")
 key := cfg.Section("").Key("key name")
 ```
 
-判断某个键是否存在:
-
-```go
-yes := cfg.Section("").HasKey("key name")
-```
-
 创建一个新的键:
 
 ```go
@@ -144,24 +126,12 @@ val := cfg.Section("").Key("key name").Validate(func(in string) string {
 })
 ```
 
-如果您不需要任何对值的自动转变功能(例如递归读取),可以直接获取原值(这种方式性能最佳):
-
-```go
-val := cfg.Section("").Key("key name").Value()
-```
-
-判断某个原值是否存在:
-
-```go
-yes := cfg.Section("").HasValue("test value")
-```
-
 获取其它类型的值:
 
 ```go
 // 布尔值的规则:
-// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
-// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
+// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On
+// false 当值为:0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off
 v, err = cfg.Section("").Key("BOOL").Bool()
 v, err = cfg.Section("").Key("FLOAT64").Float64()
 v, err = cfg.Section("").Key("INT").Int()
@@ -485,7 +455,7 @@ type Info struct{
 }
 
 func main() {
-	err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
+	err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini"))
 	// ...
 
 	cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
vendor/github.com/go-ini/ini/ini.go vendored
@@ -16,6 +16,7 @@
 package ini
 
 import (
+	"bufio"
 	"bytes"
 	"errors"
 	"fmt"
@@ -34,7 +35,7 @@ const (
 	// Maximum allowed depth when recursively substituing variable names.
 	_DEPTH_VALUES = 99
 
-	_VERSION = "1.8.6"
+	_VERSION = "1.6.0"
 )
 
 func Version() string {
@@ -163,14 +164,14 @@ func (k *Key) Validate(fn func(string) string) string {
 
 // parseBool returns the boolean value represented by the string.
 //
-// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
-// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
+// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On,
+// 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off.
 // Any other value returns an error.
 func parseBool(str string) (value bool, err error) {
 	switch str {
-	case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
+	case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "ON", "on", "On":
 		return true, nil
-	case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
+	case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "OFF", "off", "Off":
 		return false, nil
 	}
 	return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
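An illustrative consequence of the parseBool change above, not part of the commit: with the vendored revision, bare "y"/"n" values no longer parse as booleans, while "yes"/"no" still do. A small sketch:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	cfg, _ := ini.Load([]byte("accept = y\nverbose = yes\n"))

	// "y" is not in the vendored parseBool's accept list, so Bool() errors.
	_, err := cfg.Section("").Key("accept").Bool()
	fmt.Println("accept:", err)

	// "yes" still parses as true.
	v, _ := cfg.Section("").Key("verbose").Bool()
	fmt.Println("verbose:", v)
}
```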
@@ -453,7 +454,7 @@ func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
 	return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
 }
 
-// Strings returns list of string divided by given delimiter.
+// Strings returns list of string devide by given delimiter.
 func (k *Key) Strings(delim string) []string {
 	str := k.String()
 	if len(str) == 0 {
@@ -467,7 +468,7 @@ func (k *Key) Strings(delim string) []string {
 	return vals
 }
 
-// Float64s returns list of float64 divided by given delimiter.
+// Float64s returns list of float64 devide by given delimiter.
 func (k *Key) Float64s(delim string) []float64 {
 	strs := k.Strings(delim)
 	vals := make([]float64, len(strs))
@@ -477,7 +478,7 @@ func (k *Key) Float64s(delim string) []float64 {
 	return vals
 }
 
-// Ints returns list of int divided by given delimiter.
+// Ints returns list of int devide by given delimiter.
 func (k *Key) Ints(delim string) []int {
 	strs := k.Strings(delim)
 	vals := make([]int, len(strs))
@@ -487,7 +488,7 @@ func (k *Key) Ints(delim string) []int {
 	return vals
 }
 
-// Int64s returns list of int64 divided by given delimiter.
+// Int64s returns list of int64 devide by given delimiter.
 func (k *Key) Int64s(delim string) []int64 {
 	strs := k.Strings(delim)
 	vals := make([]int64, len(strs))
@@ -497,18 +498,18 @@ func (k *Key) Int64s(delim string) []int64 {
 	return vals
 }
 
-// Uints returns list of uint divided by given delimiter.
+// Uints returns list of uint devide by given delimiter.
 func (k *Key) Uints(delim string) []uint {
 	strs := k.Strings(delim)
 	vals := make([]uint, len(strs))
 	for i := range strs {
-		u, _ := strconv.ParseUint(strs[i], 10, 0)
+		u, _ := strconv.ParseUint(strs[i], 10, 64)
 		vals[i] = uint(u)
 	}
 	return vals
 }
 
-// Uint64s returns list of uint64 divided by given delimiter.
+// Uint64s returns list of uint64 devide by given delimiter.
 func (k *Key) Uint64s(delim string) []uint64 {
 	strs := k.Strings(delim)
 	vals := make([]uint64, len(strs))
@@ -518,7 +519,7 @@ func (k *Key) Uint64s(delim string) []uint64 {
 	return vals
 }
 
-// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+// TimesFormat parses with given format and returns list of time.Time devide by given delimiter.
 func (k *Key) TimesFormat(format, delim string) []time.Time {
 	strs := k.Strings(delim)
 	vals := make([]time.Time, len(strs))
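A side note on the ParseUint change above, not from the commit: bitSize 0 means "size of uint", so large values can fail to parse on 32-bit platforms, while bitSize 64, as in the vendored Uints, always parses and only the final uint conversion may truncate. A quick illustration with strconv:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	s := "4294967296" // 2^32, does not fit in a 32-bit uint

	// bitSize 0 means "size of uint"; this would report an out-of-range error
	// on 32-bit platforms.
	v0, err0 := strconv.ParseUint(s, 10, 0)
	fmt.Println(v0, err0)

	// bitSize 64 always accepts the value; any truncation happens later in
	// the uint(u) conversion, as in the vendored Uints above.
	v64, err64 := strconv.ParseUint(s, 10, 64)
	fmt.Println(v64, err64)
}
```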
@@ -528,20 +529,14 @@ func (k *Key) TimesFormat(format, delim string) []time.Time {
 	return vals
 }
 
-// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+// Times parses with RFC3339 format and returns list of time.Time devide by given delimiter.
 func (k *Key) Times(delim string) []time.Time {
 	return k.TimesFormat(time.RFC3339, delim)
 }
 
 // SetValue changes key value.
 func (k *Key) SetValue(v string) {
-	if k.s.f.BlockMode {
-		k.s.f.lock.Lock()
-		defer k.s.f.lock.Unlock()
-	}
-
 	k.value = v
-	k.s.keysHash[k.name] = v
 }
 
 // _________ __ .__
@@ -623,32 +618,6 @@ func (s *Section) GetKey(name string) (*Key, error) {
 	return key, nil
 }
 
-// HasKey returns true if section contains a key with given name.
-func (s *Section) HasKey(name string) bool {
-	key, _ := s.GetKey(name)
-	return key != nil
-}
-
-// Haskey is a backwards-compatible name for HasKey.
-func (s *Section) Haskey(name string) bool {
-	return s.HasKey(name)
-}
-
-// HasValue returns true if section contains given raw value.
-func (s *Section) HasValue(value string) bool {
-	if s.f.BlockMode {
-		s.f.lock.RLock()
-		defer s.f.lock.RUnlock()
-	}
-
-	for _, k := range s.keys {
-		if value == k.value {
-			return true
-		}
-	}
-	return false
-}
-
 // Key assumes named Key exists in section and returns a zero-value when not.
 func (s *Section) Key(name string) *Key {
 	key, err := s.GetKey(name)
@@ -768,10 +737,7 @@ func Load(source interface{}, others ...interface{}) (_ *File, err error) {
 		}
 	}
 	f := newFile(sources)
-	if err = f.Reload(); err != nil {
-		return nil, err
-	}
-	return f, nil
+	return f, f.Reload()
 }
 
 // Empty returns an empty file object.
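Worth noting in passing (not part of the diff): since the vendored revision drops Section.HasKey and Section.HasValue, an existence check has to go through GetKey instead. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	cfg, _ := ini.Load([]byte("name = ini\n"))

	// GetKey returns an error when the key is absent, which covers the
	// HasKey use case in the vendored revision.
	if _, err := cfg.Section("").GetKey("name"); err == nil {
		fmt.Println("key exists")
	}
	if _, err := cfg.Section("").GetKey("missing"); err != nil {
		fmt.Println("key missing:", err)
	}
}
```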
@ -877,6 +843,240 @@ func (f *File) DeleteSection(name string) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func cutComment(str string) string {
|
||||||
|
i := strings.Index(str, "#")
|
||||||
|
if i == -1 {
|
||||||
|
return str
|
||||||
|
}
|
||||||
|
return str[:i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkMultipleLines(buf *bufio.Reader, line, val, valQuote string) (string, error) {
|
||||||
|
isEnd := false
|
||||||
|
for {
|
||||||
|
next, err := buf.ReadString('\n')
|
||||||
|
if err != nil {
|
||||||
|
if err != io.EOF {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
isEnd = true
|
||||||
|
}
|
||||||
|
pos := strings.LastIndex(next, valQuote)
|
||||||
|
if pos > -1 {
|
||||||
|
val += next[:pos]
|
||||||
|
break
|
||||||
|
}
|
||||||
|
val += next
|
||||||
|
if isEnd {
|
||||||
|
return "", fmt.Errorf("error parsing line: missing closing key quote from '%s' to '%s'", line, next)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return val, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkContinuationLines(buf *bufio.Reader, val string) (string, bool, error) {
|
||||||
|
isEnd := false
|
||||||
|
for {
|
||||||
|
valLen := len(val)
|
||||||
|
if valLen == 0 || val[valLen-1] != '\\' {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
val = val[:valLen-1]
|
||||||
|
|
||||||
|
next, err := buf.ReadString('\n')
|
||||||
|
if err != nil {
|
||||||
|
if err != io.EOF {
|
||||||
|
return "", isEnd, err
|
||||||
|
}
|
||||||
|
isEnd = true
|
||||||
|
}
|
||||||
|
|
||||||
|
next = strings.TrimSpace(next)
|
||||||
|
if len(next) == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
val += next
|
||||||
|
}
|
||||||
|
return val, isEnd, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parse parses data through an io.Reader.
|
||||||
|
func (f *File) parse(reader io.Reader) error {
|
||||||
|
buf := bufio.NewReader(reader)
|
||||||
|
|
||||||
|
// Handle BOM-UTF8.
|
||||||
|
// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
|
||||||
|
mask, err := buf.Peek(3)
|
||||||
|
if err == nil && len(mask) >= 3 && mask[0] == 239 && mask[1] == 187 && mask[2] == 191 {
|
||||||
|
buf.Read(mask)
|
||||||
|
}
|
||||||
|
|
||||||
|
count := 1
|
||||||
|
comments := ""
|
||||||
|
isEnd := false
|
||||||
|
|
||||||
|
section, err := f.NewSection(DEFAULT_SECTION)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
line, err := buf.ReadString('\n')
|
||||||
|
line = strings.TrimSpace(line)
|
||||||
|
length := len(line)
|
||||||
|
|
||||||
|
// Check error and ignore io.EOF just for a moment.
|
||||||
|
if err != nil {
|
||||||
|
if err != io.EOF {
|
||||||
|
return fmt.Errorf("error reading next line: %v", err)
|
||||||
|
}
|
||||||
|
// The last line of file could be an empty line.
|
||||||
|
if length == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
isEnd = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip empty lines.
|
||||||
|
if length == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case line[0] == '#' || line[0] == ';': // Comments.
|
||||||
|
if len(comments) == 0 {
|
||||||
|
comments = line
|
||||||
|
} else {
|
||||||
|
comments += LineBreak + line
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
case line[0] == '[' && line[length-1] == ']': // New sction.
|
||||||
|
section, err = f.NewSection(strings.TrimSpace(line[1 : length-1]))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(comments) > 0 {
|
||||||
|
section.Comment = comments
|
||||||
|
comments = ""
|
||||||
|
}
|
||||||
|
// Reset counter.
|
||||||
|
count = 1
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Other possibilities.
|
||||||
|
var (
|
||||||
|
i int
|
||||||
|
keyQuote string
|
||||||
|
kname string
|
||||||
|
valQuote string
|
||||||
|
val string
|
||||||
|
)
|
||||||
|
|
||||||
|
// Key name surrounded by quotes.
|
||||||
|
if line[0] == '"' {
|
||||||
|
if length > 6 && line[0:3] == `"""` {
|
||||||
|
keyQuote = `"""`
|
||||||
|
} else {
|
||||||
|
keyQuote = `"`
|
||||||
|
}
|
||||||
|
} else if line[0] == '`' {
|
||||||
|
keyQuote = "`"
|
||||||
|
}
|
||||||
|
if len(keyQuote) > 0 {
|
||||||
|
qLen := len(keyQuote)
|
||||||
|
pos := strings.Index(line[qLen:], keyQuote)
|
||||||
|
if pos == -1 {
|
||||||
|
return fmt.Errorf("error parsing line: missing closing key quote: %s", line)
|
||||||
|
}
|
||||||
|
pos = pos + qLen
|
||||||
|
i = strings.IndexAny(line[pos:], "=:")
|
||||||
|
if i < 0 {
|
||||||
|
return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line)
|
||||||
|
} else if i == pos {
|
||||||
|
return fmt.Errorf("error parsing line: key is empty: %s", line)
|
||||||
|
}
|
||||||
|
i = i + pos
|
||||||
|
kname = line[qLen:pos] // Just keep spaces inside quotes.
|
||||||
|
} else {
|
||||||
|
i = strings.IndexAny(line, "=:")
|
||||||
|
if i < 0 {
|
||||||
|
return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line)
|
||||||
|
} else if i == 0 {
|
||||||
|
return fmt.Errorf("error parsing line: key is empty: %s", line)
|
||||||
|
}
|
||||||
|
kname = strings.TrimSpace(line[0:i])
|
||||||
|
}
|
||||||
|
|
||||||
|
isAutoIncr := false
|
||||||
|
// Auto increment.
|
||||||
|
if kname == "-" {
|
||||||
|
isAutoIncr = true
|
||||||
|
kname = "#" + fmt.Sprint(count)
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
|
||||||
|
lineRight := strings.TrimSpace(line[i+1:])
|
||||||
|
lineRightLength := len(lineRight)
|
||||||
|
firstChar := ""
|
||||||
|
if lineRightLength >= 2 {
|
||||||
|
firstChar = lineRight[0:1]
|
||||||
|
}
|
||||||
|
if firstChar == "`" {
|
||||||
|
valQuote = "`"
|
||||||
|
} else if firstChar == `"` {
|
||||||
|
if lineRightLength >= 3 && lineRight[0:3] == `"""` {
|
||||||
|
valQuote = `"""`
|
||||||
|
} else {
|
||||||
|
valQuote = `"`
|
||||||
|
}
|
||||||
|
} else if firstChar == `'` {
|
||||||
|
valQuote = `'`
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(valQuote) > 0 {
|
||||||
|
qLen := len(valQuote)
|
||||||
|
pos := strings.LastIndex(lineRight[qLen:], valQuote)
|
||||||
|
// For multiple-line value check.
|
||||||
|
if pos == -1 {
|
||||||
|
if valQuote == `"` || valQuote == `'` {
|
||||||
|
return fmt.Errorf("error parsing line: single quote does not allow multiple-line value: %s", line)
|
||||||
|
}
|
||||||
|
|
||||||
|
val = lineRight[qLen:] + "\n"
|
||||||
|
val, err = checkMultipleLines(buf, line, val, valQuote)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
val = lineRight[qLen : pos+qLen]
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
val = strings.TrimSpace(cutComment(lineRight))
|
||||||
|
val, isEnd, err = checkContinuationLines(buf, val)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
k, err := section.NewKey(kname, val)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
k.isAutoIncr = isAutoIncr
|
||||||
|
if len(comments) > 0 {
|
||||||
|
k.Comment = comments
|
||||||
|
comments = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
if isEnd {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func (f *File) reload(s dataSource) error {
|
func (f *File) reload(s dataSource) error {
|
||||||
r, err := s.ReadCloser()
|
r, err := s.ReadCloser()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -966,18 +1166,17 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
 		switch {
 		case key.isAutoIncr:
 			kname = "-"
-		case strings.ContainsAny(kname, "\"=:"):
-			kname = "`" + kname + "`"
-		case strings.Contains(kname, "`"):
+		case strings.Contains(kname, "`") || strings.Contains(kname, `"`):
 			kname = `"""` + kname + `"""`
+		case strings.Contains(kname, `=`) || strings.Contains(kname, `:`):
+			kname = "`" + kname + "`"
 		}
 
 		val := key.value
-		// In case key value contains "\n", "`", "\"", "#" or ";".
-		if strings.ContainsAny(val, "\n`") {
+		// In case key value contains "\n", "`" or "\"".
+		if strings.Contains(val, "\n") || strings.Contains(val, "`") || strings.Contains(val, `"`) ||
+			strings.Contains(val, "#") {
 			val = `"""` + val + `"""`
-		} else if strings.ContainsAny(val, "#;") {
-			val = "`" + val + "`"
 		}
 		if _, err = buf.WriteString(kname + equalSign + val + LineBreak); err != nil {
 			return 0, err
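For illustration only: the quoting rules above decide how key names and values are escaped when a file is written back. A small sketch using the vendored API (ini.Empty, WriteTo); the key and value strings are invented:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	cfg := ini.Empty()
	sec := cfg.Section("")
	sec.NewKey("plain", "value")
	sec.NewKey("key:with:colons", "v1") // contains ':' -> written as `key:with:colons`
	sec.NewKey("multi", "line1\nline2") // contains '\n' -> written as a """...""" block

	var buf bytes.Buffer
	cfg.WriteTo(&buf)
	fmt.Print(buf.String())
}
```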
vendor/github.com/go-ini/ini/struct.go vendored
@@ -94,14 +94,13 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri
 		field.SetBool(boolVal)
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
 		durationVal, err := key.Duration()
-		// Skip zero value
-		if err == nil && int(durationVal) > 0 {
+		if err == nil {
 			field.Set(reflect.ValueOf(durationVal))
 			return nil
 		}
 
 		intVal, err := key.Int64()
-		if err != nil || intVal == 0 {
+		if err != nil {
 			return nil
 		}
 		field.SetInt(intVal)
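A closing aside, not part of the commit: setWithProperType tries Duration before falling back to Int64 for integer-kinded fields, so a value like "30s" lands in a time.Duration field while plain numbers still map as integers. A rough sketch, assuming the vendored revision exposes ini.MapTo as upstream go-ini does; the struct and key names are invented:

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-ini/ini"
)

// Config is a hypothetical target struct for MapTo.
type Config struct {
	Timeout time.Duration
	Retries int
}

func main() {
	c := new(Config)
	// "30s" parses as a duration; "3" fails ParseDuration and falls back to Int64.
	err := ini.MapTo(c, []byte("Timeout = 30s\nRetries = 3\n"))
	fmt.Println(c.Timeout, c.Retries, err)
}
```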
312 vendor/github.com/go-ini/ini/parser.go generated vendored
@@ -1,312 +0,0 @@
|
||||||
// Copyright 2015 Unknwon
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
|
||||||
// not use this file except in compliance with the License. You may obtain
|
|
||||||
// a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
// License for the specific language governing permissions and limitations
|
|
||||||
// under the License.
|
|
||||||
|
|
||||||
package ini
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"unicode"
|
|
||||||
)
|
|
||||||
|
|
||||||
type tokenType int
|
|
||||||
|
|
||||||
const (
|
|
||||||
_TOKEN_INVALID tokenType = iota
|
|
||||||
_TOKEN_COMMENT
|
|
||||||
_TOKEN_SECTION
|
|
||||||
_TOKEN_KEY
|
|
||||||
)
|
|
||||||
|
|
||||||
type parser struct {
|
|
||||||
buf *bufio.Reader
|
|
||||||
isEOF bool
|
|
||||||
count int
|
|
||||||
comment *bytes.Buffer
|
|
||||||
}
|
|
||||||
|
|
||||||
func newParser(r io.Reader) *parser {
|
|
||||||
return &parser{
|
|
||||||
buf: bufio.NewReader(r),
|
|
||||||
count: 1,
|
|
||||||
comment: &bytes.Buffer{},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// BOM handles header of BOM-UTF8 format.
|
|
||||||
// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
|
|
||||||
func (p *parser) BOM() error {
|
|
||||||
mask, err := p.buf.Peek(3)
|
|
||||||
if err != nil && err != io.EOF {
|
|
||||||
return err
|
|
||||||
} else if len(mask) < 3 {
|
|
||||||
return nil
|
|
||||||
} else if mask[0] == 239 && mask[1] == 187 && mask[2] == 191 {
|
|
||||||
p.buf.Read(mask)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) readUntil(delim byte) ([]byte, error) {
|
|
||||||
data, err := p.buf.ReadBytes(delim)
|
|
||||||
if err != nil {
|
|
||||||
if err == io.EOF {
|
|
||||||
p.isEOF = true
|
|
||||||
} else {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return data, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func cleanComment(in []byte) ([]byte, bool) {
|
|
||||||
i := bytes.IndexAny(in, "#;")
|
|
||||||
if i == -1 {
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
return in[i:], true
|
|
||||||
}
|
|
||||||
|
|
||||||
func readKeyName(in []byte) (string, int, error) {
|
|
||||||
line := string(in)
|
|
||||||
|
|
||||||
// Check if key name surrounded by quotes.
|
|
||||||
var keyQuote string
|
|
||||||
if line[0] == '"' {
|
|
||||||
if len(line) > 6 && string(line[0:3]) == `"""` {
|
|
||||||
keyQuote = `"""`
|
|
||||||
} else {
|
|
||||||
keyQuote = `"`
|
|
||||||
}
|
|
||||||
} else if line[0] == '`' {
|
|
||||||
keyQuote = "`"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get out key name
|
|
||||||
endIdx := -1
|
|
||||||
if len(keyQuote) > 0 {
|
|
||||||
startIdx := len(keyQuote)
|
|
||||||
// FIXME: fail case -> """"""name"""=value
|
|
||||||
pos := strings.Index(line[startIdx:], keyQuote)
|
|
||||||
if pos == -1 {
|
|
||||||
return "", -1, fmt.Errorf("missing closing key quote: %s", line)
|
|
||||||
}
|
|
||||||
pos += startIdx
|
|
||||||
|
|
||||||
// Find key-value delimiter
|
|
||||||
i := strings.IndexAny(line[pos+startIdx:], "=:")
|
|
||||||
if i < 0 {
|
|
||||||
return "", -1, fmt.Errorf("key-value delimiter not found: %s", line)
|
|
||||||
}
|
|
||||||
endIdx = pos + i
|
|
||||||
return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
endIdx = strings.IndexAny(line, "=:")
|
|
||||||
if endIdx < 0 {
|
|
||||||
return "", -1, fmt.Errorf("key-value delimiter not found: %s", line)
|
|
||||||
}
|
|
||||||
return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
|
|
||||||
for {
|
|
||||||
data, err := p.readUntil('\n')
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
next := string(data)
|
|
||||||
|
|
||||||
pos := strings.LastIndex(next, valQuote)
|
|
||||||
if pos > -1 {
|
|
||||||
val += next[:pos]
|
|
||||||
|
|
||||||
comment, has := cleanComment([]byte(next[pos:]))
|
|
||||||
if has {
|
|
||||||
p.comment.Write(bytes.TrimSpace(comment))
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
val += next
|
|
||||||
if p.isEOF {
|
|
||||||
return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return val, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) readContinuationLines(val string) (string, error) {
|
|
||||||
for {
|
|
||||||
data, err := p.readUntil('\n')
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
next := strings.TrimSpace(string(data))
|
|
||||||
|
|
||||||
if len(next) == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
val += next
|
|
||||||
if val[len(val)-1] != '\\' {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
val = val[:len(val)-1]
|
|
||||||
}
|
|
||||||
return val, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// hasSurroundedQuote check if and only if the first and last characters
|
|
||||||
// are quotes \" or \'.
|
|
||||||
// It returns false if any other parts also contain same kind of quotes.
|
|
||||||
func hasSurroundedQuote(in string, quote byte) bool {
|
|
||||||
return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote &&
|
|
||||||
strings.IndexByte(in[1:], quote) == len(in)-2
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) readValue(in []byte) (string, error) {
|
|
||||||
line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
|
|
||||||
if len(line) == 0 {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var valQuote string
|
|
||||||
if len(line) > 3 && string(line[0:3]) == `"""` {
|
|
||||||
valQuote = `"""`
|
|
||||||
} else if line[0] == '`' {
|
|
||||||
valQuote = "`"
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(valQuote) > 0 {
|
|
||||||
startIdx := len(valQuote)
|
|
||||||
pos := strings.LastIndex(line[startIdx:], valQuote)
|
|
||||||
// Check for multi-line value
|
|
||||||
if pos == -1 {
|
|
||||||
return p.readMultilines(line, line[startIdx:], valQuote)
|
|
||||||
}
|
|
||||||
|
|
||||||
return line[startIdx : pos+startIdx], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Won't be able to reach here if value only contains whitespace.
|
|
||||||
line = strings.TrimSpace(line)
|
|
||||||
|
|
||||||
// Check continuation lines
|
|
||||||
if line[len(line)-1] == '\\' {
|
|
||||||
return p.readContinuationLines(line[:len(line)-1])
|
|
||||||
}
|
|
||||||
|
|
||||||
i := strings.IndexAny(line, "#;")
|
|
||||||
if i > -1 {
|
|
||||||
p.comment.WriteString(line[i:])
|
|
||||||
line = strings.TrimSpace(line[:i])
|
|
||||||
}
|
|
||||||
|
|
||||||
// Trim single quotes
|
|
||||||
if hasSurroundedQuote(line, '\'') ||
|
|
||||||
hasSurroundedQuote(line, '"') {
|
|
||||||
line = line[1 : len(line)-1]
|
|
||||||
}
|
|
||||||
return line, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// parse parses data through an io.Reader.
|
|
||||||
func (f *File) parse(reader io.Reader) (err error) {
|
|
||||||
p := newParser(reader)
|
|
||||||
if err = p.BOM(); err != nil {
|
|
||||||
return fmt.Errorf("BOM: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ignore error because default section name is never empty string.
|
|
||||||
section, _ := f.NewSection(DEFAULT_SECTION)
|
|
||||||
|
|
||||||
var line []byte
|
|
||||||
for !p.isEOF {
|
|
||||||
line, err = p.readUntil('\n')
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
line = bytes.TrimLeftFunc(line, unicode.IsSpace)
|
|
||||||
if len(line) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Comments
|
|
||||||
if line[0] == '#' || line[0] == ';' {
|
|
||||||
// Note: we do not care ending line break,
|
|
||||||
// it is needed for adding second line,
|
|
||||||
// so just clean it once at the end when set to value.
|
|
||||||
p.comment.Write(line)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Section
|
|
||||||
if line[0] == '[' {
|
|
||||||
// Read to the next ']' (TODO: support quoted strings)
|
|
||||||
closeIdx := bytes.IndexByte(line, ']')
|
|
||||||
if closeIdx == -1 {
|
|
||||||
return fmt.Errorf("unclosed section: %s", line)
|
|
||||||
}
|
|
||||||
|
|
||||||
section, err = f.NewSection(string(line[1:closeIdx]))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
comment, has := cleanComment(line[closeIdx+1:])
|
|
||||||
if has {
|
|
||||||
p.comment.Write(comment)
|
|
||||||
}
|
|
||||||
|
|
||||||
section.Comment = strings.TrimSpace(p.comment.String())
|
|
||||||
|
|
||||||
// Reset aotu-counter and comments
|
|
||||||
p.comment.Reset()
|
|
||||||
p.count = 1
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
kname, offset, err := readKeyName(line)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Auto increment.
|
|
||||||
isAutoIncr := false
|
|
||||||
if kname == "-" {
|
|
||||||
isAutoIncr = true
|
|
||||||
kname = "#" + strconv.Itoa(p.count)
|
|
||||||
p.count++
|
|
||||||
}
|
|
||||||
|
|
||||||
key, err := section.NewKey(kname, "")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
key.isAutoIncr = isAutoIncr
|
|
||||||
|
|
||||||
value, err := p.readValue(line[offset:])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
key.SetValue(value)
|
|
||||||
key.Comment = strings.TrimSpace(p.comment.String())
|
|
||||||
p.comment.Reset()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|