diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index db88cb2a..f6411400 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -50,6 +50,41 @@ "Comment": "v0.7.3", "Rev": "55eb11d21d2a31a3cc93838241d04800f52e823d" }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/endpoints", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/signer/v4", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/waiter", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/cloudfront/sign", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/s3", + "Comment": "v1.1.0-14-g49c3892", + "Rev": "49c3892b61af1d4996292a3025f36e4dfa25eaee" + }, { "ImportPath": "github.com/bugsnag/bugsnag-go", "Comment": "v1.0.2-5-gb1d1530", @@ -79,10 +114,6 @@ "ImportPath": "github.com/docker/goamz/aws", "Rev": "f0a21f5b2e12f83a505ecf79b633bb2035cf6f85" }, - { - "ImportPath": "github.com/docker/goamz/cloudfront", - "Rev": "f0a21f5b2e12f83a505ecf79b633bb2035cf6f85" - }, { "ImportPath": "github.com/docker/goamz/s3", "Rev": "f0a21f5b2e12f83a505ecf79b633bb2035cf6f85" @@ -99,6 +130,11 @@ "ImportPath": "github.com/garyburd/redigo/redis", "Rev": "535138d7bcd717d6531c701ef5933d98b1866257" }, + { + "ImportPath": "github.com/go-ini/ini", + "Comment": "v1.8.6", + "Rev": "afbd495e5aaea13597b5e14fe514ddeaa4d76fc3" + }, { "ImportPath": "github.com/gorilla/context", "Rev": "14f550f51af52180c2eefed15e5fd18d63c0a64a" @@ -119,6 +155,11 @@ "ImportPath": "github.com/mitchellh/mapstructure", "Rev": "482a9fd5fa83e8c4e7817413b80f3eb8feec03ef" }, + { + "ImportPath": "github.com/jmespath/go-jmespath", + "Comment": "0.2.2-12-g0b12d6b", + "Rev": "0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74" + }, { "ImportPath": "github.com/ncw/swift", "Rev": "c54732e87b0b283d1baf0a18db689d0aea460ba3" diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go new file mode 100644 index 00000000..1d2c0d8b --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -0,0 +1,127 @@ +// Package awserr represents API error interface accessors for the SDK. +package awserr + +// An Error wraps lower level errors with code, message and an original error. +// The underlying concrete error type may also satisfy other interfaces which +// can be to used to obtain more specific information about the error. +// +// Calling Error() or String() will always include the full information about +// an error based on its underlying type. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Get error details +// log.Println("Error:", awsErr.Code(), awsErr.Message()) +// +// // Prints out full error message, including original error if there was one. 
+//            log.Println("Error:", awsErr.Error())
+//
+//            // Get original error
+//            if origErr := awsErr.OrigErr(); origErr != nil {
+//                // operate on original error.
+//            }
+//        } else {
+//            fmt.Println(err.Error())
+//        }
+//    }
+//
+type Error interface {
+	// Satisfy the generic error interface.
+	error
+
+	// Returns the short phrase depicting the classification of the error.
+	Code() string
+
+	// Returns the error details message.
+	Message() string
+
+	// Returns the original error if one was set. Nil is returned if not set.
+	OrigErr() error
+}
+
+// BatchError is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will return the string
+// representation of the batch, including every wrapped error.
+type BatchError interface {
+	// Satisfy the generic error interface.
+	error
+
+	// Returns the short phrase depicting the classification of the error.
+	Code() string
+
+	// Returns the error details message.
+	Message() string
+
+	// Returns the original errors if any were set. An empty slice is
+	// returned if none were set.
+	OrigErrs() []error
+}
+
+// New returns an Error object described by the code, message, and origErr.
+//
+// If origErr satisfies the Error interface it will not be wrapped within a new
+// Error object and will instead be returned.
+func New(code, message string, origErr error) Error {
+	if e, ok := origErr.(Error); ok && e != nil {
+		return e
+	}
+	return newBaseError(code, message, origErr)
+}
+
+// NewBatchError returns a BatchError described by the code and message, and
+// wrapping the array of errors.
+func NewBatchError(code, message string, errs []error) BatchError {
+	return newBaseErrors(code, message, errs)
+}
+
+// A RequestFailure is an interface to extract request failure information from
+// an Error such as the request ID of the failed request returned by a service.
+// RequestFailures may not always have a requestID value if the request failed
+// prior to reaching the service, such as on a connection error.
+//
+// Example:
+//
+//    output, err := s3manage.Upload(svc, input, opts)
+//    if err != nil {
+//        if reqerr, ok := err.(RequestFailure); ok {
+//            log.Println("Request failed:", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
+//        } else {
+//            log.Println("Error:", err.Error())
+//        }
+//    }
+//
+// Combined with awserr.Error:
+//
+//    output, err := s3manage.Upload(svc, input, opts)
+//    if err != nil {
+//        if awsErr, ok := err.(awserr.Error); ok {
+//            // Generic AWS Error with Code, Message, and original error (if any)
+//            fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+//
+//            if reqErr, ok := err.(awserr.RequestFailure); ok {
+//                // A service error occurred
+//                fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
+//            }
+//        } else {
+//            fmt.Println(err.Error())
+//        }
+//    }
+//
+type RequestFailure interface {
+	Error
+
+	// The status code of the HTTP response.
+	StatusCode() int
+
+	// The request ID returned by the service for a request failure. This will
+	// be empty if no request ID is available, such as when the request failed
+	// due to a connection error.
+	RequestID() string
+}
+
+// NewRequestFailure returns a new request error wrapper for the given Error
+// provided.
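+//
+// Example (an illustrative sketch; the code, status, and request ID values
+// are assumptions, not output produced by the SDK):
+//
+//    base := awserr.New("ServiceUnavailable", "service is unavailable", nil)
+//    reqErr := awserr.NewRequestFailure(base, 503, "9A6F2C3D1E0B4A85")
+//    fmt.Println(reqErr.Code(), reqErr.StatusCode(), reqErr.RequestID())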
+func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { + return newRequestError(err, statusCode, reqID) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go new file mode 100644 index 00000000..605f73c5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -0,0 +1,197 @@ +package awserr + +import "fmt" + +// SprintError returns a string of the formatted error code. +// +// Both extra and origErr are optional. If they are included their lines +// will be added, but if they are not included their lines will be ignored. +func SprintError(code, message, extra string, origErr error) string { + msg := fmt.Sprintf("%s: %s", code, message) + if extra != "" { + msg = fmt.Sprintf("%s\n\t%s", msg, extra) + } + if origErr != nil { + msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) + } + return msg +} + +// A baseError wraps the code and message which defines an error. It also +// can be used to wrap an original error object. +// +// Should be used as the root for errors satisfying the awserr.Error. Also +// for any error which does not fit into a specific error wrapper type. +type baseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. + errs []error +} + +// newBaseError returns an error object for the code, message, and err. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the error. +// +// origErr is the error object which will be nested under the new error to be returned. +func newBaseError(code, message string, origErr error) *baseError { + b := &baseError{ + code: code, + message: message, + } + + if origErr != nil { + b.errs = append(b.errs, origErr) + } + + return b +} + +// newBaseErrors returns an error object for the code, message, and errors. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the error. +// +// origErrs is the error objects which will be nested under the new errors to be returned. +func newBaseErrors(code, message string, origErrs []error) *baseError { + b := &baseError{ + code: code, + message: message, + errs: origErrs, + } + + return b +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. +func (b baseError) Error() string { + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b baseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. +func (b baseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b baseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no error +// was set. This only returns the first element in the list. 
If the full list is needed, use BatchError.
+func (b baseError) OrigErr() error {
+	if size := len(b.errs); size > 0 {
+		return b.errs[0]
+	}
+
+	return nil
+}
+
+// OrigErrs returns the original errors if any were set. An empty slice is
+// returned if no errors were set.
+func (b baseError) OrigErrs() []error {
+	return b.errs
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError Error
+
+// A requestError wraps a request or service error.
+//
+// Composed of baseError for code, message, and original error.
+type requestError struct {
+	awsError
+	statusCode int
+	requestID  string
+}
+
+// newRequestError returns a wrapped error with additional information for the
+// request status code and service requestID.
+//
+// Should be used to wrap all errors which involve service requests, even if
+// the request failed without a service response but has an HTTP status code
+// that may be meaningful.
+//
+// Also wraps original errors via the baseError.
+func newRequestError(err Error, statusCode int, requestID string) *requestError {
+	return &requestError{
+		awsError:   err,
+		statusCode: statusCode,
+		requestID:  requestID,
+	}
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (r requestError) Error() string {
+	extra := fmt.Sprintf("status code: %d, request id: %s",
+		r.statusCode, r.requestID)
+	return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (r requestError) String() string {
+	return r.Error()
+}
+
+// StatusCode returns the wrapped status code for the error.
+func (r requestError) StatusCode() int {
+	return r.statusCode
+}
+
+// RequestID returns the wrapped requestID.
+func (r requestError) RequestID() string {
+	return r.requestID
+}
+
+// An error list that satisfies the error interface.
+type errorList []error
+
+// Error returns the string representation of the error.
+//
+// Satisfies the error interface.
+func (e errorList) Error() string {
+	msg := ""
+	// An empty list returns an empty string.
+	if size := len(e); size > 0 {
+		for i := 0; i < size; i++ {
+			msg += fmt.Sprintf("%s", e[i].Error())
+			// Check whether the next index is within the slice; if it is,
+			// append a newline. Unconditionally appending would add a
+			// trailing '\n' and break unit tests.
+			if i+1 < size {
+				msg += "\n"
+			}
+		}
+	}
+	return msg
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
new file mode 100644
index 00000000..8429470b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
@@ -0,0 +1,100 @@
+package awsutil
+
+import (
+	"io"
+	"reflect"
+)
+
+// Copy deeply copies a src structure to dst. Useful for copying request and
+// response structures.
+//
+// Can copy between structs of different type, but will only copy fields which
+// are assignable, and exist in both structs. Fields which are not assignable,
+// or do not exist in both structs, are ignored.
+func Copy(dst, src interface{}) {
+	dstval := reflect.ValueOf(dst)
+	if !dstval.IsValid() {
+		panic("Copy dst cannot be nil")
+	}
+
+	rcopy(dstval, reflect.ValueOf(src), true)
+}
+
+// CopyOf returns a copy of src while also allocating the memory for dst.
+// src must be a pointer type or this operation will fail. +func CopyOf(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +// rcopy performs a recursive copy of values from the source to destination. +// +// root is used to skip certain aspects of the copy which are not valid +// for the root node of a object. +func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + dst.Set(reflect.New(e)) + } + if src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If its not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. + if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go new file mode 100644 index 00000000..59fa4a55 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go @@ -0,0 +1,27 @@ +package awsutil + +import ( + "reflect" +) + +// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. +// In addition to this, this method will also dereference the input values if +// possible so the DeepEqual performed will not fail if one parameter is a +// pointer and the other is not. +// +// DeepEqual will not perform indirection of nested values of the input parameters. 
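+//
+// Example (an illustrative sketch; the values are assumptions):
+//
+//    s := "value"
+//    awsutil.DeepEqual(s, &s) // true, the pointer argument is dereferenced
+//    awsutil.DeepEqual(s, nil) // false, only one of the two values is valid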
+func DeepEqual(a, b interface{}) bool {
+	ra := reflect.Indirect(reflect.ValueOf(a))
+	rb := reflect.Indirect(reflect.ValueOf(b))
+
+	if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
+		// If the elements are both nil, and of the same type, they are equal.
+		// If they are of different types, they are not equal.
+		return reflect.TypeOf(a) == reflect.TypeOf(b)
+	} else if raValid != rbValid {
+		// Both values must be valid to be equal.
+		return false
+	}
+
+	return reflect.DeepEqual(ra.Interface(), rb.Interface())
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
new file mode 100644
index 00000000..4d2a01e8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
@@ -0,0 +1,222 @@
+package awsutil
+
+import (
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/jmespath/go-jmespath"
+)
+
+var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
+
+// rValuesAtPath returns a slice of values found in value v. The values
+// in v are explored recursively so all nested values are collected.
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
+	pathparts := strings.Split(path, "||")
+	if len(pathparts) > 1 {
+		for _, pathpart := range pathparts {
+			vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
+			if len(vals) > 0 {
+				return vals
+			}
+		}
+		return nil
+	}
+
+	values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
+	components := strings.Split(path, ".")
+	for len(values) > 0 && len(components) > 0 {
+		var index *int64
+		var indexStar bool
+		c := strings.TrimSpace(components[0])
+		if c == "" { // no actual component, illegal syntax
+			return nil
+		} else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
+			// TODO normalize case for user
+			return nil // don't support unexported fields
+		}
+
+		// parse this component
+		if m := indexRe.FindStringSubmatch(c); m != nil {
+			c = m[1]
+			if m[2] == "" {
+				index = nil
+				indexStar = true
+			} else {
+				i, _ := strconv.ParseInt(m[2], 10, 32)
+				index = &i
+				indexStar = false
+			}
+		}
+
+		nextvals := []reflect.Value{}
+		for _, value := range values {
+			// pull component name out of struct member
+			if value.Kind() != reflect.Struct {
+				continue
+			}
+
+			if c == "*" { // pull all members
+				for i := 0; i < value.NumField(); i++ {
+					if f := reflect.Indirect(value.Field(i)); f.IsValid() {
+						nextvals = append(nextvals, f)
+					}
+				}
+				continue
+			}
+
+			value = value.FieldByNameFunc(func(name string) bool {
+				if c == name {
+					return true
+				} else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
+					return true
+				}
+				return false
+			})
+
+			if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
+				if !value.IsNil() {
+					value.Set(reflect.Zero(value.Type()))
+				}
+				return []reflect.Value{value}
+			}
+
+			if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
+				// TODO if the value is the terminus it should not be created
+				// if the value to be set to its position is nil.
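+				// Allocate a value for the nil pointer so the rest of the
+				// path can be created below it.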
+ value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, value := range values { + value := reflect.Indirect(value) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. +func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go new file mode 100644 index 00000000..0de3eaa0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -0,0 +1,103 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. 
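+//
+// Example (an illustrative sketch; the struct type is an assumption):
+//
+//    type Point struct{ X, Y *int64 }
+//    fmt.Println(awsutil.Prettify(Point{X: aws.Int64(1)}))
+//    // {
+//    //   X: 1
+//    // }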
+func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. +func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + strtype := v.Type().String() + if strtype == "time.Time" { + fmt.Fprintf(buf, "%s", v.Interface()) + break + } else if strings.HasPrefix(strtype, "io.") { + buf.WriteString("") + break + } + + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + prettify(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + prettify(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + prettify(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + case io.ReadSeeker, io.Reader: + format = "buffer(%p)" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go new file mode 100644 index 00000000..b6432f1a --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go @@ -0,0 +1,89 @@ +package awsutil + +import ( + "bytes" + "fmt" + "reflect" + "strings" +) + +// StringValue returns the string representation of a value. 
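+//
+// Example (an illustrative sketch; the struct type is an assumption). Unlike
+// Prettify there is no special handling for time.Time or io types:
+//
+//    type Tag struct{ Key *string }
+//    fmt.Println(awsutil.StringValue(Tag{Key: aws.String("env")}))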
+func StringValue(i interface{}) string { + var buf bytes.Buffer + stringValue(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + stringValue(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + stringValue(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + stringValue(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/client.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/client.go new file mode 100644 index 00000000..c8d0564d --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/client.go @@ -0,0 +1,120 @@ +package client + +import ( + "fmt" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides configuration to a service client instance. +type Config struct { + Config *aws.Config + Handlers request.Handlers + Endpoint, SigningRegion string +} + +// ConfigProvider provides a generic way for a service client to receive +// the ClientConfig without circular dependencies. +type ConfigProvider interface { + ClientConfig(serviceName string, cfgs ...*aws.Config) Config +} + +// A Client implements the base client request and response handling +// used by all service clients. +type Client struct { + request.Retryer + metadata.ClientInfo + + Config aws.Config + Handlers request.Handlers +} + +// New will return a pointer to a new initialized service client. 
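+//
+// Example (a minimal sketch; the service name and empty handler list are
+// assumptions):
+//
+//    c := client.New(
+//        *aws.NewConfig().WithMaxRetries(3),
+//        metadata.ClientInfo{ServiceName: "example"},
+//        request.Handlers{},
+//    )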
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { + svc := &Client{ + Config: cfg, + ClientInfo: info, + Handlers: handlers, + } + + switch retryer, ok := cfg.Retryer.(request.Retryer); { + case ok: + svc.Retryer = retryer + case cfg.Retryer != nil && cfg.Logger != nil: + s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) + cfg.Logger.Log(s) + fallthrough + default: + maxRetries := aws.IntValue(cfg.MaxRetries) + if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { + maxRetries = 3 + } + svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} + } + + svc.AddDebugHandlers() + + for _, option := range options { + option(svc) + } + + return svc +} + +// NewRequest returns a new Request pointer for the service API +// operation and parameters. +func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { + return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) +} + +// AddDebugHandlers injects debug logging handlers into the service to log request +// debug information. +func (c *Client) AddDebugHandlers() { + if !c.Config.LogLevel.AtLeast(aws.LogDebug) { + return + } + + c.Handlers.Send.PushFront(logRequest) + c.Handlers.Send.PushBack(logResponse) +} + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody) + + if logBody { + // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's + // Body as a NoOpCloser and will not be reset after read by the HTTP + // client reader. + r.Body.Seek(r.BodyStart, 0) + r.HTTPRequest.Body = ioutil.NopCloser(r.Body) + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +func logResponse(r *request.Request) { + var msg = "no response data" + if r.HTTPResponse != nil { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody) + msg = string(dumpedBody) + } else if r.Error != nil { + msg = r.Error.Error() + } + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg)) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go new file mode 100644 index 00000000..24d39ce5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -0,0 +1,45 @@ +package client + +import ( + "math" + "math/rand" + "time" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// DefaultRetryer implements basic retry logic using exponential backoff for +// most services. If you want to implement custom retry logic, implement the +// request.Retryer interface or create a structure type that composes this +// struct and override the specific methods. 
For example, to override only the MaxRetries method:
+//
+//    type retryer struct {
+//        client.DefaultRetryer
+//    }
+//
+//    // This implementation always has 100 max retries
+//    func (d retryer) MaxRetries() int { return 100 }
type DefaultRetryer struct {
+	NumMaxRetries int
+}
+
+// MaxRetries returns the maximum number of retries the service will make for
+// an individual API request.
+func (d DefaultRetryer) MaxRetries() int {
+	return d.NumMaxRetries
+}
+
+// RetryRules returns the delay duration before retrying this request again.
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+	delay := int(math.Pow(2, float64(r.RetryCount))) * (rand.Intn(30) + 30)
+	return time.Duration(delay) * time.Millisecond
+}
+
+// ShouldRetry returns whether the request should be retried.
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+	if r.HTTPResponse.StatusCode >= 500 {
+		return true
+	}
+	return r.IsErrorRetryable()
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
new file mode 100644
index 00000000..4778056d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
@@ -0,0 +1,12 @@
+package metadata
+
+// ClientInfo wraps immutable data from the client.Client structure.
+type ClientInfo struct {
+	ServiceName   string
+	APIVersion    string
+	Endpoint      string
+	SigningName   string
+	SigningRegion string
+	JSONVersion   string
+	TargetPrefix  string
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go
new file mode 100644
index 00000000..9e83e926
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go
@@ -0,0 +1,311 @@
+package aws
+
+import (
+	"net/http"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/credentials"
+)
+
+// UseServiceDefaultRetries instructs the config to use the service's own
+// default number of retries. This is also the default behavior when
+// Config.MaxRetries is nil.
+const UseServiceDefaultRetries = -1
+
+// RequestRetryer is an alias for a type that implements the request.Retryer
+// interface.
+type RequestRetryer interface{}
+
+// A Config provides service configuration for service clients. By default,
+// all clients will use the {defaults.DefaultConfig} structure.
+type Config struct {
+	// Enables verbose error printing of all credential chain errors.
+	// Should be used when wanting to see all errors while attempting to
+	// retrieve credentials.
+	CredentialsChainVerboseErrors *bool
+
+	// The credentials object to use when signing requests. Defaults to
+	// a chain of credential providers to search for credentials in environment
+	// variables, shared credential file, and EC2 Instance Roles.
+	Credentials *credentials.Credentials
+
+	// An optional endpoint URL (hostname only or fully qualified URI)
+	// that overrides the default generated endpoint for a client. Set this
+	// to `""` to use the default generated endpoint.
+	//
+	// @note You must still provide a `Region` value when specifying an
+	// endpoint for a client.
+	Endpoint *string
+
+	// The region to send requests to. This parameter is required and must
+	// be configured globally or on a per-client basis unless otherwise
+	// noted. A full list of regions is found in the "Regions and Endpoints"
+	// document.
+ // + // @see http://docs.aws.amazon.com/general/latest/gr/rande.html + // AWS Regions and Endpoints + Region *string + + // Set this to `true` to disable SSL when sending requests. Defaults + // to `false`. + DisableSSL *bool + + // The HTTP client to use when sending requests. Defaults to + // `http.DefaultClient`. + HTTPClient *http.Client + + // An integer value representing the logging level. The default log level + // is zero (LogOff), which represents no logging. To enable logging set + // to a LogLevel Value. + LogLevel *LogLevelType + + // The logger writer interface to write logging messages to. Defaults to + // standard out. + Logger Logger + + // The maximum number of times that a request will be retried for failures. + // Defaults to -1, which defers the max retry setting to the service specific + // configuration. + MaxRetries *int + + // Retryer guides how HTTP requests should be retried in case of recoverable failures. + // + // When nil or the value does not implement the request.Retryer interface, + // the request.DefaultRetryer will be used. + // + // When both Retryer and MaxRetries are non-nil, the former is used and + // the latter ignored. + // + // To set the Retryer field in a type-safe manner and with chaining, use + // the request.WithRetryer helper function: + // + // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) + // + Retryer RequestRetryer + + // Disables semantic parameter validation, which validates input for missing + // required fields and/or other semantic request input errors. + DisableParamValidation *bool + + // Disables the computation of request and response checksums, e.g., + // CRC32 checksums in Amazon DynamoDB. + DisableComputeChecksums *bool + + // Set this to `true` to force the request to use path-style addressing, + // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will + // use virtual hosted bucket addressing when possible + // (`http://BUCKET.s3.amazonaws.com/KEY`). + // + // @note This configuration option is specific to the Amazon S3 service. + // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // Amazon S3: Virtual Hosting of Buckets + S3ForcePathStyle *bool + + // Set this to `true` to disable the EC2Metadata client from overriding the + // default http.Client's Timeout. This is helpful if you do not want the EC2Metadata + // client to create a new http.Client. This options is only meaningful if you're not + // already using a custom HTTP client with the SDK. Enabled by default. + // + // Must be set and provided to the session.New() in order to disable the EC2Metadata + // overriding the timeout for default credentials chain. + // + // Example: + // sess := session.New(aws.NewConfig().WithEC2MetadataDiableTimeoutOverride(true)) + // svc := s3.New(sess) + // + EC2MetadataDisableTimeoutOverride *bool + + SleepDelay func(time.Duration) +} + +// NewConfig returns a new Config pointer that can be chained with builder methods to +// set multiple configuration values inline without using pointers. +// +// svc := s3.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10)) +// +func NewConfig() *Config { + return &Config{} +} + +// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning +// a Config pointer. +func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { + c.CredentialsChainVerboseErrors = &verboseErrs + return c +} + +// WithCredentials sets a config Credentials value returning a Config pointer +// for chaining. 
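+//
+// Example (a sketch; assumes creds was built with one of the credentials
+// package constructors, e.g. credentials.NewChainCredentials):
+//
+//    cfg := aws.NewConfig().WithCredentials(creds).WithRegion("us-west-2")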
+func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithEndpoint sets a config Endpoint value returning a Config pointer for +// chaining. +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = &endpoint + return c +} + +// WithRegion sets a config Region value returning a Config pointer for +// chaining. +func (c *Config) WithRegion(region string) *Config { + c.Region = ®ion + return c +} + +// WithDisableSSL sets a config DisableSSL value returning a Config pointer +// for chaining. +func (c *Config) WithDisableSSL(disable bool) *Config { + c.DisableSSL = &disable + return c +} + +// WithHTTPClient sets a config HTTPClient value returning a Config pointer +// for chaining. +func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithMaxRetries sets a config MaxRetries value returning a Config pointer +// for chaining. +func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. +func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. +func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. +func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. +func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. +func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// MergeIn merges the passed in configs into the existing config object. 
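+//
+// Example (illustrative):
+//
+//    base := aws.NewConfig().WithRegion("us-west-2")
+//    base.MergeIn(aws.NewConfig().WithMaxRetries(10))
+//    // base now carries both the Region and MaxRetries settings.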
+func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.EC2MetadataDisableTimeoutOverride != nil { + dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride + } + + if other.SleepDelay != nil { + dst.SleepDelay = other.SleepDelay + } +} + +// Copy will return a shallow copy of the Config object. If any additional +// configurations are provided they will be merged into the new config returned. +func (c *Config) Copy(cfgs ...*Config) *Config { + dst := &Config{} + dst.MergeIn(c) + + for _, cfg := range cfgs { + dst.MergeIn(cfg) + } + + return dst +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convert_types.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convert_types.go new file mode 100644 index 00000000..d6a7b08d --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -0,0 +1,357 @@ +package aws + +import "time" + +// String returns a pointer to of the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to of the bool value passed in. 
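+//
+// Example (illustrative; the same pointer/value round trip applies to the
+// other conversion helpers in this file):
+//
+//    b := aws.Bool(true)     // *bool pointing at true
+//    v := aws.BoolValue(nil) // false, a nil pointer yields the zero value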
+func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. +func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to of the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. +func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to of the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. 
+func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to of the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. +func Float64Value(v *float64) float64 { + if v != nil { + return *v + } + return 0 +} + +// Float64Slice converts a slice of float64 values into a slice of +// float64 pointers +func Float64Slice(src []float64) []*float64 { + dst := make([]*float64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float64ValueSlice converts a slice of float64 pointers into a slice of +// float64 values +func Float64ValueSlice(src []*float64) []float64 { + dst := make([]float64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float64Map converts a string map of float64 values into a string +// map of float64 pointers +func Float64Map(src map[string]float64) map[string]*float64 { + dst := make(map[string]*float64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float64ValueMap converts a string map of float64 pointers into a string +// map of float64 values +func Float64ValueMap(src map[string]*float64) map[string]float64 { + dst := make(map[string]float64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Time returns a pointer to of the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeValue returns the value of the time.Time pointer passed in or +// time.Time{} if the pointer is nil. 
+func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go new file mode 100644 index 00000000..1d3e656f --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -0,0 +1,139 @@ +package corehandlers + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "runtime" + "strconv" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Interface for matching types which also have a Len method. +type lener interface { + Len() int +} + +// BuildContentLengthHandler builds the content length of a request based on the body, +// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable +// to determine request body length and no "Content-Length" was specified it will panic. +var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ := strconv.ParseInt(slength, 10, 64) + r.HTTPRequest.ContentLength = length + return + } + + var length int64 + switch body := r.Body.(type) { + case nil: + length = 0 + case lener: + length = int64(body.Len()) + case io.Seeker: + r.BodyStart, _ = body.Seek(0, 1) + end, _ := body.Seek(0, 2) + body.Seek(r.BodyStart, 0) // make sure to seek back to original location + length = end - r.BodyStart + default: + panic("Cannot get length of body, must provide `ContentLength`") + } + + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) +}} + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. +var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +var reStatusCode = regexp.MustCompile(`^(\d{3})`) + +// SendHandler is a request handler to send service request using HTTP client. 
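+//
+// Handlers of this form are installed on a client's handler lists. A sketch
+// of appending a custom handler (the client c and function body are
+// assumptions):
+//
+//    c.Handlers.Send.PushBack(func(r *request.Request) {
+//        // inspect r.HTTPResponse here
+//    })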
+var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) { + var err error + r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest) + if err != nil { + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other url redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all other request errors. + r.Error = awserr.New("RequestError", "send request failed", err) + r.Retryable = aws.Bool(true) // network errors are retryable + } +}} + +// ValidateResponseHandler is a request handler to validate service response. +var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", nil) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. +var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + r.Config.SleepDelay(r.RetryDelay) + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } +}} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. +var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { + if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { + r.Error = aws.ErrMissingRegion + } else if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +}} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go new file mode 100644 index 00000000..3b53f5e0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go @@ -0,0 +1,144 @@ +package corehandlers + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// ValidateParametersHandler is a request handler to validate the input parameters. 
+// Validating parameters only has meaning if done prior to the request being sent. +var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { + if r.ParamsFilled() { + v := validator{errors: []string{}} + v.validateAny(reflect.ValueOf(r.Params), "") + + if count := len(v.errors); count > 0 { + format := "%d validation errors:\n- %s" + msg := fmt.Sprintf(format, count, strings.Join(v.errors, "\n- ")) + r.Error = awserr.New("InvalidParameter", msg, nil) + } + } +}} + +// A validator validates values. Collects validations errors which occurs. +type validator struct { + errors []string +} + +// validateAny will validate any struct, slice or map type. All validations +// are also performed recursively for nested types. +func (v *validator) validateAny(value reflect.Value, path string) { + value = reflect.Indirect(value) + if !value.IsValid() { + return + } + + switch value.Kind() { + case reflect.Struct: + v.validateStruct(value, path) + case reflect.Slice: + for i := 0; i < value.Len(); i++ { + v.validateAny(value.Index(i), path+fmt.Sprintf("[%d]", i)) + } + case reflect.Map: + for _, n := range value.MapKeys() { + v.validateAny(value.MapIndex(n), path+fmt.Sprintf("[%q]", n.String())) + } + } +} + +// validateStruct will validate the struct value's fields. If the structure has +// nested types those types will be validated also. +func (v *validator) validateStruct(value reflect.Value, path string) { + prefix := "." + if path == "" { + prefix = "" + } + + for i := 0; i < value.Type().NumField(); i++ { + f := value.Type().Field(i) + if strings.ToLower(f.Name[0:1]) == f.Name[0:1] { + continue + } + fvalue := value.FieldByName(f.Name) + + err := validateField(f, fvalue, validateFieldRequired, validateFieldMin) + if err != nil { + v.errors = append(v.errors, fmt.Sprintf("%s: %s", err.Error(), path+prefix+f.Name)) + continue + } + + v.validateAny(fvalue, path+prefix+f.Name) + } +} + +type validatorFunc func(f reflect.StructField, fvalue reflect.Value) error + +func validateField(f reflect.StructField, fvalue reflect.Value, funcs ...validatorFunc) error { + for _, fn := range funcs { + if err := fn(f, fvalue); err != nil { + return err + } + } + return nil +} + +// Validates that a field has a valid value provided for required fields. +func validateFieldRequired(f reflect.StructField, fvalue reflect.Value) error { + if f.Tag.Get("required") == "" { + return nil + } + + switch fvalue.Kind() { + case reflect.Ptr, reflect.Slice, reflect.Map: + if fvalue.IsNil() { + return fmt.Errorf("missing required parameter") + } + default: + if !fvalue.IsValid() { + return fmt.Errorf("missing required parameter") + } + } + return nil +} + +// Validates that if a value is provided for a field, that value must be at +// least a minimum length. +func validateFieldMin(f reflect.StructField, fvalue reflect.Value) error { + minStr := f.Tag.Get("min") + if minStr == "" { + return nil + } + min, _ := strconv.ParseInt(minStr, 10, 64) + + kind := fvalue.Kind() + if kind == reflect.Ptr { + if fvalue.IsNil() { + return nil + } + fvalue = fvalue.Elem() + } + + switch fvalue.Kind() { + case reflect.String: + if int64(fvalue.Len()) < min { + return fmt.Errorf("field too short, minimum length %d", min) + } + case reflect.Slice, reflect.Map: + if fvalue.IsNil() { + return nil + } + if int64(fvalue.Len()) < min { + return fmt.Errorf("field too short, minimum length %d", min) + } + + // TODO min can also apply to number minimum value. 
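+
+		// For illustration, a hedged sketch of the struct tags this validator
+		// reads; the field names below are hypothetical, not from any real
+		// generated API shape:
+		//
+		//     type ExampleInput struct {
+		//         Name   *string `required:"true"` // fails validateFieldRequired when nil
+		//         Prefix *string `min:"3"`         // fails validateFieldMin when set and len < 3
+		//     }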
+
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
new file mode 100644
index 00000000..857311f6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
@@ -0,0 +1,100 @@
+package credentials
+
+import (
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var (
+	// ErrNoValidProvidersFoundInChain is returned when there are no valid
+	// providers in the ChainProvider.
+	//
+	// This has been deprecated. For verbose error messaging set
+	// aws.Config.CredentialsChainVerboseErrors to true
+	//
+	// @readonly
+	ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
+		`no valid providers in chain. Deprecated.
+	For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
+		nil)
+)
+
+// A ChainProvider will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The ChainProvider provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the Providers
+// in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns valid credentials Value, ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
+// In this example EnvProvider will first check if any credentials are available
+// via the environment variables. If there are none, ChainProvider will check
+// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
+// does not return any credentials ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain.
+//
+//     creds := NewChainCredentials(
+//         []Provider{
+//             &EnvProvider{},
+//             &EC2RoleProvider{
+//                 Client: ec2metadata.New(sess),
+//             },
+//         })
+//
+//     // Usage of ChainCredentials with aws.Config
+//     svc := ec2.New(&aws.Config{Credentials: creds})
+//
+type ChainProvider struct {
+	Providers     []Provider
+	curr          Provider
+	VerboseErrors bool
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+	return NewCredentials(&ChainProvider{
+		Providers: append([]Provider{}, providers...),
+	})
+}
+
+// Retrieve returns the credentials value, or an error if no provider
+// returned a value without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+	var errs []error
+	for _, p := range c.Providers {
+		creds, err := p.Retrieve()
+		if err == nil {
+			c.curr = p
+			return creds, nil
+		}
+		errs = append(errs, err)
+	}
+	c.curr = nil
+
+	var err error
+	err = ErrNoValidProvidersFoundInChain
+	if c.VerboseErrors {
+		err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+	}
+	return Value{}, err
+}
+
+// IsExpired returns the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool { + if c.curr != nil { + return c.curr.IsExpired() + } + + return true +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go new file mode 100644 index 00000000..7b8ebf5f --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -0,0 +1,223 @@ +// Package credentials provides credential retrieval and management +// +// The Credentials is the primary method of getting access to and managing +// credentials Values. Using dependency injection retrieval of the credential +// values is handled by a object which satisfies the Provider interface. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. +// +// The Provider is responsible for determining when credentials Value have expired. +// It is also important to note that Credentials will always call Retrieve the +// first time Credentials.Get() is called. +// +// Example of using the environment variable credentials. +// +// creds := NewEnvCredentials() +// +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } +// +// Example of forcing credentials to expire and be refreshed on the next Get(). +// This may be helpful to proactively expire credentials and refresh them sooner +// than they would naturally expire on their own. +// +// creds := NewCredentials(&EC2RoleProvider{}) +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. +// +// +// Custom Provider +// +// Each Provider built into this package also provides a helper method to generate +// a Credentials pointer setup with the provider. To use a custom Provider just +// create a type which satisfies the Provider interface and pass it to the +// NewCredentials method. +// +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} +// +// creds := NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() +// +package credentials + +import ( + "sync" + "time" +) + +// AnonymousCredentials is an empty Credential object that can be used as +// dummy placeholder credentials for requests that do not need signed. +// +// This Credentials can be used to configure a service to not sign requests +// when making service API calls. For example, when accessing public +// s3 buckets. +// +// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials}) +// // Access public S3 buckets. +// +// @readonly +var AnonymousCredentials = NewStaticCredentials("", "", "") + +// A Value is the AWS credentials value for individual credential fields. +type Value struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string + + // Provider used to get credentials + ProviderName string +} + +// A Provider is the interface for any component which will provide credentials +// Value. A provider is required to manage its own Expired state, and what to +// be expired means. +// +// The Provider should not need to implement its own mutexes, because +// that will be managed by Credentials. 
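+//
+// For illustration only, a minimal hedged sketch of a conforming provider
+// (the type, env var names, and error code are hypothetical):
+//
+//     type envTokenProvider struct{ retrieved bool }
+//
+//     func (p *envTokenProvider) Retrieve() (Value, error) {
+//         id, secret := os.Getenv("MY_ID"), os.Getenv("MY_SECRET")
+//         if id == "" || secret == "" {
+//             return Value{}, awserr.New("MyCredsNotFound", "missing env creds", nil)
+//         }
+//         p.retrieved = true
+//         return Value{AccessKeyID: id, SecretAccessKey: secret}, nil
+//     }
+//
+//     // Never expires once successfully retrieved.
+//     func (p *envTokenProvider) IsExpired() bool { return !p.retrieved }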
+type Provider interface {
+	// Retrieve returns the credentials Value if it was successfully retrieved.
+	// An error is returned if the value was not obtainable, or empty.
+	Retrieve() (Value, error)
+
+	// IsExpired returns if the credentials are no longer valid, and need
+	// to be retrieved.
+	IsExpired() bool
+}
+
+// An Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best way to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+//     type EC2RoleProvider struct {
+//         Expiry
+//         ...
+//     }
type Expiry struct {
+	// The date/time at which the credentials expire.
+	expiration time.Time
+
+	// If set will be used by IsExpired to determine the current time.
+	// Defaults to time.Now if CurrentTime is not set. Available for testing
+	// to be able to mock out the current time.
+	CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+	e.expiration = expiration
+	if window > 0 {
+		e.expiration = e.expiration.Add(-window)
+	}
+}
+
+// IsExpired returns if the credentials are expired.
+func (e *Expiry) IsExpired() bool {
+	if e.CurrentTime == nil {
+		e.CurrentTime = time.Now
+	}
+	return e.expiration.Before(e.CurrentTime())
+}
+
+// A Credentials provides concurrency safe retrieval of AWS credentials Value.
+// Credentials will cache the credentials value until they expire. Once the value
+// expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronization state so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that
+// will return the cached credentials Value until IsExpired() returns true.
+type Credentials struct {
+	creds        Value
+	forceRefresh bool
+	m            sync.Mutex
+
+	provider Provider
+}
+
+// NewCredentials returns a pointer to a new Credentials with the provider set.
+func NewCredentials(provider Provider) *Credentials {
+	return &Credentials{
+		provider:     provider,
+		forceRefresh: true,
+	}
+}
+
+// Get returns the credentials value, or error if the credentials Value failed
+// to be retrieved.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+func (c *Credentials) Get() (Value, error) {
+	c.m.Lock()
+	defer c.m.Unlock()
+
+	if c.isExpired() {
+		creds, err := c.provider.Retrieve()
+		if err != nil {
+			return Value{}, err
+		}
+		c.creds = creds
+		c.forceRefresh = false
+	}
+
+	return c.creds, nil
+}
+
+// Expire expires the credentials and forces them to be retrieved on the
+// next call to Get().
+//
+// This will override the Provider's expired state, and force Credentials
+// to call the Provider's Retrieve().
+func (c *Credentials) Expire() { + c.m.Lock() + defer c.m.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.m.Lock() + defer c.m.Unlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go new file mode 100644 index 00000000..36f8add3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -0,0 +1,178 @@ +package ec2rolecreds + +import ( + "bufio" + "encoding/json" + "fmt" + "path" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/ec2metadata" +) + +// ProviderName provides a name of EC2Role provider +const ProviderName = "EC2RoleProvider" + +// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if +// those credentials are expired. +// +// Example how to configure the EC2RoleProvider with custom http Client, Endpoint +// or ExpiryWindow +// +// p := &ec2rolecreds.EC2RoleProvider{ +// // Pass in a custom timeout to be used when requesting +// // IAM EC2 Role credentials. +// Client: ec2metadata.New(sess, aws.Config{ +// HTTPClient: &http.Client{Timeout: 10 * time.Second}, +// }), +// +// // Do not use early expiry of credentials. If a non zero value is +// // specified the credentials will be expired early +// ExpiryWindow: 0, +// } +type EC2RoleProvider struct { + credentials.Expiry + + // Required EC2Metadata client to use when connecting to EC2 metadata service. + Client *ec2metadata.EC2Metadata + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. +// The ConfigProvider is satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: ec2metadata.New(c), + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 +// metadata service. 
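+//
+// A hedged usage sketch; "sess" is assumed to be a *session.Session created
+// elsewhere:
+//
+//     creds := ec2rolecreds.NewCredentialsWithClient(
+//         ec2metadata.New(sess),
+//         func(p *ec2rolecreds.EC2RoleProvider) {
+//             // Refresh one minute before the role credentials expire.
+//             p.ExpiryWindow = time.Minute
+//         },
+//     )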
+func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+	p := &EC2RoleProvider{
+		Client: client,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// An error will be returned if the request fails, or if the SDK is unable
+// to extract the desired credentials.
+func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
+	credsList, err := requestCredList(m.Client)
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName}, err
+	}
+
+	if len(credsList) == 0 {
+		return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
+	}
+	credsName := credsList[0]
+
+	roleCreds, err := requestCred(m.Client, credsName)
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName}, err
+	}
+
+	m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
+
+	return credentials.Value{
+		AccessKeyID:     roleCreds.AccessKeyID,
+		SecretAccessKey: roleCreds.SecretAccessKey,
+		SessionToken:    roleCreds.Token,
+		ProviderName:    ProviderName,
+	}, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for unmarshalling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+	// Success State
+	Expiration      time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+
+	// Error state
+	Code    string
+	Message string
+}
+
+const iamSecurityCredsPath = "/iam/security-credentials"
+
+// requestCredList requests a list of credentials from the EC2 service.
+// If there are no credentials, or there is an error making or receiving
+// the request, an error will be returned.
+func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
+	resp, err := client.GetMetadata(iamSecurityCredsPath)
+	if err != nil {
+		return nil, awserr.New("EC2RoleRequestError", "failed to list EC2 Roles", err)
+	}
+
+	credsList := []string{}
+	s := bufio.NewScanner(strings.NewReader(resp))
+	for s.Scan() {
+		credsList = append(credsList, s.Text())
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, awserr.New("SerializationError", "failed to read list of EC2 Roles", err)
+	}
+
+	return credsList, nil
+}
+
+// requestCred requests the credentials for a specific role from the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the response,
+// an error will be returned.
+func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
+	resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
+	if err != nil {
+		return ec2RoleCredRespBody{},
+			awserr.New("EC2RoleRequestError",
+				fmt.Sprintf("failed to get %s EC2 Role credentials", credsName),
+				err)
+	}
+
+	respCreds := ec2RoleCredRespBody{}
+	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
+		return ec2RoleCredRespBody{},
+			awserr.New("SerializationError",
+				fmt.Sprintf("failed to decode %s EC2 Role credentials", credsName),
+				err)
+	}
+
+	if respCreds.Code != "Success" {
+		// If an error code was returned, something failed requesting the role.
+		return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
+	}
+
+	return respCreds, nil
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
new file mode 100644
index 00000000..96655bc4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
@@ -0,0 +1,77 @@
+package credentials
+
+import (
+	"os"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// EnvProviderName provides a name of Env provider
+const EnvProviderName = "EnvProvider"
+
+var (
+	// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
+	// found in the process's environment.
+	//
+	// @readonly
+	ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
+
+	// ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
+	// can't be found in the process's environment.
+	//
+	// @readonly
+	ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
+)
+
+// An EnvProvider retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
+type EnvProvider struct {
+	retrieved bool
+}
+
+// NewEnvCredentials returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvCredentials() *Credentials {
+	return NewCredentials(&EnvProvider{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvProvider) Retrieve() (Value, error) {
+	e.retrieved = false
+
+	id := os.Getenv("AWS_ACCESS_KEY_ID")
+	if id == "" {
+		id = os.Getenv("AWS_ACCESS_KEY")
+	}
+
+	secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+	if secret == "" {
+		secret = os.Getenv("AWS_SECRET_KEY")
+	}
+
+	if id == "" {
+		return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
+	}
+
+	if secret == "" {
+		return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
+	}
+
+	e.retrieved = true
+	return Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    os.Getenv("AWS_SESSION_TOKEN"),
+		ProviderName:    EnvProviderName,
+	}, nil
+}
+
+// IsExpired returns true if the credentials have not yet been retrieved.
+func (e *EnvProvider) IsExpired() bool {
+	return !e.retrieved
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini
new file mode 100644
index 00000000..7fc91d9d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini
@@ -0,0 +1,12 @@
+[default]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+aws_session_token = token
+
+[no_token]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+
+[with_colon]
+aws_access_key_id: accessKey
+aws_secret_access_key: secret
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
new file mode 100644
index 00000000..7fb7cbf0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
@@ -0,0 +1,151 @@
+package credentials
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/go-ini/ini"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// SharedCredsProviderName provides a name of SharedCreds provider
+const SharedCredsProviderName = "SharedCredentialsProvider"
+
+var (
+	// ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
+	//
+	// @readonly
+	ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
+)
+
+// A SharedCredentialsProvider retrieves credentials from the current user's home
+// directory, and keeps track of whether those credentials are expired.
+//
+// Profile ini file example: $HOME/.aws/credentials
+type SharedCredentialsProvider struct {
+	// Path to the shared credentials file.
+	//
+	// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
+	// env value is empty will default to current user's home directory.
+	// Linux/OSX: "$HOME/.aws/credentials"
+	// Windows:   "%USERPROFILE%\.aws\credentials"
+	Filename string
+
+	// AWS Profile to extract credentials from the shared credentials file. If empty
+	// will default to environment variable "AWS_PROFILE" or "default" if
+	// environment variable is also not set.
+	Profile string
+
+	// retrieved states if the credentials have been successfully retrieved.
+	retrieved bool
+}
+
+// NewSharedCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewSharedCredentials(filename, profile string) *Credentials {
+	return NewCredentials(&SharedCredentialsProvider{
+		Filename: filename,
+		Profile:  profile,
+	})
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
+	p.retrieved = false
+
+	filename, err := p.filename()
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, err
+	}
+
+	creds, err := loadProfile(filename, p.profile())
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, err
+	}
+
+	p.retrieved = true
+	return creds, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *SharedCredentialsProvider) IsExpired() bool {
+	return !p.retrieved
+}
+
+// loadProfile loads the named profile from the file pointed to by the shared
+// credentials filename. The credentials retrieved from the profile will be
+// returned on success. An error will be
+// returned if it fails to read from the file, or the data is invalid.
+func loadProfile(filename, profile string) (Value, error) {
+	config, err := ini.Load(filename)
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
+	}
+	iniProfile, err := config.GetSection(profile)
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err)
+	}
+
+	id, err := iniProfile.GetKey("aws_access_key_id")
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
+			fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
+			err)
+	}
+
+	secret, err := iniProfile.GetKey("aws_secret_access_key")
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
+			fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
+			err)
+	}
+
+	// Default to empty string if not found
+	token := iniProfile.Key("aws_session_token")
+
+	return Value{
+		AccessKeyID:     id.String(),
+		SecretAccessKey: secret.String(),
+		SessionToken:    token.String(),
+		ProviderName:    SharedCredsProviderName,
+	}, nil
+}
+
+// filename returns the filename to use to read AWS shared credentials.
+//
+// Will return an error if the user's home directory path cannot be found.
+func (p *SharedCredentialsProvider) filename() (string, error) {
+	if p.Filename == "" {
+		if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" {
+			return p.Filename, nil
+		}
+
+		homeDir := os.Getenv("HOME") // *nix
+		if homeDir == "" {           // Windows
+			homeDir = os.Getenv("USERPROFILE")
+		}
+		if homeDir == "" {
+			return "", ErrSharedCredentialsHomeNotFound
+		}
+
+		p.Filename = filepath.Join(homeDir, ".aws", "credentials")
+	}
+
+	return p.Filename, nil
+}
+
+// profile returns the AWS shared credentials profile. If empty will read
+// environment variable "AWS_PROFILE". If that is not set profile will
+// return "default".
+func (p *SharedCredentialsProvider) profile() string {
+	if p.Profile == "" {
+		p.Profile = os.Getenv("AWS_PROFILE")
+	}
+	if p.Profile == "" {
+		p.Profile = "default"
+	}
+
+	return p.Profile
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
new file mode 100644
index 00000000..71189e73
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
@@ -0,0 +1,48 @@
+package credentials
+
+import (
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// StaticProviderName provides a name of Static provider
+const StaticProviderName = "StaticProvider"
+
+var (
+	// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
+	//
+	// @readonly
+	ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
+)
+
+// A StaticProvider is a set of credentials which are set programmatically,
+// and will never expire.
+type StaticProvider struct {
+	Value
+}
+
+// NewStaticCredentials returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
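+//
+// A hedged usage sketch (the key values below are placeholders, not real
+// credentials):
+//
+//     creds := credentials.NewStaticCredentials("AKID", "SECRET", "TOKEN")
+//     svc := s3.New(session.New(&aws.Config{Credentials: creds}))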
+func NewStaticCredentials(id, secret, token string) *Credentials {
+	return NewCredentials(&StaticProvider{Value: Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    token,
+	}})
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+	if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+		return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
+	}
+
+	s.Value.ProviderName = StaticProviderName
+	return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For StaticProvider, the credentials never expire.
+func (s *StaticProvider) IsExpired() bool {
+	return false
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 00000000..4d6408b9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,134 @@
+// Package stscreds provides credential Providers for retrieving STS AWS credentials.
+//
+// STS provides multiple ways to retrieve credentials which can be used when making
+// future AWS service API operation calls.
+package stscreds
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/service/sts"
+)
+
+// ProviderName provides a name of AssumeRole provider
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoler represents the minimal subset of the STS client API used by this provider.
+type AssumeRoler interface {
+	AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+}
+
+// DefaultDuration is the default amount of time that the credentials
+// will be valid for.
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time. This provider must be used explicitly,
+// as it is not included in the credentials chain.
+type AssumeRoleProvider struct {
+	credentials.Expiry
+
+	// STS client to make assume role request with.
+	Client AssumeRoler
+
+	// Role to be assumed.
+	RoleARN string
+
+	// Session name, if you wish to reuse the credentials elsewhere.
+	RoleSessionName string
+
+	// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
+	Duration time.Duration
+
+	// Optional ExternalID to pass along, defaults to nil if not set.
+	ExternalID *string
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation.
+//
+// Takes a Config provider to create the STS client. The ConfigProvider is
+// satisfied by the session.Session type.
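+//
+// A hedged usage sketch; "sess" is assumed to be a *session.Session created
+// elsewhere, and the role ARN is a placeholder:
+//
+//     creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/example")
+//     svc := s3.New(sess, &aws.Config{Credentials: creds})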
+func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+	p := &AssumeRoleProvider{
+		Client:   sts.New(c),
+		RoleARN:  roleARN,
+		Duration: DefaultDuration,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation.
+//
+// Takes an AssumeRoler which can be satisfied by the STS client.
+func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+	p := &AssumeRoleProvider{
+		Client:   svc,
+		RoleARN:  roleARN,
+		Duration: DefaultDuration,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// Retrieve generates a new set of temporary credentials using STS.
+func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
+
+	// Apply defaults where parameters are not set.
+	if p.RoleSessionName == "" {
+		// Try to work out a role name that will hopefully end up unique.
+		p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
+	}
+	if p.Duration == 0 {
+		// Expire as often as AWS permits.
+		p.Duration = DefaultDuration
+	}
+
+	roleOutput, err := p.Client.AssumeRole(&sts.AssumeRoleInput{
+		DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
+		RoleArn:         aws.String(p.RoleARN),
+		RoleSessionName: aws.String(p.RoleSessionName),
+		ExternalId:      p.ExternalID,
+	})
+
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName}, err
+	}
+
+	// We will proactively generate new credentials before they expire.
+	p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
+
+	return credentials.Value{
+		AccessKeyID:     *roleOutput.Credentials.AccessKeyId,
+		SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
+		SessionToken:    *roleOutput.Credentials.SessionToken,
+		ProviderName:    ProviderName,
+	}, nil
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
new file mode 100644
index 00000000..42c883aa
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -0,0 +1,96 @@
+// Package defaults is a collection of helpers to retrieve the SDK's default
+// configuration and handlers.
+//
+// Generally this package shouldn't be used directly; use session.Session
+// instead. This package is useful when you need to reset the defaults
+// of a session or service client to the SDK defaults before setting
+// additional parameters.
+package defaults
+
+import (
+	"net/http"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/endpoints"
+)
+
+// A Defaults provides a collection of default values for SDK clients.
+type Defaults struct {
+	Config   *aws.Config
+	Handlers request.Handlers
+}
+
+// Get returns the SDK's default values with Config and handlers pre-configured.
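+//
+// A hedged usage sketch of overriding pieces of the returned Defaults
+// before building a client (the region value is only an example):
+//
+//     d := defaults.Get()
+//     d.Config.WithRegion("us-west-2").WithMaxRetries(3)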
+func Get() Defaults {
+	cfg := Config()
+	handlers := Handlers()
+	cfg.Credentials = CredChain(cfg, handlers)
+
+	return Defaults{
+		Config:   cfg,
+		Handlers: handlers,
+	}
+}
+
+// Config returns the default configuration without credentials.
+// To retrieve a config with credentials also included use
+// `defaults.Get().Config` instead.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the configuration of an
+// existing service client or session.
+func Config() *aws.Config {
+	return aws.NewConfig().
+		WithCredentials(credentials.AnonymousCredentials).
+		WithRegion(os.Getenv("AWS_REGION")).
+		WithHTTPClient(http.DefaultClient).
+		WithMaxRetries(aws.UseServiceDefaultRetries).
+		WithLogger(aws.NewDefaultLogger()).
+		WithLogLevel(aws.LogOff).
+		WithSleepDelay(time.Sleep)
+}
+
+// Handlers returns the default request handlers.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the request handlers of an
+// existing service client or session.
+func Handlers() request.Handlers {
+	var handlers request.Handlers
+
+	handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+	handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+	handlers.Send.PushBackNamed(corehandlers.SendHandler)
+	handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+	handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
+
+	return handlers
+}
+
+// CredChain returns the default credential chain.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the credentials of an
+// existing service client or session's Config.
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
+	endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, *cfg.Region, true)
+
+	return credentials.NewCredentials(&credentials.ChainProvider{
+		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+		Providers: []credentials.Provider{
+			&credentials.EnvProvider{},
+			&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+			&ec2rolecreds.EC2RoleProvider{
+				Client:       ec2metadata.NewClient(*cfg, handlers, endpoint, signingRegion),
+				ExpiryWindow: 5 * time.Minute,
+			},
+		}})
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
new file mode 100644
index 00000000..e5137ca1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -0,0 +1,43 @@
+package ec2metadata
+
+import (
+	"path"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// GetMetadata uses the path provided to request information from the EC2
+// instance metadata service.
+func (c *EC2Metadata) GetMetadata(p string) (string, error) {
+	op := &request.Operation{
+		Name:       "GetMetadata",
+		HTTPMethod: "GET",
+		HTTPPath:   path.Join("/", "meta-data", p),
+	}
+
+	output := &metadataOutput{}
+	req := c.NewRequest(op, nil, output)
+
+	return output.Content, req.Send()
+}
+
+// Region returns the region the instance is running in.
+func (c *EC2Metadata) Region() (string, error) {
+	resp, err := c.GetMetadata("placement/availability-zone")
+	if err != nil {
+		return "", err
+	}
+
+	// returns the region without the suffix. E.g. us-west-2a becomes us-west-2.
+	return resp[:len(resp)-1], nil
+}
+
+// Available returns if the application has access to the EC2 Metadata service.
+// Can be used to determine if application is running within an EC2 Instance and
+// the metadata service is available.
+func (c *EC2Metadata) Available() bool {
+	if _, err := c.GetMetadata("instance-id"); err != nil {
+		return false
+	}
+
+	return true
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
new file mode 100644
index 00000000..9e16c1cf
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -0,0 +1,117 @@
+// Package ec2metadata provides the client for making API calls to the
+// EC2 Metadata service.
+package ec2metadata
+
+import (
+	"io/ioutil"
+	"net/http"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ServiceName is the name of the service.
+const ServiceName = "ec2metadata"
+
+// An EC2Metadata is an EC2 Metadata service Client.
+type EC2Metadata struct {
+	*client.Client
+}
+
+// New creates a new instance of the EC2Metadata client with a session.
+// This client is safe to use across multiple goroutines.
+//
+// Example:
+//     // Create an EC2Metadata client from just a session.
+//     svc := ec2metadata.New(mySession)
+//
+//     // Create an EC2Metadata client with additional configuration
+//     svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugWithHTTPBody))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// NewClient returns a new EC2Metadata client. Should be used to create
+// a client when not using a session. Generally using just New with a session
+// is preferred.
+//
+// If an unmodified HTTP client from the stdlib default, or no client, is
+// provided, the EC2Metadata HTTP client's timeout will be shortened.
+// To disable this set Config.EC2MetadataDisableTimeoutOverride to true. Enabled by default.
+func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
+	if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
+		// If the http client is unmodified and this feature is not disabled
+		// set custom timeouts for EC2Metadata requests.
+		cfg.HTTPClient = &http.Client{
+			// use a shorter timeout than default because the metadata
+			// service is local if it is running, and to fail faster
+			// if not running on an ec2 instance.
+			Timeout: 5 * time.Second,
+		}
+	}
+
+	svc := &EC2Metadata{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName: ServiceName,
+				Endpoint:    endpoint,
+				APIVersion:  "latest",
+			},
+			handlers,
+		),
+	}
+
+	svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
+	svc.Handlers.UnmarshalError.PushBack(unmarshalError)
+	svc.Handlers.Validate.Clear()
+	svc.Handlers.Validate.PushBack(validateEndpointHandler)
+
+	// Add additional options to the service config
+	for _, option := range opts {
+		option(svc.Client)
+	}
+
+	return svc
+}
+
+func httpClientZero(c *http.Client) bool {
+	return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
+}
+
+type metadataOutput struct {
+	Content string
+}
+
+func unmarshalHandler(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+	b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+	if err != nil {
+		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata response", err)
+	}
+
+	data := r.Data.(*metadataOutput)
+	data.Content = string(b)
+}
+
+func unmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+	_, err := ioutil.ReadAll(r.HTTPResponse.Body)
+	if err != nil {
+		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error response", err)
+	}
+
+	// TODO extract the error...
+}
+
+func validateEndpointHandler(r *request.Request) {
+	if r.ClientInfo.Endpoint == "" {
+		r.Error = aws.ErrMissingEndpoint
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/errors.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/errors.go
new file mode 100644
index 00000000..57663616
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/errors.go
@@ -0,0 +1,17 @@
+package aws
+
+import "github.com/aws/aws-sdk-go/aws/awserr"
+
+var (
+	// ErrMissingRegion is an error that is returned if region configuration is
+	// not found.
+	//
+	// @readonly
+	ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
+
+	// ErrMissingEndpoint is an error that is returned if an endpoint cannot be
+	// resolved for a service.
+	//
+	// @readonly
+	ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
+)
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/logger.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/logger.go
new file mode 100644
index 00000000..f5369487
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/logger.go
@@ -0,0 +1,98 @@
+package aws
+
+import (
+	"log"
+	"os"
+)
+
+// A LogLevelType defines the level logging should be performed at. Used to instruct
+// the SDK which statements should be logged.
+type LogLevelType uint
+
+// LogLevel returns the pointer to a LogLevel. Should be used to workaround
+// not being able to take the address of a non-composite literal.
+func LogLevel(l LogLevelType) *LogLevelType {
+	return &l
+}
+
+// Value returns the LogLevel value or the default value LogOff if the LogLevel
+// is nil. Safe to use on nil value LogLevelTypes.
+func (l *LogLevelType) Value() LogLevelType {
+	if l != nil {
+		return *l
+	}
+	return LogOff
+}
+
+// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
+// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
+// LogLevel is nil, will default to LogOff comparison.
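+//
+// A hedged sketch of the sub-level bit semantics:
+//
+//     l := LogDebugWithHTTPBody
+//     l.Matches(LogDebug)            // true, the sub level includes LogDebug
+//     l.Matches(LogDebugWithSigning) // false, a different sub level bit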
+func (l *LogLevelType) Matches(v LogLevelType) bool {
+	c := l.Value()
+	return c&v == v
+}
+
+// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
+// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
+// to LogOff comparison.
+func (l *LogLevelType) AtLeast(v LogLevelType) bool {
+	c := l.Value()
+	return c >= v
+}
+
+const (
+	// LogOff states that no logging should be performed by the SDK. This is the
+	// default state of the SDK, and should be used to disable all logging.
+	LogOff LogLevelType = iota * 0x1000
+
+	// LogDebug states that debug output should be logged by the SDK. This should
+	// be used to inspect requests made and responses received.
+	LogDebug
+)
+
+// Debug Logging Sub Levels
+const (
+	// LogDebugWithSigning states that the SDK should log request signing and
+	// presigning events. This should be used to log the signing details of
+	// requests for debugging. Will also enable LogDebug.
+	LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
+
+	// LogDebugWithHTTPBody states the SDK should log HTTP request and response
+	// HTTP bodies in addition to the headers and path. This should be used to
+	// see the body content of requests and responses made while using the SDK.
+	// Will also enable LogDebug.
+	LogDebugWithHTTPBody
+
+	// LogDebugWithRequestRetries states the SDK should log when service requests
+	// are being retried. Will also enable LogDebug.
+	LogDebugWithRequestRetries
+
+	// LogDebugWithRequestErrors states the SDK should log when service requests fail
+	// to build, send, validate, or unmarshal.
+	LogDebugWithRequestErrors
+)
+
+// A Logger is a minimalistic interface for the SDK to log messages to. Should
+// be used to provide custom logging writers for the SDK to use.
+type Logger interface {
+	Log(...interface{})
+}
+
+// NewDefaultLogger returns a Logger which will write log messages to stdout, and
+// use the same formatting runes as the stdlib log.Logger.
+func NewDefaultLogger() Logger {
+	return &defaultLogger{
+		logger: log.New(os.Stdout, "", log.LstdFlags),
+	}
+}
+
+// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
+type defaultLogger struct {
+	logger *log.Logger
+}
+
+// Log logs the parameters to the stdlib logger. See log.Println.
+func (l defaultLogger) Log(args ...interface{}) {
+	l.logger.Println(args...)
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/handlers.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/handlers.go
new file mode 100644
index 00000000..3e90a797
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/handlers.go
@@ -0,0 +1,140 @@
+package request
+
+import (
+	"fmt"
+	"strings"
+)
+
+// A Handlers provides a collection of request handlers for various
+// stages of handling requests.
+type Handlers struct {
+	Validate         HandlerList
+	Build            HandlerList
+	Sign             HandlerList
+	Send             HandlerList
+	ValidateResponse HandlerList
+	Unmarshal        HandlerList
+	UnmarshalMeta    HandlerList
+	UnmarshalError   HandlerList
+	Retry            HandlerList
+	AfterRetry       HandlerList
+}
+
+// Copy returns a copy of this handler's lists.
+func (h *Handlers) Copy() Handlers { + return Handlers{ + Validate: h.Validate.copy(), + Build: h.Build.copy(), + Sign: h.Sign.copy(), + Send: h.Send.copy(), + ValidateResponse: h.ValidateResponse.copy(), + Unmarshal: h.Unmarshal.copy(), + UnmarshalError: h.UnmarshalError.copy(), + UnmarshalMeta: h.UnmarshalMeta.copy(), + Retry: h.Retry.copy(), + AfterRetry: h.AfterRetry.copy(), + } +} + +// Clear removes callback functions for all handlers +func (h *Handlers) Clear() { + h.Validate.Clear() + h.Build.Clear() + h.Send.Clear() + h.Sign.Clear() + h.Unmarshal.Clear() + h.UnmarshalMeta.Clear() + h.UnmarshalError.Clear() + h.ValidateResponse.Clear() + h.Retry.Clear() + h.AfterRetry.Clear() +} + +// A HandlerList manages zero or more handlers in a list. +type HandlerList struct { + list []NamedHandler +} + +// A NamedHandler is a struct that contains a name and function callback. +type NamedHandler struct { + Name string + Fn func(*Request) +} + +// copy creates a copy of the handler list. +func (l *HandlerList) copy() HandlerList { + var n HandlerList + n.list = append([]NamedHandler{}, l.list...) + return n +} + +// Clear clears the handler list. +func (l *HandlerList) Clear() { + l.list = []NamedHandler{} +} + +// Len returns the number of handlers in the list. +func (l *HandlerList) Len() int { + return len(l.list) +} + +// PushBack pushes handler f to the back of the handler list. +func (l *HandlerList) PushBack(f func(*Request)) { + l.list = append(l.list, NamedHandler{"__anonymous", f}) +} + +// PushFront pushes handler f to the front of the handler list. +func (l *HandlerList) PushFront(f func(*Request)) { + l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...) +} + +// PushBackNamed pushes named handler f to the back of the handler list. +func (l *HandlerList) PushBackNamed(n NamedHandler) { + l.list = append(l.list, n) +} + +// PushFrontNamed pushes named handler f to the front of the handler list. +func (l *HandlerList) PushFrontNamed(n NamedHandler) { + l.list = append([]NamedHandler{n}, l.list...) +} + +// Remove removes a NamedHandler n +func (l *HandlerList) Remove(n NamedHandler) { + newlist := []NamedHandler{} + for _, m := range l.list { + if m.Name != n.Name { + newlist = append(newlist, m) + } + } + l.list = newlist +} + +// Run executes all handlers in the list with a given request object. +func (l *HandlerList) Run(r *Request) { + for _, f := range l.list { + f.Fn(r) + } +} + +// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request +// header. If the extra parameters are provided they will be added as metadata to the +// name/version pair resulting in the following format. +// "name/version (extra0; extra1; ...)" +// The user agent part will be concatenated with this current request's user agent string. +func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. 
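+//
+// A hedged usage sketch attaching the handler to a client's build handlers
+// ("svc" is assumed to be an existing service client, and the user agent
+// string is a placeholder):
+//
+//     svc.Handlers.Build.PushBackNamed(NamedHandler{
+//         Name: "example.UserAgent",
+//         Fn:   MakeAddToUserAgentFreeFormHandler("my-app/1.0"),
+//     })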
+func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
+	return func(r *Request) {
+		AddToUserAgent(r, s)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request.go
new file mode 100644
index 00000000..61bb4a8c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request.go
@@ -0,0 +1,294 @@
+package request
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+)
+
+// A Request is the service request to be made.
+type Request struct {
+	Config     aws.Config
+	ClientInfo metadata.ClientInfo
+	Handlers   Handlers
+
+	Retryer
+	Time             time.Time
+	ExpireTime       time.Duration
+	Operation        *Operation
+	HTTPRequest      *http.Request
+	HTTPResponse     *http.Response
+	Body             io.ReadSeeker
+	BodyStart        int64 // offset from beginning of Body that the request body starts
+	Params           interface{}
+	Error            error
+	Data             interface{}
+	RequestID        string
+	RetryCount       int
+	Retryable        *bool
+	RetryDelay       time.Duration
+	NotHoist         bool
+	SignedHeaderVals http.Header
+
+	built bool
+}
+
+// An Operation is the service API operation to be made.
+type Operation struct {
+	Name       string
+	HTTPMethod string
+	HTTPPath   string
+	*Paginator
+}
+
+// Paginator keeps track of pagination configuration for an API operation.
+type Paginator struct {
+	InputTokens     []string
+	OutputTokens    []string
+	LimitToken      string
+	TruncationToken string
+}
+
+// New returns a new Request pointer for the service API
+// operation and parameters.
+//
+// Params is any value of input parameters to be the request payload.
+// Data is a pointer value to an object which the request's response
+// payload will be deserialized to.
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
+	retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+
+	method := operation.HTTPMethod
+	if method == "" {
+		method = "POST"
+	}
+	p := operation.HTTPPath
+	if p == "" {
+		p = "/"
+	}
+
+	httpReq, _ := http.NewRequest(method, "", nil)
+	httpReq.URL, _ = url.Parse(clientInfo.Endpoint + p)
+
+	r := &Request{
+		Config:     cfg,
+		ClientInfo: clientInfo,
+		Handlers:   handlers.Copy(),
+
+		Retryer:     retryer,
+		Time:        time.Now(),
+		ExpireTime:  0,
+		Operation:   operation,
+		HTTPRequest: httpReq,
+		Body:        nil,
+		Params:      params,
+		Error:       nil,
+		Data:        data,
+	}
+	r.SetBufferBody([]byte{})
+
+	return r
+}
+
+// WillRetry returns if the request can be retried.
+func (r *Request) WillRetry() bool {
+	return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
+}
+
+// ParamsFilled returns if the request's parameters have been populated
+// and the parameters are valid. False is returned if no parameters are
+// provided or invalid.
+func (r *Request) ParamsFilled() bool {
+	return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// DataFilled returns true if the request's data for response deserialization
+// target has been set and is valid. False is returned if data is not
+// set, or is invalid.
+func (r *Request) DataFilled() bool {
+	return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+	r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+	r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
+func (r *Request) SetReaderBody(reader io.ReadSeeker) {
+	r.HTTPRequest.Body = ioutil.NopCloser(reader)
+	r.Body = reader
+}
+
+// Presign returns the request's signed URL. Error will be returned
+// if the signing fails.
+func (r *Request) Presign(expireTime time.Duration) (string, error) {
+	r.ExpireTime = expireTime
+	r.NotHoist = false
+	r.Sign()
+	if r.Error != nil {
+		return "", r.Error
+	}
+	return r.HTTPRequest.URL.String(), nil
+}
+
+// PresignRequest behaves just like Presign, but hoists all headers and signs them.
+// Also returns the signed headers back to the user.
+func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) {
+	r.ExpireTime = expireTime
+	r.NotHoist = true
+	r.Sign()
+	if r.Error != nil {
+		return "", nil, r.Error
+	}
+	return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
+}
+
+func debugLogReqError(r *Request, stage string, retrying bool, err error) {
+	if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
+		return
+	}
+
+	retryStr := "not retrying"
+	if retrying {
+		retryStr = "will retry"
+	}
+
+	r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
+		stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
+}
+
+// Build will build the request's object so it can be signed and sent
+// to the service. Build will also validate all the request's parameters.
+// Any additional build Handlers set on this request will be run
+// in the order they were set.
+//
+// The request will only be built once. Multiple calls to build will have
+// no effect.
+//
+// If any Validate or Build errors occur the build will stop and the error
+// which occurred will be returned.
+func (r *Request) Build() error {
+	if !r.built {
+		r.Error = nil
+		r.Handlers.Validate.Run(r)
+		if r.Error != nil {
+			debugLogReqError(r, "Validate Request", false, r.Error)
+			return r.Error
+		}
+		r.Handlers.Build.Run(r)
+		r.built = true
+	}
+
+	return r.Error
+}
+
+// Sign will sign the request returning an error if errors are encountered.
+//
+// Sign will build the request prior to signing. All Sign Handlers will
+// be executed in the order they were set.
+func (r *Request) Sign() error {
+	r.Build()
+	if r.Error != nil {
+		debugLogReqError(r, "Build Request", false, r.Error)
+		return r.Error
+	}
+
+	r.Handlers.Sign.Run(r)
+	return r.Error
+}
+
+// Send will send the request returning an error if errors are encountered.
+//
+// Send will sign the request prior to sending. All Send Handlers will
+// be executed in the order they were set.
+func (r *Request) Send() error {
+	for {
+		r.Sign()
+		if r.Error != nil {
+			return r.Error
+		}
+
+		if aws.BoolValue(r.Retryable) {
+			if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
+				r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
+					r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
+			}
+
+			// Re-seek the body back to the original point for a retry so that
+			// send will send the body's contents again in the upcoming request.
+ r.Body.Seek(r.BodyStart, 0) + r.HTTPRequest.Body = ioutil.NopCloser(r.Body) + } + r.Retryable = nil + + r.Handlers.Send.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", false, r.Error) + return r.Error + } + debugLogReqError(r, "Send Request", true, err) + continue + } + + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.UnmarshalError.Run(r) + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Response", false, r.Error) + return r.Error + } + debugLogReqError(r, "Validate Response", true, err) + continue + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", false, r.Error) + return r.Error + } + debugLogReqError(r, "Unmarshal Response", true, err) + continue + } + + break + } + + return nil +} + +// AddToUserAgent adds the string to the end of the request's current user agent. +func AddToUserAgent(r *Request, s string) { + curUA := r.HTTPRequest.Header.Get("User-Agent") + if len(curUA) > 0 { + s = curUA + " " + s + } + r.HTTPRequest.Header.Set("User-Agent", s) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go new file mode 100644 index 00000000..2939ec47 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -0,0 +1,104 @@ +package request + +import ( + "reflect" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +//type Paginater interface { +// HasNextPage() bool +// NextPage() *Request +// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error +//} + +// HasNextPage returns true if this request has more pages of data available. +func (r *Request) HasNextPage() bool { + return len(r.nextPageTokens()) > 0 +} + +// nextPageTokens returns the tokens to use when asking for the next page of +// data. +func (r *Request) nextPageTokens() []interface{} { + if r.Operation.Paginator == nil { + return nil + } + + if r.Operation.TruncationToken != "" { + tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) + if len(tr) == 0 { + return nil + } + + switch v := tr[0].(type) { + case *bool: + if !aws.BoolValue(v) { + return nil + } + case bool: + if v == false { + return nil + } + } + } + + tokens := []interface{}{} + tokenAdded := false + for _, outToken := range r.Operation.OutputTokens { + v, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(v) > 0 { + tokens = append(tokens, v[0]) + tokenAdded = true + } else { + tokens = append(tokens, nil) + } + } + if !tokenAdded { + return nil + } + + return tokens +} + +// NextPage returns a new Request that can be executed to return the next +// page of result data. Call .Send() on this request to execute it. 
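+//
+// Example (a minimal sketch; req is an assumed paginated *Request):
+//
+//	for page := req; page != nil; page = page.NextPage() {
+//		if err := page.Send(); err != nil {
+//			return err
+//		}
+//		fmt.Println(page.Data)
+//	}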
+func (r *Request) NextPage() *Request { + tokens := r.nextPageTokens() + if len(tokens) == 0 { + return nil + } + + data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() + nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) + for i, intok := range nr.Operation.InputTokens { + awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) + } + return nr +} + +// EachPage iterates over each page of a paginated request object. The fn +// parameter should be a function with the following sample signature: +// +// func(page *T, lastPage bool) bool { +// return true // return false to stop iterating +// } +// +// Where "T" is the structure type matching the output structure of the given +// operation. For example, a request object generated by +// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput +// as the structure "T". The lastPage value represents whether the page is +// the last page of data or not. The return value of this function should +// return true to keep iterating or false to stop. +func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + for page := r; page != nil; page = page.NextPage() { + if err := page.Send(); err != nil { + return err + } + if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { + return page.Error + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/retryer.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/retryer.go new file mode 100644 index 00000000..ab6fff5a --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -0,0 +1,82 @@ +package request + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Retryer is an interface to control retry logic for a given service. +// The default implementation used by most services is the service.DefaultRetryer +// structure, which contains basic retry logic using exponential backoff. +type Retryer interface { + RetryRules(*Request) time.Duration + ShouldRetry(*Request) bool + MaxRetries() int +} + +// WithRetryer sets a config Retryer value to the given Config returning it +// for chaining. +func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + cfg.Retryer = retryer + return cfg +} + +// retryableCodes is a collection of service response codes which are retry-able +// without any further action. +var retryableCodes = map[string]struct{}{ + "RequestError": {}, + "RequestTimeout": {}, + "ProvisionedThroughputExceededException": {}, + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once + "TooManyRequestsException": {}, // Lambda functions +} + +// credsExpiredCodes is a collection of error codes which signify the credentials +// need to be refreshed. Expired tokens require refreshing of credentials, and +// resigning before the request can be retried. +var credsExpiredCodes = map[string]struct{}{ + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "RequestExpired": {}, // EC2 Only +} + +func isCodeRetryable(code string) bool { + if _, ok := retryableCodes[code]; ok { + return true + } + + return isCodeExpiredCreds(code) +} + +func isCodeExpiredCreds(code string) bool { + _, ok := credsExpiredCodes[code] + return ok +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. 
+// Returns false if the request has no Error set.
+func (r *Request) IsErrorRetryable() bool {
+	if r.Error != nil {
+		if err, ok := r.Error.(awserr.Error); ok {
+			return isCodeRetryable(err.Code())
+		}
+	}
+	return false
+}
+
+// IsErrorExpired returns whether the error code is a credential expiry error.
+// Returns false if the request has no Error set.
+func (r *Request) IsErrorExpired() bool {
+	if r.Error != nil {
+		if err, ok := r.Error.(awserr.Error); ok {
+			return isCodeExpiredCreds(err.Code())
+		}
+	}
+	return false
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session.go
new file mode 100644
index 00000000..47e4536f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -0,0 +1,120 @@
+// Package session provides a way to create service clients with shared configuration
+// and handlers.
+//
+// Generally this package should be used instead of the `defaults` package.
+//
+// A session should be used to share configurations and request handlers between multiple
+// service clients. When service clients need specific configuration aws.Config can be
+// used to provide additional configuration directly to the service client.
+package session
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/defaults"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/endpoints"
+)
+
+// A Session provides a central location to create service clients from and
+// store configurations and request handlers for those services.
+//
+// Sessions are safe for creating service clients concurrently, but it is not
+// safe to mutate the Session concurrently.
+type Session struct {
+	Config   *aws.Config
+	Handlers request.Handlers
+}
+
+// New creates a new Session, merging in the provided Configs on top of the
+// SDK's default configurations. Once the session is created it
+// can be mutated to modify Configs or Handlers. The session is safe to be read
+// concurrently, but it should not be written to concurrently.
+//
+// Example:
+//	// Create a session with the default config and request handlers.
+//	sess := session.New()
+//
+//	// Create a session with a custom region
+//	sess := session.New(&aws.Config{Region: aws.String("us-east-1")})
+//
+//	// Create a session, and add additional handlers for all service
+//	// clients created with the session to inherit. Adds logging handler.
+//	sess := session.New()
+//	sess.Handlers.Send.PushFront(func(r *request.Request) {
+//		// Log every request made and its payload
+//		logger.Printf("Request: %s/%s, Payload: %s", r.ClientInfo.ServiceName, r.Operation, r.Params)
+//	})
+//
+//	// Create an S3 client instance from a session
+//	sess := session.New()
+//	svc := s3.New(sess)
func New(cfgs ...*aws.Config) *Session {
+	cfg := defaults.Config()
+	handlers := defaults.Handlers()
+
+	// Apply the passed in configs so the configuration can be applied to the
+	// default credential chain
+	cfg.MergeIn(cfgs...)
+	cfg.Credentials = defaults.CredChain(cfg, handlers)
+
+	// Reapply any passed in configs to override credentials if set
+	cfg.MergeIn(cfgs...)
+
+	s := &Session{
+		Config:   cfg,
+		Handlers: handlers,
+	}
+
+	initHandlers(s)
+
+	return s
+}
+
+func initHandlers(s *Session) {
+	// Add the Validate parameter handler if it is not disabled.
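+	// Remove before pushing so the handler is never registered twice when a
+	// copied session is re-initialized.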
+	s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
+	if !aws.BoolValue(s.Config.DisableParamValidation) {
+		s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
+	}
+}
+
+// Copy creates and returns a copy of the current session, copying the config
+// and handlers. If any additional configs are provided they will be merged
+// on top of the session's copied config.
+//
+// Example:
+//	// Create a copy of the current session, configured for the us-west-2 region.
+//	sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
+func (s *Session) Copy(cfgs ...*aws.Config) *Session {
+	newSession := &Session{
+		Config:   s.Config.Copy(cfgs...),
+		Handlers: s.Handlers.Copy(),
+	}
+
+	initHandlers(newSession)
+
+	return newSession
+}
+
+// ClientConfig satisfies the client.ConfigProvider interface and is used to
+// configure the service client instances. Passing the Session to the service
+// client's constructor (New) will use this method to configure the client.
+//
+// Example:
+//	sess := session.New()
+//	s3.New(sess)
+func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
+	s = s.Copy(cfgs...)
+	endpoint, signingRegion := endpoints.NormalizeEndpoint(
+		aws.StringValue(s.Config.Endpoint), serviceName,
+		aws.StringValue(s.Config.Region), aws.BoolValue(s.Config.DisableSSL))
+
+	return client.Config{
+		Config:        s.Config,
+		Handlers:      s.Handlers,
+		Endpoint:      endpoint,
+		SigningRegion: signingRegion,
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go
new file mode 100644
index 00000000..0f067c57
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go
@@ -0,0 +1,88 @@
+package aws
+
+import (
+	"io"
+	"sync"
+)
+
+// ReadSeekCloser wraps an io.Reader, returning a ReaderSeekerCloser.
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
+	return ReaderSeekerCloser{r}
+}
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+	r io.Reader
+}
+
+// Read reads from the reader up to size of p. The number of bytes read, and
+// error if it occurred will be returned.
+//
+// If the reader is not an io.Reader, zero bytes read and a nil error will be returned.
+//
+// Performs the same functionality as io.Reader Read
+func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
+	switch t := r.r.(type) {
+	case io.Reader:
+		return t.Read(p)
+	}
+	return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+	switch t := r.r.(type) {
+	case io.Seeker:
+		return t.Seek(offset, whence)
+	}
+	return int64(0), nil
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
+func (r ReaderSeekerCloser) Close() error {
+	switch t := r.r.(type) {
+	case io.Closer:
+		return t.Close()
+	}
+	return nil
+}
+
+// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt
+// interface. Can be used with the s3manager.Downloader to download content to
+// a buffer in memory. Safe to use concurrently.
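+//
+// Example (a minimal sketch of overlapping writes):
+//
+//	buf := &aws.WriteAtBuffer{}
+//	buf.WriteAt([]byte("world"), 6)
+//	buf.WriteAt([]byte("hello "), 0)
+//	fmt.Println(string(buf.Bytes())) // "hello world"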
+type WriteAtBuffer struct {
+	buf []byte
+	m   sync.Mutex
+}
+
+// WriteAt writes a slice of bytes to a buffer starting at the position provided.
+// The number of bytes written will be returned, or an error. Can overwrite
+// previously written slices if the writes overlap.
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
+	b.m.Lock()
+	defer b.m.Unlock()
+
+	expLen := pos + int64(len(p))
+	if int64(len(b.buf)) < expLen {
+		newBuf := make([]byte, expLen)
+		copy(newBuf, b.buf)
+		b.buf = newBuf
+	}
+	copy(b.buf[pos:], p)
+	return len(p), nil
+}
+
+// Bytes returns a slice of bytes written to the buffer.
+func (b *WriteAtBuffer) Bytes() []byte {
+	b.m.Lock()
+	defer b.m.Unlock()
+	return b.buf[:len(b.buf):len(b.buf)]
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go
new file mode 100644
index 00000000..ad215ea8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go
@@ -0,0 +1,8 @@
+// Package aws provides core functionality for making requests to AWS services.
+package aws
+
+// SDKName is the name of this AWS SDK
+const SDKName = "aws-sdk-go"
+
+// SDKVersion is the version of this SDK
+const SDKVersion = "1.1.0"
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
new file mode 100644
index 00000000..2b279e65
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
@@ -0,0 +1,65 @@
+// Package endpoints validates regional endpoints for services.
+package endpoints
+
+//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
+//go:generate gofmt -s -w endpoints_map.go
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+// NormalizeEndpoint takes an endpoint and service API information to return a
+// normalized endpoint and signing region. If the endpoint is an empty string,
+// the service name and region will be used to look up the service's API endpoint.
+// If the endpoint is provided the scheme will be added if it is not present.
+func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (normEndpoint, signingRegion string) {
+	if endpoint == "" {
+		return EndpointForRegion(serviceName, region, disableSSL)
+	}
+
+	return AddScheme(endpoint, disableSSL), ""
+}
+
+// EndpointForRegion returns an endpoint and its signing region for a service and region.
+// If the service and region pair is not found, endpoint and signingRegion will be empty.
+func EndpointForRegion(svcName, region string, disableSSL bool) (endpoint, signingRegion string) {
+	derivedKeys := []string{
+		region + "/" + svcName,
+		region + "/*",
+		"*/" + svcName,
+		"*/*",
+	}
+
+	for _, key := range derivedKeys {
+		if val, ok := endpointsMap.Endpoints[key]; ok {
+			ep := val.Endpoint
+			ep = strings.Replace(ep, "{region}", region, -1)
+			ep = strings.Replace(ep, "{service}", svcName, -1)
+
+			endpoint = ep
+			signingRegion = val.SigningRegion
+			break
+		}
+	}
+
+	return AddScheme(endpoint, disableSSL), signingRegion
+}
+
+// Regular expression to determine if the endpoint string is prefixed with a scheme.
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// AddScheme adds the HTTP or HTTPS schemes to an endpoint URL if there is no
+// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS.
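+//
+// Example (sketch):
+//
+//	AddScheme("example.com", false) // "https://example.com"
+//	AddScheme("example.com", true)  // "http://example.com"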
+func AddScheme(endpoint string, disableSSL bool) string { + if endpoint != "" && !schemeRE.MatchString(endpoint) { + scheme := "https" + if disableSSL { + scheme = "http" + } + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + } + + return endpoint +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json new file mode 100644 index 00000000..7819eedc --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json @@ -0,0 +1,92 @@ +{ + "version": 2, + "endpoints": { + "*/*": { + "endpoint": "{service}.{region}.amazonaws.com" + }, + "cn-north-1/*": { + "endpoint": "{service}.{region}.amazonaws.com.cn", + "signatureVersion": "v4" + }, + "us-gov-west-1/iam": { + "endpoint": "iam.us-gov.amazonaws.com" + }, + "us-gov-west-1/sts": { + "endpoint": "sts.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "*/cloudfront": { + "endpoint": "cloudfront.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/cloudsearchdomain": { + "endpoint": "", + "signingRegion": "us-east-1" + }, + "*/data.iot": { + "endpoint": "", + "signingRegion": "us-east-1" + }, + "*/ec2metadata": { + "endpoint": "http://169.254.169.254/latest", + "signingRegion": "us-east-1" + }, + "*/iam": { + "endpoint": "iam.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/importexport": { + "endpoint": "importexport.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/route53": { + "endpoint": "route53.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/sts": { + "endpoint": "sts.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/waf": { + "endpoint": "waf.amazonaws.com", + "signingRegion": "us-east-1" + }, + "us-east-1/sdb": { + "endpoint": "sdb.amazonaws.com", + "signingRegion": "us-east-1" + }, + "us-east-1/s3": { + "endpoint": "s3.amazonaws.com" + }, + "us-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "us-west-2/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "eu-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-southeast-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-southeast-2/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-northeast-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "ap-northeast-2/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "sa-east-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "eu-central-1/s3": { + "endpoint": "{service}.{region}.amazonaws.com", + "signatureVersion": "v4" + } + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go new file mode 100644 index 00000000..9b2e1b69 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go @@ -0,0 +1,104 @@ +package endpoints + +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +type endpointStruct struct { + Version int + Endpoints map[string]endpointEntry +} + +type endpointEntry struct { + Endpoint string + SigningRegion string +} + +var endpointsMap = endpointStruct{ + Version: 2, + Endpoints: map[string]endpointEntry{ + "*/*": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "*/cloudfront": { + Endpoint: "cloudfront.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/cloudsearchdomain": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/data.iot": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/ec2metadata": { + Endpoint: "http://169.254.169.254/latest", + SigningRegion: "us-east-1", + }, + "*/iam": { + Endpoint: "iam.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/importexport": { + Endpoint: "importexport.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/route53": { + Endpoint: "route53.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/sts": { + Endpoint: "sts.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/waf": { + Endpoint: "waf.amazonaws.com", + SigningRegion: "us-east-1", + }, + "ap-northeast-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "ap-northeast-2/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "ap-southeast-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "ap-southeast-2/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "cn-north-1/*": { + Endpoint: "{service}.{region}.amazonaws.com.cn", + }, + "eu-central-1/s3": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "eu-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "sa-east-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-east-1/s3": { + Endpoint: "s3.amazonaws.com", + }, + "us-east-1/sdb": { + Endpoint: "sdb.amazonaws.com", + SigningRegion: "us-east-1", + }, + "us-gov-west-1/iam": { + Endpoint: "iam.us-gov.amazonaws.com", + }, + "us-gov-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-gov-west-1/sts": { + Endpoint: "sts.us-gov-west-1.amazonaws.com", + }, + "us-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-west-2/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + }, +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go new file mode 100644 index 00000000..68e344d1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go @@ -0,0 +1,35 @@ +// Package ec2query provides serialisation of AWS EC2 requests and responses. +package ec2query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/ec2.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building ec2query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.ec2query.Build", Fn: Build} + +// Build builds a request for the EC2 protocol. 
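+//
+// For example (an illustrative sketch), an operation named "DescribeInstances"
+// with API version "2015-10-01" and no parameters serializes to the
+// form-encoded body:
+//
+//	Action=DescribeInstances&Version=2015-10-01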
+func Build(r *request.Request) {
+	body := url.Values{
+		"Action":  {r.Operation.Name},
+		"Version": {r.ClientInfo.APIVersion},
+	}
+	if err := queryutil.Parse(body, r.Params, true); err != nil {
+		r.Error = awserr.New("SerializationError", "failed encoding EC2 Query request", err)
+		return
+	}
+
+	if r.ExpireTime == 0 {
+		r.HTTPRequest.Method = "POST"
+		r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
+		r.SetBufferBody([]byte(body.Encode()))
+	} else { // This is a pre-signed request
+		r.HTTPRequest.Method = "GET"
+		r.HTTPRequest.URL.RawQuery = body.Encode()
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go
new file mode 100644
index 00000000..9223e842
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/ec2query/unmarshal.go
@@ -0,0 +1,63 @@
+package ec2query
+
+//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/ec2.json unmarshal_test.go
+
+import (
+	"encoding/xml"
+	"io"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// UnmarshalHandler is a named request handler for unmarshaling ec2query protocol requests
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.ec2query.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling ec2query protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.ec2query.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling ec2query protocol request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.ec2query.UnmarshalError", Fn: UnmarshalError}
+
+// Unmarshal unmarshals a response body for the EC2 protocol.
+func Unmarshal(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+	if r.DataFilled() {
+		decoder := xml.NewDecoder(r.HTTPResponse.Body)
+		err := xmlutil.UnmarshalXML(r.Data, decoder, "")
+		if err != nil {
+			r.Error = awserr.New("SerializationError", "failed decoding EC2 Query response", err)
+			return
+		}
+	}
+}
+
+// UnmarshalMeta unmarshals response headers for the EC2 protocol.
+func UnmarshalMeta(r *request.Request) {
+	// TODO implement unmarshaling of request IDs
+}
+
+type xmlErrorResponse struct {
+	XMLName   xml.Name `xml:"Response"`
+	Code      string   `xml:"Errors>Error>Code"`
+	Message   string   `xml:"Errors>Error>Message"`
+	RequestID string   `xml:"RequestId"`
+}
+
+// UnmarshalError unmarshals a response error for the EC2 protocol.
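+// The error body it decodes has the shape (an illustrative sketch):
+//
+//	<Response>
+//	    <Errors>
+//	        <Error><Code>...</Code><Message>...</Message></Error>
+//	    </Errors>
+//	    <RequestId>...</RequestId>
+//	</Response>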
+func UnmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	resp := &xmlErrorResponse{}
+	err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
+	if err != nil && err != io.EOF {
+		r.Error = awserr.New("SerializationError", "failed decoding EC2 Query error response", err)
+	} else {
+		r.Error = awserr.NewRequestFailure(
+			awserr.New(resp.Code, resp.Message, nil),
+			r.HTTPResponse.StatusCode,
+			resp.RequestID,
+		)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
new file mode 100644
index 00000000..53831dff
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
@@ -0,0 +1,75 @@
+package protocol
+
+import (
+	"crypto/rand"
+	"fmt"
+	"reflect"
+)
+
+// RandReader is the random reader the protocol package will use to read
+// random bytes from. This is exported for testing, and should not be used.
+var RandReader = rand.Reader
+
+const idempotencyTokenFillTag = `idempotencyToken`
+
+// CanSetIdempotencyToken returns true if the struct field should be
+// automatically populated with an Idempotency token.
+//
+// Only *string and string type fields that are tagged with idempotencyToken
+// which are not already set can be auto filled.
+func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
+	switch u := v.Interface().(type) {
+	// To auto fill an Idempotency token the field must be a string,
+	// tagged for auto fill, and have a zero value.
+	case *string:
+		return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+	case string:
+		return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+	}
+
+	return false
+}
+
+// GetIdempotencyToken returns a randomly generated idempotency token.
+func GetIdempotencyToken() string {
+	b := make([]byte, 16)
+	RandReader.Read(b)
+
+	return UUIDVersion4(b)
+}
+
+// SetIdempotencyToken will set the value provided with an Idempotency Token,
+// given that the value can be set. Will panic if the value is not settable.
+func SetIdempotencyToken(v reflect.Value) {
+	if v.Kind() == reflect.Ptr {
+		if v.IsNil() && v.CanSet() {
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+		v = v.Elem()
+	}
+	v = reflect.Indirect(v)
+
+	if !v.CanSet() {
+		panic(fmt.Sprintf("unable to set idempotency token %v", v))
+	}
+
+	b := make([]byte, 16)
+	_, err := rand.Read(b)
+	if err != nil {
+		// TODO handle error
+		return
+	}
+
+	v.Set(reflect.ValueOf(UUIDVersion4(b)))
+}
+
+// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided
+func UUIDVersion4(u []byte) string {
+	// https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+	// 13th character is "4"
+	u[6] = (u[6] | 0x40) & 0x4F
+	// 17th character is "8", "9", "a", or "b"
+	u[8] = (u[8] | 0x80) & 0xBF
+
+	return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
new file mode 100644
index 00000000..1d552afe
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
@@ -0,0 +1,251 @@
+// Package jsonutil provides JSON serialisation of AWS requests and responses.
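+//
+// A minimal sketch of building a payload (the Input type and its field tags
+// are illustrative, not part of this package):
+//
+//	type Input struct {
+//		Name *string `locationName:"name" type:"string"`
+//	}
+//	b, _ := jsonutil.BuildJSON(&Input{Name: aws.String("x")}) // {"name":"x"}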
+package jsonutil + +import ( + "bytes" + "encoding/base64" + "fmt" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +var timeType = reflect.ValueOf(time.Time{}).Type() +var byteSliceType = reflect.ValueOf([]byte{}).Type() + +// BuildJSON builds a JSON string for a given object v. +func BuildJSON(v interface{}) ([]byte, error) { + var buf bytes.Buffer + + err := buildAny(reflect.ValueOf(v), &buf, "") + return buf.Bytes(), err +} + +func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + value = reflect.Indirect(value) + if !value.IsValid() { + return nil + } + + vtype := value.Type() + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if value.Type() != timeType { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return buildStruct(value, buf, tag) + case "list": + return buildList(value, buf, tag) + case "map": + return buildMap(value, buf, tag) + default: + return buildScalar(value, buf, tag) + } +} + +func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + buf.WriteByte('{') + + t := value.Type() + first := true + for i := 0; i < t.NumField(); i++ { + member := value.Field(i) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("json") == "-" { + continue + } + if field.Tag.Get("location") != "" { + continue // ignore non-body elements + } + + if protocol.CanSetIdempotencyToken(member, field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(&token) + } + + if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { + continue // ignore unset fields + } + + if first { + first = false + } else { + buf.WriteByte(',') + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + fmt.Fprintf(buf, "%q:", name) + + err := buildAny(member, buf, field.Tag) + if err != nil { + return err + } + + } + + buf.WriteString("}") + + return nil +} + +func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("[") + + for i := 0; i < value.Len(); i++ { + buildAny(value.Index(i), buf, "") + + if i < value.Len()-1 { + buf.WriteString(",") + } + } + + buf.WriteString("]") + + return nil +} + +type sortedValues []reflect.Value + +func (sv sortedValues) Len() int { return len(sv) } +func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } + +func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("{") + + var sv sortedValues = value.MapKeys() + sort.Sort(sv) + + for i, k := range sv { + if i > 0 { + buf.WriteByte(',') + } + + fmt.Fprintf(buf, "%q:", k) + buildAny(value.MapIndex(k), buf, "") + } + + 
buf.WriteString("}") + + return nil +} + +func buildScalar(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + switch value.Kind() { + case reflect.String: + writeString(value.String(), buf) + case reflect.Bool: + buf.WriteString(strconv.FormatBool(value.Bool())) + case reflect.Int64: + buf.WriteString(strconv.FormatInt(value.Int(), 10)) + case reflect.Float64: + buf.WriteString(strconv.FormatFloat(value.Float(), 'f', -1, 64)) + default: + switch value.Type() { + case timeType: + converted := value.Interface().(time.Time) + buf.WriteString(strconv.FormatInt(converted.UTC().Unix(), 10)) + case byteSliceType: + if !value.IsNil() { + converted := value.Interface().([]byte) + buf.WriteByte('"') + if len(converted) < 1024 { + // for small buffers, using Encode directly is much faster. + dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) + base64.StdEncoding.Encode(dst, converted) + buf.Write(dst) + } else { + // for large buffers, avoid unnecessary extra temporary + // buffer space. + enc := base64.NewEncoder(base64.StdEncoding, buf) + enc.Write(converted) + enc.Close() + } + buf.WriteByte('"') + } + default: + return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) + } + } + return nil +} + +func writeString(s string, buf *bytes.Buffer) { + buf.WriteByte('"') + for _, r := range s { + if r == '"' { + buf.WriteString(`\"`) + } else if r == '\\' { + buf.WriteString(`\\`) + } else if r == '\b' { + buf.WriteString(`\b`) + } else if r == '\f' { + buf.WriteString(`\f`) + } else if r == '\r' { + buf.WriteString(`\r`) + } else if r == '\t' { + buf.WriteString(`\t`) + } else if r == '\n' { + buf.WriteString(`\n`) + } else if r < 32 { + fmt.Fprintf(buf, "\\u%0.4x", r) + } else { + buf.WriteRune(r) + } + } + buf.WriteByte('"') +} + +// Returns the reflection element of a value, if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go new file mode 100644 index 00000000..fea53561 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -0,0 +1,213 @@ +package jsonutil + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "reflect" + "time" +) + +// UnmarshalJSON reads a stream and unmarshals the results in object v. 
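+//
+// Example (a minimal sketch; Output mirrors the Input type sketched in the
+// package documentation):
+//
+//	out := &Output{}
+//	err := jsonutil.UnmarshalJSON(out, strings.NewReader(`{"name":"x"}`))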
+func UnmarshalJSON(v interface{}, stream io.Reader) error { + var out interface{} + + b, err := ioutil.ReadAll(stream) + if err != nil { + return err + } + + if len(b) == 0 { + return nil + } + + if err := json.Unmarshal(b, &out); err != nil { + return err + } + + return unmarshalAny(reflect.ValueOf(v), out, "") +} + +func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { + vtype := value.Type() + if vtype.Kind() == reflect.Ptr { + vtype = vtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := value.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return unmarshalStruct(value, data, tag) + case "list": + return unmarshalList(value, data, tag) + case "map": + return unmarshalMap(value, data, tag) + default: + return unmarshalScalar(value, data, tag) + } +} + +func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a structure (%#v)", data) + } + + t := value.Type() + if value.Kind() == reflect.Ptr { + if value.IsNil() { // create the structure if it's nil + s := reflect.New(value.Type().Elem()) + value.Set(s) + value = s + } + + value = value.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return unmarshalAny(value.FieldByName(payload), data, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath != "" { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + member := value.FieldByIndex(field.Index) + err := unmarshalAny(member, mapData[name], field.Tag) + if err != nil { + return err + } + } + return nil +} + +func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + listData, ok := data.([]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a list (%#v)", data) + } + + if value.IsNil() { + l := len(listData) + value.Set(reflect.MakeSlice(value.Type(), l, l)) + } + + for i, c := range listData { + err := unmarshalAny(value.Index(i), c, "") + if err != nil { + return err + } + } + + return nil +} + +func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a map (%#v)", data) + } + + if value.IsNil() { + value.Set(reflect.MakeMap(value.Type())) + } + + for k, v := range mapData { + kvalue := reflect.ValueOf(k) + vvalue := reflect.New(value.Type().Elem()).Elem() + + unmarshalAny(vvalue, v, "") + value.SetMapIndex(kvalue, vvalue) + } + + return nil +} + +func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { + errf := func() error { + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + + switch d := data.(type) { 
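+	// encoding/json decodes every JSON number into a float64, so integer and
+	// timestamp targets are converted from float64 below.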
+ case nil: + return nil // nothing to do here + case string: + switch value.Interface().(type) { + case *string: + value.Set(reflect.ValueOf(&d)) + case []byte: + b, err := base64.StdEncoding.DecodeString(d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(b)) + default: + return errf() + } + case float64: + switch value.Interface().(type) { + case *int64: + di := int64(d) + value.Set(reflect.ValueOf(&di)) + case *float64: + value.Set(reflect.ValueOf(&d)) + case *time.Time: + t := time.Unix(int64(d), 0).UTC() + value.Set(reflect.ValueOf(&t)) + default: + return errf() + } + case bool: + switch value.Interface().(type) { + case *bool: + value.Set(reflect.ValueOf(&d)) + default: + return errf() + } + default: + return fmt.Errorf("unsupported JSON value (%v)", data) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go new file mode 100644 index 00000000..7aff0e0f --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go @@ -0,0 +1,111 @@ +// Package jsonrpc provides JSON RPC utilities for serialisation of AWS +// requests and responses. +package jsonrpc + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/json.json build_test.go +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/json.json unmarshal_test.go + +import ( + "encoding/json" + "io/ioutil" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +var emptyJSON = []byte("{}") + +// BuildHandler is a named request handler for building jsonrpc protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Build", Fn: Build} + +// UnmarshalHandler is a named request handler for unmarshaling jsonrpc protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalError", Fn: UnmarshalError} + +// Build builds a JSON payload for a JSON RPC request. +func Build(req *request.Request) { + var buf []byte + var err error + if req.ParamsFilled() { + buf, err = jsonutil.BuildJSON(req.Params) + if err != nil { + req.Error = awserr.New("SerializationError", "failed encoding JSON RPC request", err) + return + } + } else { + buf = emptyJSON + } + + if req.ClientInfo.TargetPrefix != "" || string(buf) != "{}" { + req.SetBufferBody(buf) + } + + if req.ClientInfo.TargetPrefix != "" { + target := req.ClientInfo.TargetPrefix + "." + req.Operation.Name + req.HTTPRequest.Header.Add("X-Amz-Target", target) + } + if req.ClientInfo.JSONVersion != "" { + jsonVersion := req.ClientInfo.JSONVersion + req.HTTPRequest.Header.Add("Content-Type", "application/x-amz-json-"+jsonVersion) + } +} + +// Unmarshal unmarshals a response for a JSON RPC service. 
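+//
+// A sketch of how a client would typically wire it in (svc is an assumed
+// service client):
+//
+//	svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)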
+func Unmarshal(req *request.Request) { + defer req.HTTPResponse.Body.Close() + if req.DataFilled() { + err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.New("SerializationError", "failed decoding JSON RPC response", err) + } + } + return +} + +// UnmarshalMeta unmarshals headers from a response for a JSON RPC service. +func UnmarshalMeta(req *request.Request) { + rest.UnmarshalMeta(req) +} + +// UnmarshalError unmarshals an error response for a JSON RPC service. +func UnmarshalError(req *request.Request) { + defer req.HTTPResponse.Body.Close() + bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.New("SerializationError", "failed reading JSON RPC error response", err) + return + } + if len(bodyBytes) == 0 { + req.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", req.HTTPResponse.Status, nil), + req.HTTPResponse.StatusCode, + "", + ) + return + } + var jsonErr jsonErrorResponse + if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil { + req.Error = awserr.New("SerializationError", "failed decoding JSON RPC error response", err) + return + } + + codes := strings.SplitN(jsonErr.Code, "#", 2) + req.Error = awserr.NewRequestFailure( + awserr.New(codes[len(codes)-1], jsonErr.Message, nil), + req.HTTPResponse.StatusCode, + req.RequestID, + ) +} + +type jsonErrorResponse struct { + Code string `json:"__type"` + Message string `json:"message"` +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/build.go new file mode 100644 index 00000000..56d69db0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -0,0 +1,36 @@ +// Package query provides serialisation of AWS query requests, and responses. +package query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} + +// Build builds a request for an AWS Query service. 
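+//
+// For example (an illustrative sketch), operation "DeleteQueue" at API
+// version "2012-11-05" with a QueueUrl parameter encodes as the body:
+//
+//	Action=DeleteQueue&QueueUrl=...&Version=2012-11-05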
+func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New("SerializationError", "failed encoding Query request", err) + return + } + + if r.ExpireTime == 0 { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 00000000..60ea0bd1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,230 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. +func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + + var name string + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." 
+ name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".member" + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. + mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." 
+ vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + case float32: + v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + v.Set(name, value.UTC().Format(ISO8601UTC)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 00000000..a3ea4095 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,35 @@ +package query + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a response for an AWS Query service. +func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.New("SerializationError", "failed decoding Query response", err) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. 
+func UnmarshalMeta(r *request.Request) {
+	r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
new file mode 100644
index 00000000..b8e3c7a3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
@@ -0,0 +1,40 @@
+package query
+
+import (
+	"encoding/xml"
+	"io"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+type xmlErrorResponse struct {
+	XMLName   xml.Name `xml:"ErrorResponse"`
+	Code      string   `xml:"Error>Code"`
+	Message   string   `xml:"Error>Message"`
+	RequestID string   `xml:"RequestId"`
+}
+
+// UnmarshalErrorHandler is a named request handler to unmarshal request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
+
+// UnmarshalError unmarshals an error response for an AWS Query service.
+func UnmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	resp := &xmlErrorResponse{}
+	err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
+	if err != nil && err != io.EOF {
+		r.Error = awserr.New("SerializationError", "failed to decode query XML error response", err)
+	} else {
+		reqID := resp.RequestID
+		if reqID == "" {
+			reqID = r.RequestID
+		}
+		r.Error = awserr.NewRequestFailure(
+			awserr.New(resp.Code, resp.Message, nil),
+			r.HTTPResponse.StatusCode,
+			reqID,
+		)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
new file mode 100644
index 00000000..5469edb6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
@@ -0,0 +1,257 @@
+// Package rest provides RESTful serialization of AWS requests and responses.
+package rest
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"path"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// RFC822 is the RFC822 formatted timestamp layout used by AWS protocols.
+const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT"
+
+// Whether the byte value can be sent without escaping in AWS URLs
+var noEscape [256]bool
+
+var errValueNotSet = fmt.Errorf("value not set")
+
+func init() {
+	for i := 0; i < len(noEscape); i++ {
+		// AWS expects every character except these to be escaped
+		noEscape[i] = (i >= 'A' && i <= 'Z') ||
+			(i >= 'a' && i <= 'z') ||
+			(i >= '0' && i <= '9') ||
+			i == '-' ||
+			i == '.' ||
+			i == '_' ||
+			i == '~'
+	}
+}
+
+// BuildHandler is a named request handler for building rest protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build}
+
+// Build builds the REST component of a service request.
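+//
+// A sketch of the struct tags it consumes (the field names are illustrative):
+//
+//	type Input struct {
+//		Bucket      *string `location:"uri" locationName:"Bucket"`          // replaces "{Bucket}" in the HTTP path
+//		Marker      *string `location:"querystring" locationName:"marker"`  // added to the query string
+//		ContentType *string `location:"header" locationName:"Content-Type"` // added as a header
+//	}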
+func Build(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v) + buildBody(r, v) + } +} + +func buildLocationElements(r *request.Request, v reflect.Value) { + query := r.HTTPRequest.URL.Query() + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if m.Kind() == reflect.Ptr { + m = m.Elem() + } + if !m.IsValid() { + continue + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName")) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name) + case "querystring": + err = buildQueryString(query, m, name) + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path) +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New("SerializationError", + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string) error { + str, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error { + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key)) + if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + + } + + header.Add(prefix+key.String(), str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string) error { + value, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + uri := u.Path + uri = strings.Replace(uri, "{"+name+"}", EscapePath(value, true), -1) + uri = strings.Replace(uri, "{"+name+"+}", EscapePath(value, false), -1) + u.Path = uri + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v) + if err == errValueNotSet { + return nil + } else if err != nil { + 
return awserr.New("SerializationError", "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func updatePath(url *url.URL, urlPath string) { + scheme, query := url.Scheme, url.RawQuery + + hasSlash := strings.HasSuffix(urlPath, "/") + + // clean up path + urlPath = path.Clean(urlPath) + if hasSlash && !strings.HasSuffix(urlPath, "/") { + urlPath += "/" + } + + // get formatted URL minus scheme so we can build this into Opaque + url.Scheme, url.Path, url.RawQuery = "", "", "" + s := url.String() + url.Scheme = scheme + url.RawQuery = query + + // build opaque URI + url.Opaque = s + urlPath +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + buf.WriteByte('%') + buf.WriteString(strings.ToUpper(strconv.FormatUint(uint64(c), 16))) + } + } + return buf.String() +} + +func convertType(v reflect.Value) (string, error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + var str string + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + str = value.UTC().Format(RFC822) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 00000000..4366de2e --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. +func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". 
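Before the PayloadType implementation below, here is a compact standalone sketch of the same `_` field convention: the blank field's tag names the member that forms the HTTP body. The `sampleInput` type and its tags are hypothetical stand-ins for the SDK's generated shapes.

```go
package main

import (
	"fmt"
	"reflect"
)

// sampleInput imitates the SDK's generated input shapes; both the type and
// its tags are made up for this sketch.
type sampleInput struct {
	_    struct{} `payload:"Body"`
	Body []byte   `type:"blob"`
}

// payloadType mirrors the idea of rest.PayloadType: find the "_" field, read
// its payload tag, then report the named member's "type" tag.
func payloadType(i interface{}) string {
	v := reflect.Indirect(reflect.ValueOf(i))
	if !v.IsValid() {
		return ""
	}
	if field, ok := v.Type().FieldByName("_"); ok {
		if name := field.Tag.Get("payload"); name != "" {
			if member, ok := v.Type().FieldByName(name); ok {
				return member.Tag.Get("type")
			}
		}
	}
	return ""
}

func main() {
	fmt.Println(payloadType(&sampleInput{})) // blob
}
```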
+func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 00000000..ba46f5bc --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,193 @@ +package rest + +import ( + "encoding/base64" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. +func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalBody(r, v) + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalLocationElements(r, v) + } +} + +func unmarshalBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + payload.Set(reflect.ValueOf(b)) + } + case *string: + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + str := string(b) + payload.Set(reflect.ValueOf(&str)) + } + default: + switch payload.Type().String() { + case "io.ReadSeeker": + payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body))) + case "aws.ReadSeekCloser", "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + default: + r.Error = awserr.New("SerializationError", + "failed to decode REST response", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } + } +} + +func unmarshalLocationElements(r *request.Request, v reflect.Value) { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") 
+ if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, r.HTTPResponse.StatusCode) + case "header": + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name)) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + } + } + if r.Error != nil { + return + } + } +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + k = http.CanonicalHeaderKey(k) + if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { + out[k[len(prefix):]] = &v[0] + } + } + r.Set(reflect.ValueOf(out)) + } + return nil +} + +func unmarshalHeader(v reflect.Value, header string) error { + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + + switch v.Interface().(type) { + case *string: + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + t, err := time.Parse(RFC822, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go new file mode 100644 index 00000000..5a0877cb --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go @@ -0,0 +1,91 @@ +// Package restjson provides RESTful JSON serialisation of AWS +// requests and responses. 
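A rough standalone sketch of the typed header coercion unmarshalHeader performs above, simplified to concrete destination types rather than reflect values (the restjson package follows next):

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

const rfc822 = "Mon, 2 Jan 2006 15:04:05 GMT"

// parseHeader coerces a raw header string into a typed destination, in the
// spirit of unmarshalHeader above; a simplified sketch, not the SDK's code.
func parseHeader(dst interface{}, header string) error {
	switch d := dst.(type) {
	case *string:
		*d = header
	case *bool:
		b, err := strconv.ParseBool(header)
		if err != nil {
			return err
		}
		*d = b
	case *int64:
		i, err := strconv.ParseInt(header, 10, 64)
		if err != nil {
			return err
		}
		*d = i
	case *time.Time:
		t, err := time.Parse(rfc822, header)
		if err != nil {
			return err
		}
		*d = t
	default:
		return fmt.Errorf("unsupported destination %T", dst)
	}
	return nil
}

func main() {
	var n int64
	var ts time.Time
	_ = parseHeader(&n, "1024")
	_ = parseHeader(&ts, "Mon, 2 Jan 2006 15:04:05 GMT")
	fmt.Println(n, ts.Year()) // 1024 2006
}
```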
+package restjson + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-json.json build_test.go +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go + +import ( + "encoding/json" + "io/ioutil" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +// BuildHandler is a named request handler for building restjson protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.restjson.Build", Fn: Build} + +// UnmarshalHandler is a named request handler for unmarshaling restjson protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restjson.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restjson protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restjson.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling restjson protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restjson.UnmarshalError", Fn: UnmarshalError} + +// Build builds a request for the REST JSON protocol. +func Build(r *request.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + jsonrpc.Build(r) + } +} + +// Unmarshal unmarshals a response body for the REST JSON protocol. +func Unmarshal(r *request.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + jsonrpc.Unmarshal(r) + } else { + rest.Unmarshal(r) + } +} + +// UnmarshalMeta unmarshals response headers for the REST JSON protocol. +func UnmarshalMeta(r *request.Request) { + rest.UnmarshalMeta(r) +} + +// UnmarshalError unmarshals a response error for the REST JSON protocol. +func UnmarshalError(r *request.Request) { + code := r.HTTPResponse.Header.Get("X-Amzn-Errortype") + bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed reading REST JSON error response", err) + return + } + if len(bodyBytes) == 0 { + r.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", r.HTTPResponse.Status, nil), + r.HTTPResponse.StatusCode, + "", + ) + return + } + var jsonErr jsonErrorResponse + if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil { + r.Error = awserr.New("SerializationError", "failed decoding REST JSON error response", err) + return + } + + if code == "" { + code = jsonErr.Code + } + + code = strings.SplitN(code, ":", 2)[0] + r.Error = awserr.NewRequestFailure( + awserr.New(code, jsonErr.Message, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) +} + +type jsonErrorResponse struct { + Code string `json:"code"` + Message string `json:"message"` +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go new file mode 100644 index 00000000..c74088bf --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -0,0 +1,69 @@ +// Package restxml provides RESTful XML serialisation of AWS +// requests and responses. 
+package restxml + +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go +//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go + +import ( + "bytes" + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// BuildHandler is a named request handler for building restxml protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build} + +// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError} + +// Build builds a request payload for the REST XML protocol. +func Build(r *request.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + var buf bytes.Buffer + err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to encode rest XML request", err) + return + } + r.SetBufferBody(buf.Bytes()) + } +} + +// Unmarshal unmarshals a payload response for the REST XML protocol. +func Unmarshal(r *request.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + defer r.HTTPResponse.Body.Close() + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, "") + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err) + return + } + } else { + rest.Unmarshal(r) + } +} + +// UnmarshalMeta unmarshals response headers for the REST XML protocol. +func UnmarshalMeta(r *request.Request) { + rest.UnmarshalMeta(r) +} + +// UnmarshalError unmarshals a response error for the REST XML protocol. +func UnmarshalError(r *request.Request) { + query.UnmarshalError(r) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go new file mode 100644 index 00000000..da1a6811 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go @@ -0,0 +1,21 @@ +package protocol + +import ( + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body +var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} + +// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. 
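UnmarshalDiscardBody, whose implementation follows, exists because an HTTP/1.x connection is only reusable once the response body has been fully read and closed. The same pattern outside the SDK, as a minimal sketch:

```go
package main

import (
	"io"
	"io/ioutil"
	"net/http"
)

// drainAndClose empties and closes a response body so the underlying
// connection can go back into the client's keep-alive pool.
func drainAndClose(resp *http.Response) {
	if resp == nil || resp.Body == nil {
		return
	}
	io.Copy(ioutil.Discard, resp.Body) // discard any unread bytes
	resp.Body.Close()
}

func main() {
	resp, err := http.Get("https://example.com")
	if err != nil {
		return
	}
	drainAndClose(resp)
}
```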
+func UnmarshalDiscardBody(r *request.Request) {
+	if r.HTTPResponse == nil || r.HTTPResponse.Body == nil {
+		return
+	}
+
+	io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+	r.HTTPResponse.Body.Close()
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
new file mode 100644
index 00000000..ceb4132c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
@@ -0,0 +1,293 @@
+// Package xmlutil provides XML serialisation of AWS requests and responses.
+package xmlutil
+
+import (
+	"encoding/base64"
+	"encoding/xml"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// BuildXML will serialize params into an xml.Encoder.
+// Error will be returned if the serialization of any of the params or nested values fails.
+func BuildXML(params interface{}, e *xml.Encoder) error {
+	b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
+	root := NewXMLElement(xml.Name{})
+	if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
+		return err
+	}
+	for _, c := range root.Children {
+		for _, v := range c {
+			return StructToXML(e, v, false)
+		}
+	}
+	return nil
+}
+
+// Returns the reflection element of a value, if it is a pointer.
+func elemOf(value reflect.Value) reflect.Value {
+	for value.Kind() == reflect.Ptr {
+		value = value.Elem()
+	}
+	return value
+}
+
+// An xmlBuilder serializes values from Go code to XML
+type xmlBuilder struct {
+	encoder    *xml.Encoder
+	namespaces map[string]string
+}
+
+// buildValue is a generic XMLNode builder for any type. It builds the value
+// according to its specific type: struct, list, map, or scalar.
+//
+// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If
+// type is not provided reflect will be used to determine the value's type.
+func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	value = elemOf(value)
+	if !value.IsValid() { // no need to handle zero values
+		return nil
+	} else if tag.Get("location") != "" { // don't handle non-body location values
+		return nil
+	}
+
+	t := tag.Get("type")
+	if t == "" {
+		switch value.Kind() {
+		case reflect.Struct:
+			t = "structure"
+		case reflect.Slice:
+			t = "list"
+		case reflect.Map:
+			t = "map"
+		}
+	}
+
+	switch t {
+	case "structure":
+		if field, ok := value.Type().FieldByName("_"); ok {
+			tag = tag + reflect.StructTag(" ") + field.Tag
+		}
+		return b.buildStruct(value, current, tag)
+	case "list":
+		return b.buildList(value, current, tag)
+	case "map":
+		return b.buildMap(value, current, tag)
+	default:
+		return b.buildScalar(value, current, tag)
+	}
+}
+
+// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested
+// types are converted to XMLNodes also.
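One detail worth calling out before the buildStruct implementation below: buildValue concatenates two reflect.StructTag values with a space so a shape's `_` metadata tag and the member tag can be queried through a single Get. A tiny sketch with made-up tags:

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Concatenating two StructTags with a space yields a tag whose Get
	// lookups see keys from both halves, the trick buildValue relies on.
	// The tag values here are invented for illustration.
	outer := reflect.StructTag(`locationName:"Thing"`)
	meta := reflect.StructTag(`type:"structure" payload:"Body"`)

	merged := outer + reflect.StructTag(" ") + meta
	fmt.Println(merged.Get("locationName")) // Thing
	fmt.Println(merged.Get("type"))         // structure
	fmt.Println(merged.Get("payload"))      // Body
}
```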
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	if !value.IsValid() {
+		return nil
+	}
+
+	fieldAdded := false
+
+	// unwrap payloads
+	if payload := tag.Get("payload"); payload != "" {
+		field, _ := value.Type().FieldByName(payload)
+		tag = field.Tag
+		value = elemOf(value.FieldByName(payload))
+
+		if !value.IsValid() {
+			return nil
+		}
+	}
+
+	child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+
+	// there is an xmlNamespace associated with this struct
+	if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
+		ns := xml.Attr{
+			Name:  xml.Name{Local: "xmlns"},
+			Value: uri,
+		}
+		if prefix != "" {
+			b.namespaces[prefix] = uri // register the namespace
+			ns.Name.Local = "xmlns:" + prefix
+		}
+
+		child.Attr = append(child.Attr, ns)
+	}
+
+	t := value.Type()
+	for i := 0; i < value.NumField(); i++ {
+		member := elemOf(value.Field(i))
+		field := t.Field(i)
+
+		if field.PkgPath != "" {
+			continue // ignore unexported fields
+		}
+
+		mTag := field.Tag
+		if mTag.Get("location") != "" { // skip non-body members
+			continue
+		}
+
+		if protocol.CanSetIdempotencyToken(value.Field(i), field) {
+			token := protocol.GetIdempotencyToken()
+			member = reflect.ValueOf(token)
+		}
+
+		memberName := mTag.Get("locationName")
+		if memberName == "" {
+			memberName = field.Name
+			mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
+		}
+		if err := b.buildValue(member, child, mTag); err != nil {
+			return err
+		}
+
+		fieldAdded = true
+	}
+
+	if fieldAdded { // only append this child if we have one or more valid members
+		current.AddChild(child)
+	}
+
+	return nil
+}
+
+// buildList adds the value's list items to the current XMLNode as children nodes. All
+// nested values in the list are converted to XMLNodes also.
+func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+	if value.IsNil() { // don't build omitted lists
+		return nil
+	}
+
+	// check for unflattened list member
+	flattened := tag.Get("flattened") != ""
+
+	xname := xml.Name{Local: tag.Get("locationName")}
+	if flattened {
+		for i := 0; i < value.Len(); i++ {
+			child := NewXMLElement(xname)
+			current.AddChild(child)
+			if err := b.buildValue(value.Index(i), child, ""); err != nil {
+				return err
+			}
+		}
+	} else {
+		list := NewXMLElement(xname)
+		current.AddChild(list)
+
+		for i := 0; i < value.Len(); i++ {
+			iname := tag.Get("locationNameList")
+			if iname == "" {
+				iname = "member"
+			}
+
+			child := NewXMLElement(xml.Name{Local: iname})
+			list.AddChild(child)
+			if err := b.buildValue(value.Index(i), child, ""); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All
+// nested values in the map are converted to XMLNodes also.
+// +// Error will be returned if it is unable to build the map's values into XMLNodes +func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted maps + return nil + } + + maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + current.AddChild(maproot) + current = maproot + + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + // sorting is not required for compliance, but it makes testing easier + keys := make([]string, value.Len()) + for i, k := range value.MapKeys() { + keys[i] = k.String() + } + sort.Strings(keys) + + for _, k := range keys { + v := value.MapIndex(reflect.ValueOf(k)) + + mapcur := current + if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps + child := NewXMLElement(xml.Name{Local: "entry"}) + mapcur.AddChild(child) + mapcur = child + } + + kchild := NewXMLElement(xml.Name{Local: kname}) + kchild.Text = k + vchild := NewXMLElement(xml.Name{Local: vname}) + mapcur.AddChild(kchild) + mapcur.AddChild(vchild) + + if err := b.buildValue(v, vchild, ""); err != nil { + return err + } + } + + return nil +} + +// buildScalar will convert the value into a string and append it as a attribute or child +// of the current XMLNode. +// +// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value. +// +// Error will be returned if the value type is unsupported. +func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + var str string + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + if !value.IsNil() { + str = base64.StdEncoding.EncodeToString(converted) + } + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + str = strconv.FormatFloat(converted, 'f', -1, 64) + case float32: + str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + str = converted.UTC().Format(ISO8601UTC) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 00000000..49f291a8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,260 @@ +package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" +) + +// UnmarshalXML deserializes an xml.Decoder into the container v. V +// needs to match the shape of the XML expected to be decoded. +// If the shape doesn't match unmarshaling will fail. 
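Before the UnmarshalXML implementation below: the wrapper argument exists because Query-style responses nest the real payload one element deep, e.g. `<DoThingResponse><DoThingResult>...</DoThingResult></DoThingResponse>`. A standalone illustration of the same unwrapping with plain encoding/xml; the operation and element names are hypothetical:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// doThingResponse models the outer wrapper; the interesting data lives one
// level down in DoThingResult. Names are invented for this sketch.
type doThingResponse struct {
	Result doThingResult `xml:"DoThingResult"`
}

type doThingResult struct {
	Name string `xml:"Name"`
}

func main() {
	body := `<DoThingResponse><DoThingResult><Name>ok</Name></DoThingResult></DoThingResponse>`

	var out doThingResponse
	if err := xml.Unmarshal([]byte(body), &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Result.Name) // ok
}
```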
+func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, _ := XMLToStruct(d, nil) + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err := parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("_"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. +func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + if r.Kind() == reflect.Ptr { + if r.IsNil() { // create the structure if it's nil + s := reflect.New(r.Type().Elem()) + r.Set(s) + r = s + } + + r = r.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return parseStruct(r.FieldByName(payload), node, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if c := field.Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + // try to find the field by name in elements + elems := node.Children[name] + + if elems == nil { // try to find the field in attributes + for _, a := range node.Attr { + if name == a.Name.Local { + // turn this into a text node for de-serializing + elems = []*XMLNode{{Text: a.Value}} + } + } + } + + member := r.FieldByName(field.Name) + for _, elem := range elems { + err := parse(member, elem, field.Tag) + if err != nil { + return err + } + } + } + return nil +} + +// parseList deserializes a list of values from an XML node. Each list entry +// will also be deserialized. 
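The flattened/member distinction that parseList handles (its implementation follows) corresponds to two XML renderings of the same logical list: the default form wraps each entry in a `<member>` element, while the flattened form repeats the outer tag directly. A small sketch of both forms using plain encoding/xml tags:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// wrapped models the default rendering: entries nested under <member>.
type wrapped struct {
	Items []string `xml:"Items>member"`
}

// flattened models the flattened rendering: the item tag repeats directly.
type flattened struct {
	Items []string `xml:"Item"`
}

func main() {
	var w wrapped
	_ = xml.Unmarshal([]byte(`<R><Items><member>a</member><member>b</member></Items></R>`), &w)

	var f flattened
	_ = xml.Unmarshal([]byte(`<R><Item>a</Item><Item>b</Item></R>`), &f)

	fmt.Println(w.Items, f.Items) // [a b] [a b]
}
```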
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	t := r.Type()
+
+	if tag.Get("flattened") == "" { // look at all item entries
+		mname := "member"
+		if name := tag.Get("locationNameList"); name != "" {
+			mname = name
+		}
+
+		if Children, ok := node.Children[mname]; ok {
+			if r.IsNil() {
+				r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
+			}
+
+			for i, c := range Children {
+				err := parse(r.Index(i), c, "")
+				if err != nil {
+					return err
+				}
+			}
+		}
+	} else { // flattened list means this is a single element
+		if r.IsNil() {
+			r.Set(reflect.MakeSlice(t, 0, 0))
+		}
+
+		childR := reflect.Zero(t.Elem())
+		r.Set(reflect.Append(r, childR))
+		err := parse(r.Index(r.Len()-1), node, "")
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
+// will also be deserialized as map entries.
+func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	if r.IsNil() {
+		r.Set(reflect.MakeMap(r.Type()))
+	}
+
+	if tag.Get("flattened") == "" { // look at all child entries
+		for _, entry := range node.Children["entry"] {
+			parseMapEntry(r, entry, tag)
+		}
+	} else { // this element is itself an entry
+		parseMapEntry(r, node, tag)
+	}
+
+	return nil
+}
+
+// parseMapEntry deserializes a map entry from an XML node.
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+	kname, vname := "key", "value"
+	if n := tag.Get("locationNameKey"); n != "" {
+		kname = n
+	}
+	if n := tag.Get("locationNameValue"); n != "" {
+		vname = n
+	}
+
+	keys, ok := node.Children[kname]
+	values := node.Children[vname]
+	if ok {
+		for i, key := range keys {
+			keyR := reflect.ValueOf(key.Text)
+			value := values[i]
+			valueR := reflect.New(r.Type().Elem()).Elem()
+
+			parse(valueR, value, "")
+			r.SetMapIndex(keyR, valueR)
+		}
+	}
+	return nil
+}
+
+// parseScalar deserializes an XMLNode value into a concrete type based on the
+// interface type of r.
+//
+// Error is returned if the deserialization fails due to invalid type conversion,
+// or unsupported interface type.
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + switch r.Interface().(type) { + case *string: + r.Set(reflect.ValueOf(&node.Text)) + return nil + case []byte: + b, err := base64.StdEncoding.DecodeString(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(b)) + case *bool: + v, err := strconv.ParseBool(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *int64: + v, err := strconv.ParseInt(node.Text, 10, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *float64: + v, err := strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + t, err := time.Parse(ISO8601UTC, node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go new file mode 100644 index 00000000..72c198a9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -0,0 +1,105 @@ +package xmlutil + +import ( + "encoding/xml" + "io" + "sort" +) + +// A XMLNode contains the values to be encoded or decoded. +type XMLNode struct { + Name xml.Name `json:",omitempty"` + Children map[string][]*XMLNode `json:",omitempty"` + Text string `json:",omitempty"` + Attr []xml.Attr `json:",omitempty"` +} + +// NewXMLElement returns a pointer to a new XMLNode initialized to default values. +func NewXMLElement(name xml.Name) *XMLNode { + return &XMLNode{ + Name: name, + Children: map[string][]*XMLNode{}, + Attr: []xml.Attr{}, + } +} + +// AddChild adds child to the XMLNode. +func (n *XMLNode) AddChild(child *XMLNode) { + if _, ok := n.Children[child.Name.Local]; !ok { + n.Children[child.Name.Local] = []*XMLNode{} + } + n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) +} + +// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values. +func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if tok == nil || err == io.EOF { + break + } + if err != nil { + return out, err + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + if e != nil { + return out, e + } + node.Name = typed.Name + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + } + } + return out, nil +} + +// StructToXML writes an XMLNode to a xml.Encoder as tokens. 
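StructToXML, whose implementation follows, drives the encoder at the token level rather than through xml.Marshal. The three token kinds it emits (start element, character data, end element) are enough to serialize a node, as this minimal encoding/xml sketch shows:

```go
package main

import (
	"bytes"
	"encoding/xml"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	e := xml.NewEncoder(&buf)

	// Emit <Message lang="en">hello</Message> token by token.
	name := xml.Name{Local: "Message"}
	e.EncodeToken(xml.StartElement{
		Name: name,
		Attr: []xml.Attr{{Name: xml.Name{Local: "lang"}, Value: "en"}},
	})
	e.EncodeToken(xml.CharData("hello"))
	e.EncodeToken(xml.EndElement{Name: name})
	e.Flush()

	fmt.Println(buf.String()) // <Message lang="en">hello</Message>
}
```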
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
+	e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr})
+
+	if node.Text != "" {
+		e.EncodeToken(xml.CharData([]byte(node.Text)))
+	} else if sorted {
+		sortedNames := []string{}
+		for k := range node.Children {
+			sortedNames = append(sortedNames, k)
+		}
+		sort.Strings(sortedNames)
+
+		for _, k := range sortedNames {
+			for _, v := range node.Children[k] {
+				StructToXML(e, v, sorted)
+			}
+		}
+	} else {
+		for _, c := range node.Children {
+			for _, v := range c {
+				StructToXML(e, v, sorted)
+			}
+		}
+	}
+
+	e.EncodeToken(xml.EndElement{Name: node.Name})
+	return e.Flush()
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go
new file mode 100644
index 00000000..244c86da
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go
@@ -0,0 +1,82 @@
+package v4
+
+import (
+	"net/http"
+	"strings"
+)
+
+// rules houses a set of rules needed for validation of a
+// string value
+type rules []rule
+
+// rule interface allows for more flexible rules and simply
+// checks whether or not a value adheres to that rule
+type rule interface {
+	IsValid(value string) bool
+}
+
+// IsValid will iterate through all rules and see if any rules
+// apply to the value and supports nested rules
+func (r rules) IsValid(value string) bool {
+	for _, rule := range r {
+		if rule.IsValid(value) {
+			return true
+		}
+	}
+	return false
+}
+
+// mapRule generic rule for maps
+type mapRule map[string]struct{}
+
+// IsValid for the map rule satisfies whether it exists in the map
+func (m mapRule) IsValid(value string) bool {
+	_, ok := m[value]
+	return ok
+}
+
+// whitelist is a generic rule for whitelisting
+type whitelist struct {
+	rule
+}
+
+// IsValid for whitelist checks if the value is within the whitelist
+func (w whitelist) IsValid(value string) bool {
+	return w.rule.IsValid(value)
+}
+
+// blacklist is a generic rule for blacklisting
+type blacklist struct {
+	rule
+}
+
+// IsValid for blacklist checks if the value is not within the blacklist
+func (b blacklist) IsValid(value string) bool {
+	return !b.rule.IsValid(value)
+}
+
+type patterns []string
+
+// IsValid for patterns checks each pattern and returns if a match has
+// been found
+func (p patterns) IsValid(value string) bool {
+	for _, pattern := range p {
+		if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) {
+			return true
+		}
+	}
+	return false
+}
+
+// inclusiveRules rules allow for rules to depend on one another
+type inclusiveRules []rule
+
+// IsValid will return true if all rules are true
+func (r inclusiveRules) IsValid(value string) bool {
+	for _, rule := range r {
+		if !rule.IsValid(value) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go
new file mode 100644
index 00000000..14b0c661
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go
@@ -0,0 +1,438 @@
+// Package v4 implements request signing for AWS Signature Version 4.
+package v4
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+
"github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" +) + +var ignoredHeaders = rules{ + blacklist{ + mapRule{ + "Content-Length": struct{}{}, + "User-Agent": struct{}{}, + }, + }, +} + +// requiredSignedHeaders is a whitelist for build canonical headers. +var requiredSignedHeaders = rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, +} + +// allowedHoisting is a whitelist for build query headers. The boolean value +// represents whether or not it is a pattern. +var allowedQueryHoisting = inclusiveRules{ + blacklist{requiredSignedHeaders}, + patterns{"X-Amz-"}, +} + +type signer struct { + Request *http.Request + Time time.Time + ExpireTime time.Duration + ServiceName string + Region string + CredValues credentials.Value + Credentials *credentials.Credentials + Query url.Values + Body io.ReadSeeker + Debug aws.LogLevelType + Logger aws.Logger + + isPresign bool + formattedTime string + formattedShortTime string + + signedHeaders string + canonicalHeaders string + canonicalString string + credentialString string + stringToSign string + signature string + authorization string + notHoist bool + signedHeaderVals http.Header +} + +// Sign requests with signature version 4. +// +// Will sign the requests with the service config's Credentials object +// Signing is skipped if the credentials is the credentials.AnonymousCredentials +// object. +func Sign(req *request.Request) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. 
+	if req.Config.Credentials == credentials.AnonymousCredentials {
+		return
+	}
+
+	region := req.ClientInfo.SigningRegion
+	if region == "" {
+		region = aws.StringValue(req.Config.Region)
+	}
+
+	name := req.ClientInfo.SigningName
+	if name == "" {
+		name = req.ClientInfo.ServiceName
+	}
+
+	s := signer{
+		Request:     req.HTTPRequest,
+		Time:        req.Time,
+		ExpireTime:  req.ExpireTime,
+		Query:       req.HTTPRequest.URL.Query(),
+		Body:        req.Body,
+		ServiceName: name,
+		Region:      region,
+		Credentials: req.Config.Credentials,
+		Debug:       req.Config.LogLevel.Value(),
+		Logger:      req.Config.Logger,
+		notHoist:    req.NotHoist,
+	}
+
+	req.Error = s.sign()
+	req.SignedHeaderVals = s.signedHeaderVals
+}
+
+func (v4 *signer) sign() error {
+	if v4.ExpireTime != 0 {
+		v4.isPresign = true
+	}
+
+	if v4.isRequestSigned() {
+		if !v4.Credentials.IsExpired() {
+			// If the request is already signed, and the credentials have not
+			// expired yet ignore the signing request.
+			return nil
+		}
+
+		// The credentials have expired for this request. The current signature
+		// is invalid, and the request needs to be signed again or it will fail.
+		if v4.isPresign {
+			v4.removePresign()
+			// Update the request's query string to ensure the values stay in
+			// sync in the case retrieving the new credentials fails.
+			v4.Request.URL.RawQuery = v4.Query.Encode()
+		}
+	}
+
+	var err error
+	v4.CredValues, err = v4.Credentials.Get()
+	if err != nil {
+		return err
+	}
+
+	if v4.isPresign {
+		v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
+		if v4.CredValues.SessionToken != "" {
+			v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
+		} else {
+			v4.Query.Del("X-Amz-Security-Token")
+		}
+	} else if v4.CredValues.SessionToken != "" {
+		v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
+	}
+
+	v4.build()
+
+	if v4.Debug.Matches(aws.LogDebugWithSigning) {
+		v4.logSigningInfo()
+	}
+
+	return nil
+}
+
+const logSignInfoMsg = `DEBUG: Request Signature:
+---[ CANONICAL STRING ]-----------------------------
+%s
+---[ STRING TO SIGN ]--------------------------------
+%s%s
+-----------------------------------------------------`
+const logSignedURLMsg = `
+---[ SIGNED URL ]------------------------------------
+%s`
+
+func (v4 *signer) logSigningInfo() {
+	signedURLMsg := ""
+	if v4.isPresign {
+		signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String())
+	}
+	msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg)
+	v4.Logger.Log(msg)
+}
+
+func (v4 *signer) build() {
+
+	v4.buildTime()             // no depends
+	v4.buildCredentialString() // no depends
+
+	unsignedHeaders := v4.Request.Header
+	if v4.isPresign {
+		if !v4.notHoist {
+			urlValues := url.Values{}
+			urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
+			for k := range urlValues {
+				v4.Query[k] = urlValues[k]
+			}
+		}
+	}
+
+	v4.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
+	v4.buildCanonicalString() // depends on canon headers / signed headers
+	v4.buildStringToSign()    // depends on canon string
+	v4.buildSignature()       // depends on string to sign
+
+	if v4.isPresign {
+		v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature
+	} else {
+		parts := []string{
+			authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString,
+			"SignedHeaders=" + v4.signedHeaders,
+			"Signature=" + v4.signature,
+		}
+		v4.Request.Header.Set("Authorization", strings.Join(parts, ", "))
+	}
+}
+
+func (v4 *signer) buildTime() {
+	v4.formattedTime =
v4.Time.UTC().Format(timeFormat) + v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat) + + if v4.isPresign { + duration := int64(v4.ExpireTime / time.Second) + v4.Query.Set("X-Amz-Date", v4.formattedTime) + v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + v4.Request.Header.Set("X-Amz-Date", v4.formattedTime) + } +} + +func (v4 *signer) buildCredentialString() { + v4.credentialString = strings.Join([]string{ + v4.formattedShortTime, + v4.Region, + v4.ServiceName, + "aws4_request", + }, "/") + + if v4.isPresign { + v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (v4 *signer) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + canonicalKey := http.CanonicalHeaderKey(k) + if !r.IsValid(canonicalKey) { + continue // ignored header + } + + lowerCaseKey := strings.ToLower(k) + headers = append(headers, lowerCaseKey) + + if v4.signedHeaderVals == nil { + v4.signedHeaderVals = make(http.Header) + } + v4.signedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + v4.signedHeaders = strings.Join(headers, ";") + + if v4.isPresign { + v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + headerValues[i] = "host:" + v4.Request.URL.Host + } else { + headerValues[i] = k + ":" + + strings.Join(v4.Request.Header[http.CanonicalHeaderKey(k)], ",") + } + } + + v4.canonicalHeaders = strings.Join(headerValues, "\n") +} + +func (v4 *signer) buildCanonicalString() { + v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1) + uri := v4.Request.URL.Opaque + if uri != "" { + uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") + } else { + uri = v4.Request.URL.Path + } + if uri == "" { + uri = "/" + } + + if v4.ServiceName != "s3" { + uri = rest.EscapePath(uri, false) + } + + v4.canonicalString = strings.Join([]string{ + v4.Request.Method, + uri, + v4.Request.URL.RawQuery, + v4.canonicalHeaders + "\n", + v4.signedHeaders, + v4.bodyDigest(), + }, "\n") +} + +func (v4 *signer) buildStringToSign() { + v4.stringToSign = strings.Join([]string{ + authHeaderPrefix, + v4.formattedTime, + v4.credentialString, + hex.EncodeToString(makeSha256([]byte(v4.canonicalString))), + }, "\n") +} + +func (v4 *signer) buildSignature() { + secret := v4.CredValues.SecretAccessKey + date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime)) + region := makeHmac(date, []byte(v4.Region)) + service := makeHmac(region, []byte(v4.ServiceName)) + credentials := makeHmac(service, []byte("aws4_request")) + signature := makeHmac(credentials, []byte(v4.stringToSign)) + v4.signature = hex.EncodeToString(signature) +} + +func (v4 *signer) bodyDigest() string { + hash := v4.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + if v4.isPresign && v4.ServiceName == "s3" { + hash = "UNSIGNED-PAYLOAD" + } else if v4.Body == nil { + hash = hex.EncodeToString(makeSha256([]byte{})) + } else { + hash = hex.EncodeToString(makeSha256Reader(v4.Body)) + } + v4.Request.Header.Add("X-Amz-Content-Sha256", hash) + } + return hash +} + +// isRequestSigned returns if the 
request is currently signed or presigned +func (v4 *signer) isRequestSigned() bool { + if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" { + return true + } + if v4.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. +func (v4 *signer) removePresign() { + v4.Query.Del("X-Amz-Algorithm") + v4.Query.Del("X-Amz-Signature") + v4.Query.Del("X-Amz-Security-Token") + v4.Query.Del("X-Amz-Date") + v4.Query.Del("X-Amz-Expires") + v4.Query.Del("X-Amz-Credential") + v4.Query.Del("X-Amz-SignedHeaders") +} + +func makeHmac(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) []byte { + hash := sha256.New() + start, _ := reader.Seek(0, 1) + defer reader.Seek(start, 0) + + io.Copy(hash, reader) + return hash.Sum(nil) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/waiter/waiter.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/waiter/waiter.go new file mode 100644 index 00000000..b51e9449 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/private/waiter/waiter.go @@ -0,0 +1,134 @@ +package waiter + +import ( + "fmt" + "reflect" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides a collection of configuration values to setup a generated +// waiter code with. +type Config struct { + Name string + Delay int + MaxAttempts int + Operation string + Acceptors []WaitAcceptor +} + +// A WaitAcceptor provides the information needed to wait for an API operation +// to complete. +type WaitAcceptor struct { + Expected interface{} + Matcher string + State string + Argument string +} + +// A Waiter provides waiting for an operation to complete. +type Waiter struct { + Config + Client interface{} + Input interface{} +} + +// Wait waits for an operation to complete, expire max attempts, or fail. Error +// is returned if the operation fails. 
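The buildSignature cascade above derives a signing key scoped to date, region, and service by chaining HMACs, each output keying the next. A standalone sketch of that derivation with made-up inputs (the Waiter.Wait implementation the comment above introduces follows after this):

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	// Hypothetical credentials and scope, for illustration only.
	secret := "EXAMPLEKEY"
	date, region, service := "20150830", "us-east-1", "s3"
	stringToSign := "AWS4-HMAC-SHA256\n..." // abbreviated for the sketch

	// Each HMAC output keys the next, narrowing the key's scope.
	kDate := hmacSHA256([]byte("AWS4"+secret), []byte(date))
	kRegion := hmacSHA256(kDate, []byte(region))
	kService := hmacSHA256(kRegion, []byte(service))
	kSigning := hmacSHA256(kService, []byte("aws4_request"))

	fmt.Println(hex.EncodeToString(hmacSHA256(kSigning, []byte(stringToSign))))
}
```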
+func (w *Waiter) Wait() error { + client := reflect.ValueOf(w.Client) + in := reflect.ValueOf(w.Input) + method := client.MethodByName(w.Config.Operation + "Request") + + for i := 0; i < w.MaxAttempts; i++ { + res := method.Call([]reflect.Value{in}) + req := res[0].Interface().(*request.Request) + req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Waiter")) + + err := req.Send() + for _, a := range w.Acceptors { + result := false + var vals []interface{} + switch a.Matcher { + case "pathAll", "path": + // Require all matches to be equal for result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + case "pathAny": + // Only a single match needs to equal for the result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + case "status": + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + case "error": + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == a.Expected.(string) + } + case "pathList": + // ignored matcher + default: + logf(client, "WARNING: Waiter for %s encountered unexpected matcher: %s", + w.Config.Operation, a.Matcher) + } + + if !result { + // If there was no matching result found there is nothing more to do + // for this response, retry the request. + continue + } + + switch a.State { + case "success": + // waiter completed + return nil + case "failure": + // Waiter failure state triggered + return awserr.New("ResourceNotReady", + fmt.Sprintf("failed waiting for successful resource state"), err) + case "retry": + // clear the error and retry the operation + err = nil + default: + logf(client, "WARNING: Waiter for %s encountered unexpected state: %s", + w.Config.Operation, a.State) + } + } + if err != nil { + return err + } + + time.Sleep(time.Second * time.Duration(w.Delay)) + } + + return awserr.New("ResourceNotReady", + fmt.Sprintf("exceeded %d wait attempts", w.MaxAttempts), nil) +} + +func logf(client reflect.Value, msg string, args ...interface{}) { + cfgVal := client.FieldByName("Config") + if !cfgVal.IsValid() { + return + } + if cfg, ok := cfgVal.Interface().(*aws.Config); ok && cfg.Logger != nil { + cfg.Logger.Log(fmt.Sprintf(msg, args...)) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy.go new file mode 100644 index 00000000..8d392eb8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/policy.go @@ -0,0 +1,210 @@ +package sign + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/url" + "strings" + "time" +) + +// An AWSEpochTime wraps a time value providing JSON serialization needed for +// AWS Policy epoch time fields. +type AWSEpochTime struct { + time.Time +} + +// NewAWSEpochTime returns a new AWSEpochTime pointer wrapping the Go time provided. +func NewAWSEpochTime(t time.Time) *AWSEpochTime { + return &AWSEpochTime{t} +} + +// MarshalJSON serializes the epoch time as AWS Profile epoch time. 
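Stripped of the reflection and acceptor matching, Wait above is a bounded poll loop. A generic sketch of that core shape, with the check function standing in for the request/acceptor machinery (the MarshalJSON implementation introduced by the comment above follows next):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil is a stripped-down analogue of Waiter.Wait: call check up to
// maxAttempts times, sleeping delay between tries, until it reports done.
func pollUntil(maxAttempts int, delay time.Duration, check func() (bool, error)) error {
	for i := 0; i < maxAttempts; i++ {
		done, err := check()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		time.Sleep(delay)
	}
	return errors.New("ResourceNotReady: exceeded wait attempts")
}

func main() {
	n := 0
	err := pollUntil(5, 10*time.Millisecond, func() (bool, error) {
		n++
		return n == 3, nil // pretend the resource is ready on the 3rd poll
	})
	fmt.Println(n, err) // 3 <nil>
}
```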
+func (t AWSEpochTime) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf(`{"AWS:EpochTime":%d}`, t.UTC().Unix())), nil
+}
+
+// An IPAddress wraps an IPAddress source IP providing JSON serialization information
+type IPAddress struct {
+	SourceIP string `json:"AWS:SourceIp"`
+}
+
+// A Condition defines the restrictions for how a signed URL can be used.
+type Condition struct {
+	// Optional IP address mask the signed URL must be requested from.
+	IPAddress *IPAddress `json:"IpAddress,omitempty"`
+
+	// Optional date that the signed URL cannot be used until. It is invalid
+	// to make requests with the signed URL prior to this date.
+	DateGreaterThan *AWSEpochTime `json:",omitempty"`
+
+	// Required date that the signed URL will expire. A DateLessThan is
+	// required to sign CloudFront URLs.
+	DateLessThan *AWSEpochTime `json:",omitempty"`
+}
+
+// A Statement is a collection of conditions for resources
+type Statement struct {
+	// The Web or RTMP resource the URL will be signed for
+	Resource string
+
+	// The set of conditions for this resource
+	Condition Condition
+}
+
+// A Policy defines the resources that a signed URL will be signed for.
+//
+// See the following page for more information on how policies are constructed.
+// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html#private-content-custom-policy-statement
+type Policy struct {
+	// List of resource and condition statements.
+	// Signed URLs should only provide a single statement.
+	Statements []Statement `json:"Statement"`
+}
+
+// Override for testing to mock out usage of crypto/rand.Reader
+var randReader = rand.Reader
+
+// Sign will sign a policy using an RSA private key. It will return a base 64
+// encoded signature and policy if no error is encountered.
+//
+// The signature and policy should be added to the signed URL following the
+// guidelines in:
+// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-signed-urls.html
+func (p *Policy) Sign(privKey *rsa.PrivateKey) (b64Signature, b64Policy []byte, err error) {
+	if err = p.Validate(); err != nil {
+		return nil, nil, err
+	}
+
+	// Build and escape the policy
+	b64Policy, jsonPolicy, err := encodePolicy(p)
+	if err != nil {
+		return nil, nil, err
+	}
+	awsEscapeEncoded(b64Policy)
+
+	// Build and escape the signature
+	b64Signature, err = signEncodedPolicy(randReader, jsonPolicy, privKey)
+	if err != nil {
+		return nil, nil, err
+	}
+	awsEscapeEncoded(b64Signature)
+
+	return b64Signature, b64Policy, nil
+}
+
+// Validate verifies that the policy is valid and usable, and returns an
+// error if there is a problem.
+func (p *Policy) Validate() error {
+	if len(p.Statements) == 0 {
+		return fmt.Errorf("at least one policy statement is required")
+	}
+	for i, s := range p.Statements {
+		if s.Resource == "" {
+			return fmt.Errorf("statement at index %d does not have a resource", i)
+		}
+	}
+
+	return nil
+}
+
+// CreateResource constructs, validates, and returns a resource URL string. An
+// error will be returned if unable to create the resource string.
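Putting the pieces above together: a speculative end-to-end use of NewCannedPolicy and Policy.Sign with a throwaway RSA key (real use would load the CloudFront key pair's private key instead; the CreateResource implementation follows below):

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/service/cloudfront/sign"
)

func main() {
	// Throwaway key for illustration; see LoadPEMPrivKey further down for
	// loading a real CloudFront key pair.
	privKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// Canned policy: a single statement whose only condition is the expiry.
	p := sign.NewCannedPolicy("https://example.com/video.mp4", time.Now().Add(1*time.Hour))

	b64Sig, b64Policy, err := p.Sign(privKey)
	if err != nil {
		panic(err)
	}
	fmt.Printf("signature %d bytes, policy %d bytes\n", len(b64Sig), len(b64Policy))
}
```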
+func CreateResource(scheme, u string) (string, error) {
+	scheme = strings.ToLower(scheme)
+
+	if scheme == "http" || scheme == "https" {
+		return u, nil
+	}
+
+	if scheme == "rtmp" {
+		parsed, err := url.Parse(u)
+		if err != nil {
+			return "", fmt.Errorf("unable to parse rtmp URL, err: %s", err)
+		}
+
+		rtmpURL := strings.TrimLeft(parsed.Path, "/")
+		if parsed.RawQuery != "" {
+			rtmpURL = fmt.Sprintf("%s?%s", rtmpURL, parsed.RawQuery)
+		}
+
+		return rtmpURL, nil
+	}
+
+	return "", fmt.Errorf("invalid URL scheme, must be http, https, or rtmp. Provided: %s", scheme)
+}
+
+// NewCannedPolicy returns a new Canned Policy constructed using the resource
+// and expires time. This can be used to generate the basic model for a Policy
+// that can be then augmented with additional conditions.
+//
+// See the following page for more information on how policies are constructed.
+// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html#private-content-custom-policy-statement
+func NewCannedPolicy(resource string, expires time.Time) *Policy {
+	return &Policy{
+		Statements: []Statement{
+			{
+				Resource: resource,
+				Condition: Condition{
+					DateLessThan: NewAWSEpochTime(expires),
+				},
+			},
+		},
+	}
+}
+
+// encodePolicy encodes the Policy as JSON and also base 64 encodes it.
+func encodePolicy(p *Policy) (b64Policy, jsonPolicy []byte, err error) {
+	jsonPolicy, err = json.Marshal(p)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to encode policy, %s", err.Error())
+	}
+
+	// Remove leading and trailing white space; JSON encoding will not include
+	// whitespace within the encoding.
+	jsonPolicy = bytes.TrimSpace(jsonPolicy)
+
+	b64Policy = make([]byte, base64.StdEncoding.EncodedLen(len(jsonPolicy)))
+	base64.StdEncoding.Encode(b64Policy, jsonPolicy)
+	return b64Policy, jsonPolicy, nil
+}
+
+// signEncodedPolicy will sign and base 64 encode the JSON encoded policy.
+func signEncodedPolicy(randReader io.Reader, jsonPolicy []byte, privKey *rsa.PrivateKey) ([]byte, error) {
+	hash := sha1.New()
+	if _, err := bytes.NewReader(jsonPolicy).WriteTo(hash); err != nil {
+		return nil, fmt.Errorf("failed to calculate signing hash, %s", err.Error())
+	}
+
+	sig, err := rsa.SignPKCS1v15(randReader, privKey, crypto.SHA1, hash.Sum(nil))
+	if err != nil {
+		return nil, fmt.Errorf("failed to sign policy, %s", err.Error())
+	}
+
+	b64Sig := make([]byte, base64.StdEncoding.EncodedLen(len(sig)))
+	base64.StdEncoding.Encode(b64Sig, sig)
+	return b64Sig, nil
+}
+
+// special characters to be replaced with awsEscapeEncoded
+var invalidEncodedChar = map[byte]byte{
+	'+': '-',
+	'=': '_',
+	'/': '~',
+}
+
+// awsEscapeEncoded will replace base64 encoding's special characters to be URL safe.
+func awsEscapeEncoded(b []byte) {
+	for i, v := range b {
+		if r, ok := invalidEncodedChar[v]; ok {
+			b[i] = r
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey.go
new file mode 100644
index 00000000..ffb3c3a7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/privkey.go
@@ -0,0 +1,68 @@
+package sign
+
+import (
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+)
+
+// LoadPEMPrivKeyFile reads a PEM encoded RSA private key from the file name.
+// A new RSA private key will be returned if there is no error.
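awsEscapeEncoded above swaps the three base64 characters that are unsafe in URLs. A quick standalone check of that substitution (the LoadPEMPrivKeyFile implementation follows after it):

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// The same mapping as invalidEncodedChar in the diff.
	repl := map[byte]byte{'+': '-', '=': '_', '/': '~'}

	// These three input bytes base64-encode to "+//+", hitting two of the
	// characters CloudFront needs replaced.
	b := []byte(base64.StdEncoding.EncodeToString([]byte{0xfb, 0xff, 0xfe}))
	fmt.Println(string(b)) // +//+

	for i, c := range b {
		if r, ok := repl[c]; ok {
			b[i] = r
		}
	}
	fmt.Println(string(b)) // -~~-
}
```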
+func LoadPEMPrivKeyFile(name string) (*rsa.PrivateKey, error) {
+	file, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	return LoadPEMPrivKey(file)
+}
+
+// LoadPEMPrivKey reads a PEM encoded RSA private key from the io.Reader.
+// A new RSA private key will be returned if no error occurs.
+func LoadPEMPrivKey(reader io.Reader) (*rsa.PrivateKey, error) {
+	block, err := loadPem(reader)
+	if err != nil {
+		return nil, err
+	}
+
+	return x509.ParsePKCS1PrivateKey(block.Bytes)
+}
+
+// LoadEncryptedPEMPrivKey decrypts the PEM encoded private key using the
+// provided password, returning an RSA private key. If the PEM data is invalid
+// or cannot be decrypted, an error will be returned.
+func LoadEncryptedPEMPrivKey(reader io.Reader, password []byte) (*rsa.PrivateKey, error) {
+	block, err := loadPem(reader)
+	if err != nil {
+		return nil, err
+	}
+
+	decryptedBlock, err := x509.DecryptPEMBlock(block, password)
+	if err != nil {
+		return nil, err
+	}
+
+	return x509.ParsePKCS1PrivateKey(decryptedBlock)
+}
+
+func loadPem(reader io.Reader) (*pem.Block, error) {
+	b, err := ioutil.ReadAll(reader)
+	if err != nil {
+		return nil, err
+	}
+
+	block, _ := pem.Decode(b)
+	if block == nil {
+		// pem.Decode will set block to nil if there is no PEM data in the input;
+		// the second return value will contain the provided bytes that failed
+		// to be decoded.
+		return nil, fmt.Errorf("no valid PEM data provided")
+	}
+
+	return block, nil
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/randomreader.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/randomreader.go
new file mode 100644
index 00000000..7138e22f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/randomreader.go
@@ -0,0 +1,30 @@
+package sign
+
+import (
+	"bytes"
+	"encoding/binary"
+	"math/rand"
+)
+
+// A randomReader wraps a math/rand.Rand within an io.Reader so that it can be
+// used as a predictable testing replacement for crypto/rand.Reader
+type randomReader struct {
+	b *bytes.Buffer
+	r *rand.Rand
+}
+
+// newRandomReader returns a new instance of the random reader
+func newRandomReader(r *rand.Rand) *randomReader {
+	return &randomReader{b: &bytes.Buffer{}, r: r}
+}
+
+// Read will read random bytes up to the length of b.
+func (m *randomReader) Read(b []byte) (int, error) {
+	for i := 0; i < len(b); {
+		binary.Write(m.b, binary.LittleEndian, m.r.Int63())
+		n, _ := m.b.Read(b[i:])
+		i += n
+	}
+
+	return len(b), nil
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url.go
new file mode 100644
index 00000000..62406bbb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/cloudfront/sign/sign_url.go
@@ -0,0 +1,205 @@
+// Package sign provides utilities to generate signed URLs for Amazon CloudFront.
+//
+// More information about signed URLs and their structure can be found at:
+// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-canned-policy.html
+//
+// To sign a URL, create a URLSigner with your private key and credential key
+// pair ID. Once you have a URLSigner instance, you can call Sign or
+// SignWithPolicy to sign the URLs.
+//
+// Example:
+//
+//	// Sign URL to be valid for 1 hour from now.
+//	signer := sign.NewURLSigner(keyID, privKey)
+//	signedURL, err := signer.Sign(rawURL, time.Now().Add(1*time.Hour))
+//	if err != nil {
+//		log.Fatalf("Failed to sign url, err: %s\n", err.Error())
+//	}
+//
+package sign

+import (
+	"crypto/rsa"
+	"fmt"
+	"net/url"
+	"strings"
+	"time"
+)
+
+// A URLSigner provides URL signing utilities to sign URLs for Amazon CloudFront
+// resources. A URLSigner only needs to be created once per credential key pair
+// ID and private key.
+//
+// The signer is safe to use concurrently.
+type URLSigner struct {
+	keyID   string
+	privKey *rsa.PrivateKey
+}
+
+// NewURLSigner constructs and returns a new URLSigner to be used for signing
+// Amazon CloudFront URL resources.
+func NewURLSigner(keyID string, privKey *rsa.PrivateKey) *URLSigner {
+	return &URLSigner{
+		keyID:   keyID,
+		privKey: privKey,
+	}
+}
+
+// Sign will sign a single URL to expire at the time of expires, using the
+// Amazon CloudFront default Canned Policy. The URL will be signed with the
+// private key and Credential Key Pair Key ID previously provided to URLSigner.
+//
+// This is the default method of signing Amazon CloudFront URLs. If extra policy
+// conditions are needed other than URL expiry, use SignWithPolicy instead.
+//
+// Example:
+//
+//	// Sign URL to be valid for 1 hour from now.
+//	signer := sign.NewURLSigner(keyID, privKey)
+//	signedURL, err := signer.Sign(rawURL, time.Now().Add(1*time.Hour))
+//	if err != nil {
+//		log.Fatalf("Failed to sign url, err: %s\n", err.Error())
+//	}
+//
+func (s URLSigner) Sign(url string, expires time.Time) (string, error) {
+	scheme, cleanedURL, err := cleanURLScheme(url)
+	if err != nil {
+		return "", err
+	}
+
+	resource, err := CreateResource(scheme, url)
+	if err != nil {
+		return "", err
+	}
+
+	return signURL(scheme, cleanedURL, s.keyID, NewCannedPolicy(resource, expires), false, s.privKey)
+}
+
+// SignWithPolicy will sign a URL with the Policy provided. The URL will be
+// signed with the private key and Credential Key Pair Key ID previously
+// provided to URLSigner.
+//
+// Use this signing method if you are looking to sign a URL with more than just
+// the URL's expiry time, or are reusing Policies between multiple URL signings.
+// If only the expiry time is needed you can use Sign and provide just the
+// URL's expiry time. A minimum of at least one policy statement is required for a signed URL.
+//
+// Note: It is not safe to use Policies between multiple signers concurrently.
+//
+// Example:
+//
+//	// Sign URL to be valid for 30 minutes from now, expires one hour from now, and
+//	// restricted to the 192.0.2.0/24 IP address range.
+//	policy := &sign.Policy{
+//		Statements: []sign.Statement{
+//			{
+//				Resource: rawURL,
+//				Condition: sign.Condition{
+//					// Optional IP source address range
+//					IPAddress: &sign.IPAddress{SourceIP: "192.0.2.0/24"},
+//					// Optional date URL is not valid until
+//					DateGreaterThan: &sign.AWSEpochTime{time.Now().Add(30 * time.Minute)},
+//					// Required date the URL will expire after
+//					DateLessThan: &sign.AWSEpochTime{time.Now().Add(1 * time.Hour)},
+//				},
+//			},
+//		},
+//	}
+//
+//	signer := sign.NewURLSigner(keyID, privKey)
+//	signedURL, err := signer.SignWithPolicy(rawURL, policy)
+//	if err != nil {
+//		log.Fatalf("Failed to sign url, err: %s\n", err.Error())
+//	}
+//
+func (s URLSigner) SignWithPolicy(url string, p *Policy) (string, error) {
+	scheme, cleanedURL, err := cleanURLScheme(url)
+	if err != nil {
+		return "", err
+	}
+
+	return signURL(scheme, cleanedURL, s.keyID, p, true, s.privKey)
+}
+
+func signURL(scheme, url, keyID string, p *Policy, customPolicy bool, privKey *rsa.PrivateKey) (string, error) {
+	// Validate URL elements
+	if err := validateURL(url); err != nil {
+		return "", err
+	}
+
+	b64Signature, b64Policy, err := p.Sign(privKey)
+	if err != nil {
+		return "", err
+	}
+
+	// build and return signed URL
+	builtURL := buildSignedURL(url, keyID, p, customPolicy, b64Policy, b64Signature)
+	if scheme == "rtmp" {
+		return buildRTMPURL(builtURL)
+	}
+
+	return builtURL, nil
+}
+
+func buildSignedURL(baseURL, keyID string, p *Policy, customPolicy bool, b64Policy, b64Signature []byte) string {
+	pred := "?"
+	if strings.Contains(baseURL, "?") {
+		pred = "&"
+	}
+	signedURL := baseURL + pred
+
+	if customPolicy {
+		signedURL += "Policy=" + string(b64Policy)
+	} else {
+		signedURL += fmt.Sprintf("Expires=%d", p.Statements[0].Condition.DateLessThan.UTC().Unix())
+	}
+	signedURL += fmt.Sprintf("&Signature=%s&Key-Pair-Id=%s", string(b64Signature), keyID)
+
+	return signedURL
+}
+
+func buildRTMPURL(u string) (string, error) {
+	parsed, err := url.Parse(u)
+	if err != nil {
+		return "", fmt.Errorf("unable to parse rtmp signed URL, err: %s", err)
+	}
+
+	rtmpURL := strings.TrimLeft(parsed.Path, "/")
+	if parsed.RawQuery != "" {
+		rtmpURL = fmt.Sprintf("%s?%s", rtmpURL, parsed.RawQuery)
+	}
+
+	return rtmpURL, nil
+}
+
+func cleanURLScheme(u string) (scheme, cleanedURL string, err error) {
+	parts := strings.SplitN(u, "://", 2)
+	if len(parts) != 2 {
+		return "", "", fmt.Errorf("invalid URL, missing scheme and domain/path")
+	}
+	scheme = strings.Replace(parts[0], "*", "", 1)
+	cleanedURL = fmt.Sprintf("%s://%s", scheme, parts[1])
+
+	return strings.ToLower(scheme), cleanedURL, nil
+}
+
+var illegalQueryParms = []string{"Expires", "Policy", "Signature", "Key-Pair-Id"}
+
+func validateURL(u string) error {
+	parsed, err := url.Parse(u)
+	if err != nil {
+		return fmt.Errorf("unable to parse URL, err: %s", err.Error())
+	}
+
+	if parsed.Scheme == "" {
+		return fmt.Errorf("URL missing valid scheme, %s", u)
+	}
+
+	q := parsed.Query()
+	for _, p := range illegalQueryParms {
+		if _, ok := q[p]; ok {
+			return fmt.Errorf("%s cannot be a query parameter for a signed URL", p)
+		}
+	}
+
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/api.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/api.go
new file mode 100644
index 00000000..cc07c4c5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/api.go
@@ -0,0 +1,6311 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
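Taken together, the vendored sign package reduces CloudFront URL signing to three calls: load a key, build a signer, sign. Below is a minimal end-to-end sketch, not part of the vendored files; the key file path, key pair ID, and URL are placeholders, and only the sign package shown above is assumed:

```go
package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/service/cloudfront/sign"
)

func main() {
	// Placeholder PEM file and CloudFront key pair ID.
	privKey, err := sign.LoadPEMPrivKeyFile("cf-private-key.pem")
	if err != nil {
		log.Fatalf("failed to load private key: %s", err)
	}
	signer := sign.NewURLSigner("EXAMPLEKEYPAIRID", privKey)

	rawURL := "https://example.cloudfront.net/a/b/index.html"

	// Canned policy: the only condition is the expiry time.
	signedURL, err := signer.Sign(rawURL, time.Now().Add(1*time.Hour))
	if err != nil {
		log.Fatalf("failed to sign url: %s", err)
	}
	log.Println(signedURL)

	// Custom policy: expiry plus an IP range and a not-before time.
	policy := &sign.Policy{
		Statements: []sign.Statement{
			{
				Resource: rawURL,
				Condition: sign.Condition{
					IPAddress:       &sign.IPAddress{SourceIP: "192.0.2.0/24"},
					DateGreaterThan: sign.NewAWSEpochTime(time.Now().Add(30 * time.Minute)),
					DateLessThan:    sign.NewAWSEpochTime(time.Now().Add(1 * time.Hour)),
				},
			},
		},
	}
	signedURL, err = signer.SignWithPolicy(rawURL, policy)
	if err != nil {
		log.Fatalf("failed to sign url with policy: %s", err)
	}
	log.Println(signedURL)
}
```

Note the asymmetry the package documents: a canned policy is appended as an Expires parameter, while a custom policy embeds the full base64 policy document in the URL.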
+ +// Package s3 provides a client for Amazon Simple Storage Service. +package s3 + +import ( + "io" + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +const opAbortMultipartUpload = "AbortMultipartUpload" + +// AbortMultipartUploadRequest generates a request for the AbortMultipartUpload operation. +func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { + op := &request.Operation{ + Name: opAbortMultipartUpload, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &AbortMultipartUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &AbortMultipartUploadOutput{} + req.Data = output + return +} + +// Aborts a multipart upload. +// +// To verify that all parts have been removed, so you don't get charged for +// the part storage, you should call the List Parts operation and ensure the +// parts list is empty. +func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + err := req.Send() + return out, err +} + +const opCompleteMultipartUpload = "CompleteMultipartUpload" + +// CompleteMultipartUploadRequest generates a request for the CompleteMultipartUpload operation. +func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { + op := &request.Operation{ + Name: opCompleteMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CompleteMultipartUploadInput{} + } + + req = c.newRequest(op, input, output) + output = &CompleteMultipartUploadOutput{} + req.Data = output + return +} + +// Completes a multipart upload by assembling previously uploaded parts. +func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + err := req.Send() + return out, err +} + +const opCopyObject = "CopyObject" + +// CopyObjectRequest generates a request for the CopyObject operation. +func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { + op := &request.Operation{ + Name: opCopyObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CopyObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyObjectOutput{} + req.Data = output + return +} + +// Creates a copy of an object that is already stored in Amazon S3. +func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + err := req.Send() + return out, err +} + +const opCreateBucket = "CreateBucket" + +// CreateBucketRequest generates a request for the CreateBucket operation. +func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { + op := &request.Operation{ + Name: opCreateBucket, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &CreateBucketInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateBucketOutput{} + req.Data = output + return +} + +// Creates a new bucket. 
+func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) {
+	req, out := c.CreateBucketRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opCreateMultipartUpload = "CreateMultipartUpload"
+
+// CreateMultipartUploadRequest generates a request for the CreateMultipartUpload operation.
+func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) {
+	op := &request.Operation{
+		Name:       opCreateMultipartUpload,
+		HTTPMethod: "POST",
+		HTTPPath:   "/{Bucket}/{Key+}?uploads",
+	}
+
+	if input == nil {
+		input = &CreateMultipartUploadInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &CreateMultipartUploadOutput{}
+	req.Data = output
+	return
+}
+
+// Initiates a multipart upload and returns an upload ID.
+//
+// Note: After you initiate a multipart upload and upload one or more parts,
+// you must either complete or abort the multipart upload in order to stop
+// getting charged for storage of the uploaded parts. Only after you either
+// complete or abort the multipart upload does Amazon S3 free up the parts
+// storage and stop charging you for it.
+func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) {
+	req, out := c.CreateMultipartUploadRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteBucket = "DeleteBucket"
+
+// DeleteBucketRequest generates a request for the DeleteBucket operation.
+func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) {
+	op := &request.Operation{
+		Name:       opDeleteBucket,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/{Bucket}",
+	}
+
+	if input == nil {
+		input = &DeleteBucketInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &DeleteBucketOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes the bucket. All objects (including all object versions and Delete
+// Markers) in the bucket must be deleted before the bucket itself can be deleted.
+func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) {
+	req, out := c.DeleteBucketRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteBucketCors = "DeleteBucketCors"
+
+// DeleteBucketCorsRequest generates a request for the DeleteBucketCors operation.
+func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) {
+	op := &request.Operation{
+		Name:       opDeleteBucketCors,
+		HTTPMethod: "DELETE",
+		HTTPPath:   "/{Bucket}?cors",
+	}
+
+	if input == nil {
+		input = &DeleteBucketCorsInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &DeleteBucketCorsOutput{}
+	req.Data = output
+	return
+}
+
+// Deletes the cors configuration information set for the bucket.
+func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) {
+	req, out := c.DeleteBucketCorsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opDeleteBucketLifecycle = "DeleteBucketLifecycle"
+
+// DeleteBucketLifecycleRequest generates a request for the DeleteBucketLifecycle operation.
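The multipart note above is the key operational detail: incomplete uploads keep accruing part-storage charges until completed or aborted. A hedged sketch of the full lifecycle follows, not part of the vendored file; bucket and key names are placeholders, and the aws/session helper (part of the same SDK, not shown in this diff) is assumed:

```go
package main

import (
	"bytes"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.New(aws.NewConfig().WithRegion("us-east-1")))

	bucket, key := aws.String("my-bucket"), aws.String("my-key")

	create, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket: bucket,
		Key:    key,
	})
	if err != nil {
		log.Fatal(err)
	}

	part, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket:     bucket,
		Key:        key,
		UploadId:   create.UploadId,
		PartNumber: aws.Int64(1),
		Body:       bytes.NewReader([]byte("part one payload")),
	})
	if err != nil {
		// Abort so the already-stored parts stop accruing charges.
		svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
			Bucket: bucket, Key: key, UploadId: create.UploadId,
		})
		log.Fatal(err)
	}

	// Complete by listing every uploaded part's ETag and number.
	_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:   bucket,
		Key:      key,
		UploadId: create.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```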
+func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { + op := &request.Operation{ + Name: opDeleteBucketLifecycle, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &DeleteBucketLifecycleInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketLifecycleOutput{} + req.Data = output + return +} + +// Deletes the lifecycle configuration from the bucket. +func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketPolicy = "DeleteBucketPolicy" + +// DeleteBucketPolicyRequest generates a request for the DeleteBucketPolicy operation. +func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { + op := &request.Operation{ + Name: opDeleteBucketPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &DeleteBucketPolicyInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketPolicyOutput{} + req.Data = output + return +} + +// Deletes the policy from the bucket. +func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketReplication = "DeleteBucketReplication" + +// DeleteBucketReplicationRequest generates a request for the DeleteBucketReplication operation. +func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { + op := &request.Operation{ + Name: opDeleteBucketReplication, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &DeleteBucketReplicationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketReplicationOutput{} + req.Data = output + return +} + +func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketTagging = "DeleteBucketTagging" + +// DeleteBucketTaggingRequest generates a request for the DeleteBucketTagging operation. +func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { + op := &request.Operation{ + Name: opDeleteBucketTagging, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &DeleteBucketTaggingInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketTaggingOutput{} + req.Data = output + return +} + +// Deletes the tags from the bucket. 
+func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + err := req.Send() + return out, err +} + +const opDeleteBucketWebsite = "DeleteBucketWebsite" + +// DeleteBucketWebsiteRequest generates a request for the DeleteBucketWebsite operation. +func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { + op := &request.Operation{ + Name: opDeleteBucketWebsite, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &DeleteBucketWebsiteInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteBucketWebsiteOutput{} + req.Data = output + return +} + +// This operation removes the website configuration from the bucket. +func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + err := req.Send() + return out, err +} + +const opDeleteObject = "DeleteObject" + +// DeleteObjectRequest generates a request for the DeleteObject operation. +func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { + op := &request.Operation{ + Name: opDeleteObject, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &DeleteObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteObjectOutput{} + req.Data = output + return +} + +// Removes the null version (if there is one) of an object and inserts a delete +// marker, which becomes the latest version of the object. If there isn't a +// null version, Amazon S3 does not remove any objects. +func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + err := req.Send() + return out, err +} + +const opDeleteObjects = "DeleteObjects" + +// DeleteObjectsRequest generates a request for the DeleteObjects operation. +func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { + op := &request.Operation{ + Name: opDeleteObjects, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}?delete", + } + + if input == nil { + input = &DeleteObjectsInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteObjectsOutput{} + req.Data = output + return +} + +// This operation enables you to delete multiple objects from a bucket using +// a single HTTP request. You may specify up to 1000 keys. +func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketAcl = "GetBucketAcl" + +// GetBucketAclRequest generates a request for the GetBucketAcl operation. +func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { + op := &request.Operation{ + Name: opGetBucketAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &GetBucketAclInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketAclOutput{} + req.Data = output + return +} + +// Gets the access control policy for the bucket. 
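Where DeleteObject removes one key per request, DeleteObjects batches up to 1000 keys into a single HTTP round trip, reporting per-key successes and failures. A small sketch, not part of the vendored file, with placeholder bucket and key names and the same assumed session helper:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.New(aws.NewConfig().WithRegion("us-east-1")))

	// One request can carry up to 1000 keys.
	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String("my-bucket"),
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{
				{Key: aws.String("logs/2016-02-01.gz")},
				{Key: aws.String("logs/2016-02-02.gz")},
			},
		},
	})
	if err != nil {
		log.Fatal(err) // the whole batch request failed
	}
	for _, d := range out.Deleted {
		fmt.Println("deleted:", aws.StringValue(d.Key))
	}
	for _, e := range out.Errors {
		// Individual keys can still fail within a successful batch.
		fmt.Println("failed:", aws.StringValue(e.Key), aws.StringValue(e.Message))
	}
}
```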
+func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketCors = "GetBucketCors" + +// GetBucketCorsRequest generates a request for the GetBucketCors operation. +func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { + op := &request.Operation{ + Name: opGetBucketCors, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &GetBucketCorsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketCorsOutput{} + req.Data = output + return +} + +// Returns the cors configuration for the bucket. +func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLifecycle = "GetBucketLifecycle" + +// GetBucketLifecycleRequest generates a request for the GetBucketLifecycle operation. +func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { + op := &request.Operation{ + Name: opGetBucketLifecycle, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLifecycleOutput{} + req.Data = output + return +} + +// Deprecated, see the GetBucketLifecycleConfiguration operation. +func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" + +// GetBucketLifecycleConfigurationRequest generates a request for the GetBucketLifecycleConfiguration operation. +func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketLifecycleConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleConfigurationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLifecycleConfigurationOutput{} + req.Data = output + return +} + +// Returns the lifecycle configuration information set on the bucket. +func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLocation = "GetBucketLocation" + +// GetBucketLocationRequest generates a request for the GetBucketLocation operation. +func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { + op := &request.Operation{ + Name: opGetBucketLocation, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?location", + } + + if input == nil { + input = &GetBucketLocationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLocationOutput{} + req.Data = output + return +} + +// Returns the region the bucket resides in. 
+func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketLogging = "GetBucketLogging" + +// GetBucketLoggingRequest generates a request for the GetBucketLogging operation. +func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { + op := &request.Operation{ + Name: opGetBucketLogging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &GetBucketLoggingInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketLoggingOutput{} + req.Data = output + return +} + +// Returns the logging status of a bucket and the permissions users have to +// view and modify that status. To use GET, you must be the bucket owner. +func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketNotification = "GetBucketNotification" + +// GetBucketNotificationRequest generates a request for the GetBucketNotification operation. +func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { + op := &request.Operation{ + Name: opGetBucketNotification, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + req = c.newRequest(op, input, output) + output = &NotificationConfigurationDeprecated{} + req.Data = output + return +} + +// Deprecated, see the GetBucketNotificationConfiguration operation. +func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" + +// GetBucketNotificationConfigurationRequest generates a request for the GetBucketNotificationConfiguration operation. +func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { + op := &request.Operation{ + Name: opGetBucketNotificationConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + req = c.newRequest(op, input, output) + output = &NotificationConfiguration{} + req.Data = output + return +} + +// Returns the notification configuration of a bucket. +func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketPolicy = "GetBucketPolicy" + +// GetBucketPolicyRequest generates a request for the GetBucketPolicy operation. 
+func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { + op := &request.Operation{ + Name: opGetBucketPolicy, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &GetBucketPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketPolicyOutput{} + req.Data = output + return +} + +// Returns the policy of a specified bucket. +func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketReplication = "GetBucketReplication" + +// GetBucketReplicationRequest generates a request for the GetBucketReplication operation. +func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { + op := &request.Operation{ + Name: opGetBucketReplication, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &GetBucketReplicationInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketReplicationOutput{} + req.Data = output + return +} + +func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { + req, out := c.GetBucketReplicationRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketRequestPayment = "GetBucketRequestPayment" + +// GetBucketRequestPaymentRequest generates a request for the GetBucketRequestPayment operation. +func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opGetBucketRequestPayment, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &GetBucketRequestPaymentInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketRequestPaymentOutput{} + req.Data = output + return +} + +// Returns the request payment configuration of a bucket. +func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketTagging = "GetBucketTagging" + +// GetBucketTaggingRequest generates a request for the GetBucketTagging operation. +func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { + op := &request.Operation{ + Name: opGetBucketTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &GetBucketTaggingInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketTaggingOutput{} + req.Data = output + return +} + +// Returns the tag set associated with the bucket. +func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketVersioning = "GetBucketVersioning" + +// GetBucketVersioningRequest generates a request for the GetBucketVersioning operation. 
+func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { + op := &request.Operation{ + Name: opGetBucketVersioning, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &GetBucketVersioningInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketVersioningOutput{} + req.Data = output + return +} + +// Returns the versioning state of a bucket. +func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + err := req.Send() + return out, err +} + +const opGetBucketWebsite = "GetBucketWebsite" + +// GetBucketWebsiteRequest generates a request for the GetBucketWebsite operation. +func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { + op := &request.Operation{ + Name: opGetBucketWebsite, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &GetBucketWebsiteInput{} + } + + req = c.newRequest(op, input, output) + output = &GetBucketWebsiteOutput{} + req.Data = output + return +} + +// Returns the website configuration for a bucket. +func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + err := req.Send() + return out, err +} + +const opGetObject = "GetObject" + +// GetObjectRequest generates a request for the GetObject operation. +func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { + op := &request.Operation{ + Name: opGetObject, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &GetObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &GetObjectOutput{} + req.Data = output + return +} + +// Retrieves objects from Amazon S3. +func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + err := req.Send() + return out, err +} + +const opGetObjectAcl = "GetObjectAcl" + +// GetObjectAclRequest generates a request for the GetObjectAcl operation. +func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { + op := &request.Operation{ + Name: opGetObjectAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &GetObjectAclInput{} + } + + req = c.newRequest(op, input, output) + output = &GetObjectAclOutput{} + req.Data = output + return +} + +// Returns the access control list (ACL) of an object. +func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + err := req.Send() + return out, err +} + +const opGetObjectTorrent = "GetObjectTorrent" + +// GetObjectTorrentRequest generates a request for the GetObjectTorrent operation. +func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { + op := &request.Operation{ + Name: opGetObjectTorrent, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?torrent", + } + + if input == nil { + input = &GetObjectTorrentInput{} + } + + req = c.newRequest(op, input, output) + output = &GetObjectTorrentOutput{} + req.Data = output + return +} + +// Return torrent files from a bucket. 
+func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + err := req.Send() + return out, err +} + +const opHeadBucket = "HeadBucket" + +// HeadBucketRequest generates a request for the HeadBucket operation. +func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { + op := &request.Operation{ + Name: opHeadBucket, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &HeadBucketInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &HeadBucketOutput{} + req.Data = output + return +} + +// This operation is useful to determine if a bucket exists and you have permission +// to access it. +func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + err := req.Send() + return out, err +} + +const opHeadObject = "HeadObject" + +// HeadObjectRequest generates a request for the HeadObject operation. +func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { + op := &request.Operation{ + Name: opHeadObject, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &HeadObjectInput{} + } + + req = c.newRequest(op, input, output) + output = &HeadObjectOutput{} + req.Data = output + return +} + +// The HEAD operation retrieves metadata from an object without returning the +// object itself. This operation is useful if you're only interested in an object's +// metadata. To use HEAD, you must have READ access to the object. +func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + err := req.Send() + return out, err +} + +const opListBuckets = "ListBuckets" + +// ListBucketsRequest generates a request for the ListBuckets operation. +func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { + op := &request.Operation{ + Name: opListBuckets, + HTTPMethod: "GET", + HTTPPath: "/", + } + + if input == nil { + input = &ListBucketsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListBucketsOutput{} + req.Data = output + return +} + +// Returns a list of all buckets owned by the authenticated sender of the request. +func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + err := req.Send() + return out, err +} + +const opListMultipartUploads = "ListMultipartUploads" + +// ListMultipartUploadsRequest generates a request for the ListMultipartUploads operation. +func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { + op := &request.Operation{ + Name: opListMultipartUploads, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?uploads", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "UploadIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"}, + LimitToken: "MaxUploads", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListMultipartUploadsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListMultipartUploadsOutput{} + req.Data = output + return +} + +// This operation lists in-progress multipart uploads. 
+func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + err := req.Send() + return out, err +} + +func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(p *ListMultipartUploadsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListMultipartUploadsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListMultipartUploadsOutput), lastPage) + }) +} + +const opListObjectVersions = "ListObjectVersions" + +// ListObjectVersionsRequest generates a request for the ListObjectVersions operation. +func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { + op := &request.Operation{ + Name: opListObjectVersions, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versions", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "VersionIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectVersionsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListObjectVersionsOutput{} + req.Data = output + return +} + +// Returns metadata about all of the versions of objects in a bucket. +func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + err := req.Send() + return out, err +} + +func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(p *ListObjectVersionsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListObjectVersionsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListObjectVersionsOutput), lastPage) + }) +} + +const opListObjects = "ListObjects" + +// ListObjectsRequest generates a request for the ListObjects operation. +func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { + op := &request.Operation{ + Name: opListObjects, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker || Contents[-1].Key"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListObjectsOutput{} + req.Data = output + return +} + +// Returns some or all (up to 1000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. 
+func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + err := req.Send() + return out, err +} + +func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(p *ListObjectsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListObjectsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListObjectsOutput), lastPage) + }) +} + +const opListParts = "ListParts" + +// ListPartsRequest generates a request for the ListParts operation. +func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { + op := &request.Operation{ + Name: opListParts, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + Paginator: &request.Paginator{ + InputTokens: []string{"PartNumberMarker"}, + OutputTokens: []string{"NextPartNumberMarker"}, + LimitToken: "MaxParts", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListPartsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPartsOutput{} + req.Data = output + return +} + +// Lists the parts that have been uploaded for a specific multipart upload. +func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + err := req.Send() + return out, err +} + +func (c *S3) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListPartsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListPartsOutput), lastPage) + }) +} + +const opPutBucketAcl = "PutBucketAcl" + +// PutBucketAclRequest generates a request for the PutBucketAcl operation. +func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { + op := &request.Operation{ + Name: opPutBucketAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &PutBucketAclInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketAclOutput{} + req.Data = output + return +} + +// Sets the permissions on a bucket using access control lists (ACL). +func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketCors = "PutBucketCors" + +// PutBucketCorsRequest generates a request for the PutBucketCors operation. +func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { + op := &request.Operation{ + Name: opPutBucketCors, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &PutBucketCorsInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketCorsOutput{} + req.Data = output + return +} + +// Sets the cors configuration for a bucket. 
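The Paginator metadata above (InputTokens, OutputTokens, LimitToken, TruncationToken) is what drives the generated *Pages methods: each page's output token is copied into the next request's input token until IsTruncated is false. A sketch of ListObjectsPages, not part of the vendored file, with placeholder names:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.New(aws.NewConfig().WithRegion("us-east-1")))

	// The callback receives one page of up to MaxKeys results at a time;
	// returning false stops the iteration early.
	err := svc.ListObjectsPages(&s3.ListObjectsInput{
		Bucket:  aws.String("my-bucket"),
		Prefix:  aws.String("logs/"),
		MaxKeys: aws.Int64(100),
	}, func(page *s3.ListObjectsOutput, lastPage bool) bool {
		for _, obj := range page.Contents {
			fmt.Println(aws.StringValue(obj.Key))
		}
		return true // keep paging until IsTruncated is false
	})
	if err != nil {
		log.Fatal(err)
	}
}
```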
+func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) {
+	req, out := c.PutBucketCorsRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutBucketLifecycle = "PutBucketLifecycle"
+
+// PutBucketLifecycleRequest generates a request for the PutBucketLifecycle operation.
+func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) {
+	op := &request.Operation{
+		Name:       opPutBucketLifecycle,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}?lifecycle",
+	}
+
+	if input == nil {
+		input = &PutBucketLifecycleInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &PutBucketLifecycleOutput{}
+	req.Data = output
+	return
+}
+
+// Deprecated, see the PutBucketLifecycleConfiguration operation.
+func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) {
+	req, out := c.PutBucketLifecycleRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration"
+
+// PutBucketLifecycleConfigurationRequest generates a request for the PutBucketLifecycleConfiguration operation.
+func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) {
+	op := &request.Operation{
+		Name:       opPutBucketLifecycleConfiguration,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}?lifecycle",
+	}
+
+	if input == nil {
+		input = &PutBucketLifecycleConfigurationInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &PutBucketLifecycleConfigurationOutput{}
+	req.Data = output
+	return
+}
+
+// Sets lifecycle configuration for your bucket. If a lifecycle configuration
+// exists, it replaces it.
+func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) {
+	req, out := c.PutBucketLifecycleConfigurationRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutBucketLogging = "PutBucketLogging"
+
+// PutBucketLoggingRequest generates a request for the PutBucketLogging operation.
+func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) {
+	op := &request.Operation{
+		Name:       opPutBucketLogging,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}?logging",
+	}
+
+	if input == nil {
+		input = &PutBucketLoggingInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &PutBucketLoggingOutput{}
+	req.Data = output
+	return
+}
+
+// Set the logging parameters for a bucket and specify permissions for who
+// can view and modify the logging parameters. To set the logging status of
+// a bucket, you must be the bucket owner.
+func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) {
+	req, out := c.PutBucketLoggingRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutBucketNotification = "PutBucketNotification"
+
+// PutBucketNotificationRequest generates a request for the PutBucketNotification operation.
+func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) {
+	op := &request.Operation{
+		Name:       opPutBucketNotification,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}?notification",
+	}
+
+	if input == nil {
+		input = &PutBucketNotificationInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &PutBucketNotificationOutput{}
+	req.Data = output
+	return
+}
+
+// Deprecated, see the PutBucketNotificationConfiguration operation.
+func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) {
+	req, out := c.PutBucketNotificationRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration"
+
+// PutBucketNotificationConfigurationRequest generates a request for the PutBucketNotificationConfiguration operation.
+func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) {
+	op := &request.Operation{
+		Name:       opPutBucketNotificationConfiguration,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}?notification",
+	}
+
+	if input == nil {
+		input = &PutBucketNotificationConfigurationInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &PutBucketNotificationConfigurationOutput{}
+	req.Data = output
+	return
+}
+
+// Enables notifications of specified events for a bucket.
+func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) {
+	req, out := c.PutBucketNotificationConfigurationRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutBucketPolicy = "PutBucketPolicy"
+
+// PutBucketPolicyRequest generates a request for the PutBucketPolicy operation.
+func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) {
+	op := &request.Operation{
+		Name:       opPutBucketPolicy,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}?policy",
+	}
+
+	if input == nil {
+		input = &PutBucketPolicyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &PutBucketPolicyOutput{}
+	req.Data = output
+	return
+}
+
+// Replaces a policy on a bucket. If the bucket already has a policy, the one
+// in this request completely replaces it.
+func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) {
+	req, out := c.PutBucketPolicyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutBucketReplication = "PutBucketReplication"
+
+// PutBucketReplicationRequest generates a request for the PutBucketReplication operation.
+func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { + op := &request.Operation{ + Name: opPutBucketReplication, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &PutBucketReplicationInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketReplicationOutput{} + req.Data = output + return +} + +// Creates a new replication configuration (or replaces an existing one, if +// present). +func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketRequestPayment = "PutBucketRequestPayment" + +// PutBucketRequestPaymentRequest generates a request for the PutBucketRequestPayment operation. +func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opPutBucketRequestPayment, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &PutBucketRequestPaymentInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketRequestPaymentOutput{} + req.Data = output + return +} + +// Sets the request payment configuration for a bucket. By default, the bucket +// owner pays for downloads from the bucket. This configuration parameter enables +// the bucket owner (only) to specify that the person requesting the download +// will be charged for the download. Documentation on requester pays buckets +// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html +func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketTagging = "PutBucketTagging" + +// PutBucketTaggingRequest generates a request for the PutBucketTagging operation. +func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { + op := &request.Operation{ + Name: opPutBucketTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &PutBucketTaggingInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutBucketTaggingOutput{} + req.Data = output + return +} + +// Sets the tags for a bucket. +func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + err := req.Send() + return out, err +} + +const opPutBucketVersioning = "PutBucketVersioning" + +// PutBucketVersioningRequest generates a request for the PutBucketVersioning operation. 
+func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) {
+	op := &request.Operation{
+		Name:       opPutBucketVersioning,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}?versioning",
+	}
+
+	if input == nil {
+		input = &PutBucketVersioningInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &PutBucketVersioningOutput{}
+	req.Data = output
+	return
+}
+
+// Sets the versioning state of an existing bucket. To set the versioning state,
+// you must be the bucket owner.
+func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) {
+	req, out := c.PutBucketVersioningRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutBucketWebsite = "PutBucketWebsite"
+
+// PutBucketWebsiteRequest generates a request for the PutBucketWebsite operation.
+func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) {
+	op := &request.Operation{
+		Name:       opPutBucketWebsite,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}?website",
+	}
+
+	if input == nil {
+		input = &PutBucketWebsiteInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	output = &PutBucketWebsiteOutput{}
+	req.Data = output
+	return
+}
+
+// Set the website configuration for a bucket.
+func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) {
+	req, out := c.PutBucketWebsiteRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutObject = "PutObject"
+
+// PutObjectRequest generates a request for the PutObject operation.
+func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) {
+	op := &request.Operation{
+		Name:       opPutObject,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}/{Key+}",
+	}
+
+	if input == nil {
+		input = &PutObjectInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &PutObjectOutput{}
+	req.Data = output
+	return
+}
+
+// Adds an object to a bucket.
+func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) {
+	req, out := c.PutObjectRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opPutObjectAcl = "PutObjectAcl"
+
+// PutObjectAclRequest generates a request for the PutObjectAcl operation.
+func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) {
+	op := &request.Operation{
+		Name:       opPutObjectAcl,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}/{Key+}?acl",
+	}
+
+	if input == nil {
+		input = &PutObjectAclInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &PutObjectAclOutput{}
+	req.Data = output
+	return
+}
+
+// Uses the acl subresource to set the access control list (ACL) permissions
+// for an object that already exists in a bucket.
+func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) {
+	req, out := c.PutObjectAclRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opRestoreObject = "RestoreObject"
+
+// RestoreObjectRequest generates a request for the RestoreObject operation.
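PutObject and PutObjectAcl are commonly paired: upload first, then adjust the ACL on the existing object. A hedged sketch, not part of the vendored file; the bucket, key, and canned ACL value are placeholders, and the same session helper is assumed:

```go
package main

import (
	"bytes"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.New(aws.NewConfig().WithRegion("us-east-1")))

	// PutObjectInput.Body is an io.ReadSeeker; bytes.Reader satisfies it.
	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:      aws.String("my-bucket"),
		Key:         aws.String("hello.txt"),
		Body:        bytes.NewReader([]byte("hello world")),
		ContentType: aws.String("text/plain"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Adjust permissions on the already-stored object via the acl subresource.
	_, err = svc.PutObjectAcl(&s3.PutObjectAclInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("hello.txt"),
		ACL:    aws.String("public-read"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```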
+func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) {
+	op := &request.Operation{
+		Name:       opRestoreObject,
+		HTTPMethod: "POST",
+		HTTPPath:   "/{Bucket}/{Key+}?restore",
+	}
+
+	if input == nil {
+		input = &RestoreObjectInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &RestoreObjectOutput{}
+	req.Data = output
+	return
+}
+
+// Restores an archived copy of an object back into Amazon S3.
+func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) {
+	req, out := c.RestoreObjectRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUploadPart = "UploadPart"
+
+// UploadPartRequest generates a request for the UploadPart operation.
+func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) {
+	op := &request.Operation{
+		Name:       opUploadPart,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}/{Key+}",
+	}
+
+	if input == nil {
+		input = &UploadPartInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UploadPartOutput{}
+	req.Data = output
+	return
+}
+
+// Uploads a part in a multipart upload.
+//
+// Note: After you initiate a multipart upload and upload one or more parts,
+// you must either complete or abort the multipart upload in order to stop
+// getting charged for storage of the uploaded parts. Only after you either
+// complete or abort the multipart upload does Amazon S3 free up the parts
+// storage and stop charging you for it.
+func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) {
+	req, out := c.UploadPartRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opUploadPartCopy = "UploadPartCopy"
+
+// UploadPartCopyRequest generates a request for the UploadPartCopy operation.
+func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) {
+	op := &request.Operation{
+		Name:       opUploadPartCopy,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}/{Key+}",
+	}
+
+	if input == nil {
+		input = &UploadPartCopyInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &UploadPartCopyOutput{}
+	req.Data = output
+	return
+}
+
+// Uploads a part by copying data from an existing object as the data source.
+func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) {
+	req, out := c.UploadPartCopyRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+type AbortMultipartUploadInput struct {
+	_ struct{} `type:"structure"`
+
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AbortMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadInput) GoString() string { + return s.String() +} + +type AbortMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s AbortMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadOutput) GoString() string { + return s.String() +} + +type AccessControlPolicy struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s AccessControlPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlPolicy) GoString() string { + return s.String() +} + +type Bucket struct { + _ struct{} `type:"structure"` + + // Date the bucket was created. + CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The name of the bucket. + Name *string `type:"string"` +} + +// String returns the string representation +func (s Bucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Bucket) GoString() string { + return s.String() +} + +type BucketLifecycleConfiguration struct { + _ struct{} `type:"structure"` + + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s BucketLifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLifecycleConfiguration) GoString() string { + return s.String() +} + +type BucketLoggingStatus struct { + _ struct{} `type:"structure"` + + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s BucketLoggingStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLoggingStatus) GoString() string { + return s.String() +} + +type CORSConfiguration struct { + _ struct{} `type:"structure"` + + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s CORSConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSConfiguration) GoString() string { + return s.String() +} + +type CORSRule struct { + _ struct{} `type:"structure"` + + // Specifies which headers are allowed in a pre-flight OPTIONS request. 
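+	// (An AllowedHeader entry may contain a single "*" wildcard to match any
+	// header; this sentence is an editorial addition, not upstream text.)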
+ AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` + + // Identifies HTTP methods that the domain/origin specified in the rule is allowed + // to execute. + AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` + + // One or more origins you want customers to be able to access the bucket from. + AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"` + + // One or more headers in the response that you want customers to be able to + // access from their applications (for example, from a JavaScript XMLHttpRequest + // object). + ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` + + // The time in seconds that your browser is to cache the preflight response + // for the specified resource. + MaxAgeSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s CORSRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSRule) GoString() string { + return s.String() +} + +type CloudFunctionConfiguration struct { + _ struct{} `type:"structure"` + + CloudFunction *string `type:"string"` + + // Bucket event for which to send notifications. + Event *string `type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + InvocationRole *string `type:"string"` +} + +// String returns the string representation +func (s CloudFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudFunctionConfiguration) GoString() string { + return s.String() +} + +type CommonPrefix struct { + _ struct{} `type:"structure"` + + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s CommonPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CommonPrefix) GoString() string { + return s.String() +} + +type CompleteMultipartUploadInput struct { + _ struct{} `type:"structure" payload:"MultipartUpload"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadInput) GoString() string { + return s.String() +} + +type CompleteMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + Bucket *string `type:"string"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + Key *string `min:"1" type:"string"` + + Location *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CompleteMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadOutput) GoString() string { + return s.String() +} + +type CompletedMultipartUpload struct { + _ struct{} `type:"structure"` + + Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s CompletedMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedMultipartUpload) GoString() string { + return s.String() +} + +type CompletedPart struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Part number that identifies the part. This is a positive integer between + // 1 and 10,000. + PartNumber *int64 `type:"integer"` +} + +// String returns the string representation +func (s CompletedPart) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedPart) GoString() string { + return s.String() +} + +type Condition struct { + _ struct{} `type:"structure"` + + // The HTTP error code when the redirect is applied. In the event of an error, + // if the error code equals this value, then the specified redirect is applied. + // Required when parent element Condition is specified and sibling KeyPrefixEquals + // is not specified. 
If both are specified, then both must be true for the redirect
+	// to be applied.
+	HttpErrorCodeReturnedEquals *string `type:"string"`
+
+	// The object key name prefix when the redirect is applied. For example, to
+	// redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html.
+	// To redirect requests for all pages with the prefix docs/, the key prefix will
+	// be /docs, which identifies all objects in the docs/ folder. Required when
+	// the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals
+	// is not specified. If both conditions are specified, both must be true for
+	// the redirect to be applied.
+	KeyPrefixEquals *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Condition) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Condition) GoString() string {
+	return s.String()
+}
+
+type CopyObjectInput struct {
+	_ struct{} `type:"structure"`
+
+	// The canned ACL to apply to the object.
+	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Specifies caching behavior along the request/reply chain.
+	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+	// Specifies presentational information for the object.
+	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+	// Specifies what content encodings have been applied to the object and thus
+	// what decoding mechanisms must be applied to obtain the media-type referenced
+	// by the Content-Type header field.
+	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+	// The language the content is in.
+	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+	// A standard MIME type describing the format of the object data.
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	// The name of the source bucket and key name of the source object, separated
+	// by a slash (/). Must be URL-encoded.
+	CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
+
+	// Copies the object if its entity tag (ETag) matches the specified tag.
+	CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
+
+	// Copies the object if it has been modified since the specified time.
+	CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+	// Copies the object if its entity tag (ETag) is different than the specified
+	// ETag.
+	CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
+
+	// Copies the object if it hasn't been modified since the specified time.
+	CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+	// Specifies the algorithm to use when decrypting the source object (e.g., AES256).
+	CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+	// the source object. The encryption key provided in this header must be one
+	// that was used when the source object was created.
+	CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
+	CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// The date and time at which the object is no longer cacheable.
+	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+	// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+	// Allows grantee to read the object data and its metadata.
+	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+	// Allows grantee to read the object ACL.
+	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+	// Allows grantee to write the ACL for the applicable object.
+	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// A map of metadata to store with the object in S3.
+	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+	// Specifies whether the metadata is copied from the source object or replaced
+	// with metadata provided in the request.
+	MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header.
+	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+	// requests for an object protected by AWS KMS will fail if not made via SSL
+	// or using SigV4.
Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s CopyObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectInput) GoString() string { + return s.String() +} + +type CopyObjectOutput struct { + _ struct{} `type:"structure" payload:"CopyObjectResult"` + + CopyObjectResult *CopyObjectResult `type:"structure"` + + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If the object expiration is configured, the response includes this header. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the newly created copy. 
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CopyObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectOutput) GoString() string { + return s.String() +} + +type CopyObjectResult struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s CopyObjectResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectResult) GoString() string { + return s.String() +} + +type CopyPartResult struct { + _ struct{} `type:"structure"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // Date and time at which the object was uploaded. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s CopyPartResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyPartResult) GoString() string { + return s.String() +} + +type CreateBucketConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the region where the bucket will be created. If you don't specify + // a region, the bucket will be created in US Standard. + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s CreateBucketConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketConfiguration) GoString() string { + return s.String() +} + +type CreateBucketInput struct { + _ struct{} `type:"structure" payload:"CreateBucketConfiguration"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. 
+	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateBucketInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBucketInput) GoString() string {
+	return s.String()
+}
+
+type CreateBucketOutput struct {
+	_ struct{} `type:"structure"`
+
+	Location *string `location:"header" locationName:"Location" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateBucketOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBucketOutput) GoString() string {
+	return s.String()
+}
+
+type CreateMultipartUploadInput struct {
+	_ struct{} `type:"structure"`
+
+	// The canned ACL to apply to the object.
+	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Specifies caching behavior along the request/reply chain.
+	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+	// Specifies presentational information for the object.
+	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+	// Specifies what content encodings have been applied to the object and thus
+	// what decoding mechanisms must be applied to obtain the media-type referenced
+	// by the Content-Type header field.
+	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+	// The language the content is in.
+	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+	// A standard MIME type describing the format of the object data.
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	// The date and time at which the object is no longer cacheable.
+	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+	// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+	// Allows grantee to read the object data and its metadata.
+	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+	// Allows grantee to read the object ACL.
+	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+	// Allows grantee to write the ACL for the applicable object.
+	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// A map of metadata to store with the object in S3.
+	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header.
+	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+	// requests for an object protected by AWS KMS will fail if not made via SSL
+	// or using SigV4. Documentation on configuring any of the officially supported
+	// AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+	// The Server-side encryption algorithm used when storing this object in S3
+	// (e.g., AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+	// The type of storage to use for the object. Defaults to 'STANDARD'.
+	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+	// If the bucket is configured as a website, redirects requests for this object
+	// to another object in the same bucket or to an external URL. Amazon S3 stores
+	// the value of this header in the object metadata.
+	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateMultipartUploadInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateMultipartUploadInput) GoString() string {
+	return s.String()
+}
+
+type CreateMultipartUploadOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Name of the bucket to which the multipart upload was initiated.
+	Bucket *string `locationName:"Bucket" type:"string"`
+
+	// Object key for which the multipart upload was initiated.
+	Key *string `min:"1" type:"string"`
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+	// If server-side encryption with a customer-provided encryption key was requested,
+	// the response will include this header confirming the encryption algorithm
+	// used.
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// If server-side encryption with a customer-provided encryption key was requested,
+	// the response will include this header to provide round trip message integrity
+	// verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // ID for the initiated multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s CreateMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMultipartUploadOutput) GoString() string { + return s.String() +} + +type Delete struct { + _ struct{} `type:"structure"` + + Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` + + // Element to enable quiet mode for the request. When you add this element, + // you must set its value to true. + Quiet *bool `type:"boolean"` +} + +// String returns the string representation +func (s Delete) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Delete) GoString() string { + return s.String() +} + +type DeleteBucketCorsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsInput) GoString() string { + return s.String() +} + +type DeleteBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsOutput) GoString() string { + return s.String() +} + +type DeleteBucketInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInput) GoString() string { + return s.String() +} + +type DeleteBucketLifecycleInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleInput) GoString() string { + return s.String() +} + +type DeleteBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleOutput) GoString() string { + return s.String() +} + +type DeleteBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s 
DeleteBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketOutput) GoString() string { + return s.String() +} + +type DeleteBucketPolicyInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyInput) GoString() string { + return s.String() +} + +type DeleteBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyOutput) GoString() string { + return s.String() +} + +type DeleteBucketReplicationInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationInput) GoString() string { + return s.String() +} + +type DeleteBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationOutput) GoString() string { + return s.String() +} + +type DeleteBucketTaggingInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingInput) GoString() string { + return s.String() +} + +type DeleteBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingOutput) GoString() string { + return s.String() +} + +type DeleteBucketWebsiteInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteInput) GoString() string { + return s.String() +} + +type DeleteBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteOutput) GoString() string { + return s.String() +} + +type DeleteMarkerEntry struct { + _ struct{} `type:"structure"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. 
+ Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s DeleteMarkerEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMarkerEntry) GoString() string { + return s.String() +} + +type DeleteObjectInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectInput) GoString() string { + return s.String() +} + +type DeleteObjectOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Returns the version ID of the delete marker created as a result of the DELETE + // operation. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectOutput) GoString() string { + return s.String() +} + +type DeleteObjectsInput struct { + _ struct{} `type:"structure" payload:"Delete"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Delete *Delete `locationName:"Delete" type:"structure" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+}
+
+// String returns the string representation
+func (s DeleteObjectsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectsInput) GoString() string {
+	return s.String()
+}
+
+type DeleteObjectsOutput struct {
+	_ struct{} `type:"structure"`
+
+	Deleted []*DeletedObject `type:"list" flattened:"true"`
+
+	Errors []*Error `locationName:"Error" type:"list" flattened:"true"`
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s DeleteObjectsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectsOutput) GoString() string {
+	return s.String()
+}
+
+type DeletedObject struct {
+	_ struct{} `type:"structure"`
+
+	DeleteMarker *bool `type:"boolean"`
+
+	DeleteMarkerVersionId *string `type:"string"`
+
+	Key *string `min:"1" type:"string"`
+
+	VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DeletedObject) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeletedObject) GoString() string {
+	return s.String()
+}
+
+type Destination struct {
+	_ struct{} `type:"structure"`
+
+	// Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store
+	// replicas of the object identified by the rule.
+	Bucket *string `type:"string" required:"true"`
+
+	// The class of storage used to store the object.
+	StorageClass *string `type:"string" enum:"StorageClass"`
+}
+
+// String returns the string representation
+func (s Destination) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Destination) GoString() string {
+	return s.String()
+}
+
+type Error struct {
+	_ struct{} `type:"structure"`
+
+	Code *string `type:"string"`
+
+	Key *string `min:"1" type:"string"`
+
+	Message *string `type:"string"`
+
+	VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Error) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Error) GoString() string {
+	return s.String()
+}
+
+type ErrorDocument struct {
+	_ struct{} `type:"structure"`
+
+	// The object key name to use when a 4XX class error occurs.
+	Key *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ErrorDocument) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ErrorDocument) GoString() string {
+	return s.String()
+}
+
+// Container for a key-value pair that defines the criteria for the filter rule.
+type FilterRule struct {
+	_ struct{} `type:"structure"`
+
+	// Object key name prefix or suffix identifying one or more objects to which
+	// the filtering rule applies. Maximum prefix length can be up to 1,024 characters.
+	// Overlapping prefixes and suffixes are not supported.
For more information, + // go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Name *string `type:"string" enum:"FilterRuleName"` + + Value *string `type:"string"` +} + +// String returns the string representation +func (s FilterRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterRule) GoString() string { + return s.String() +} + +type GetBucketAclInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclInput) GoString() string { + return s.String() +} + +type GetBucketAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s GetBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclOutput) GoString() string { + return s.String() +} + +type GetBucketCorsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsInput) GoString() string { + return s.String() +} + +type GetBucketCorsOutput struct { + _ struct{} `type:"structure"` + + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsOutput) GoString() string { + return s.String() +} + +type GetBucketLifecycleConfigurationInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +type GetBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` + + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +type GetBucketLifecycleInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleInput) GoString() string { + return s.String() +} + +type 
GetBucketLifecycleOutput struct {
+	_ struct{} `type:"structure"`
+
+	Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLifecycleOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLifecycleOutput) GoString() string {
+	return s.String()
+}
+
+type GetBucketLocationInput struct {
+	_ struct{} `type:"structure"`
+
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLocationInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLocationInput) GoString() string {
+	return s.String()
+}
+
+type GetBucketLocationOutput struct {
+	_ struct{} `type:"structure"`
+
+	LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
+}
+
+// String returns the string representation
+func (s GetBucketLocationOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLocationOutput) GoString() string {
+	return s.String()
+}
+
+type GetBucketLoggingInput struct {
+	_ struct{} `type:"structure"`
+
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLoggingInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLoggingInput) GoString() string {
+	return s.String()
+}
+
+type GetBucketLoggingOutput struct {
+	_ struct{} `type:"structure"`
+
+	LoggingEnabled *LoggingEnabled `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketLoggingOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLoggingOutput) GoString() string {
+	return s.String()
+}
+
+type GetBucketNotificationConfigurationRequest struct {
+	_ struct{} `type:"structure"`
+
+	// Name of the bucket to get the notification configuration for.
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketNotificationConfigurationRequest) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketNotificationConfigurationRequest) GoString() string {
+	return s.String()
+}
+
+type GetBucketPolicyInput struct {
+	_ struct{} `type:"structure"`
+
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketPolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketPolicyInput) GoString() string {
+	return s.String()
+}
+
+type GetBucketPolicyOutput struct {
+	_ struct{} `type:"structure" payload:"Policy"`
+
+	// The bucket policy as a JSON document.
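+	// An illustrative (editorial) sketch of the shape:
+	// {"Version":"2012-10-17","Statement":[...]}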
+ Policy *string `type:"string"` +} + +// String returns the string representation +func (s GetBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyOutput) GoString() string { + return s.String() +} + +type GetBucketReplicationInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationInput) GoString() string { + return s.String() +} + +type GetBucketReplicationOutput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + // Container for replication rules. You can add as many as 1,000 rules. Total + // replication configuration size can be up to 2 MB. + ReplicationConfiguration *ReplicationConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationOutput) GoString() string { + return s.String() +} + +type GetBucketRequestPaymentInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentInput) GoString() string { + return s.String() +} + +type GetBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. 
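+	// Valid values are Requester and BucketOwner.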
+ Payer *string `type:"string" enum:"Payer"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +type GetBucketTaggingInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingInput) GoString() string { + return s.String() +} + +type GetBucketTaggingOutput struct { + _ struct{} `type:"structure"` + + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingOutput) GoString() string { + return s.String() +} + +type GetBucketVersioningInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningInput) GoString() string { + return s.String() +} + +type GetBucketVersioningOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"` + + // The versioning state of the bucket. 
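+	// Valid values are Enabled and Suspended.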
+ Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s GetBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningOutput) GoString() string { + return s.String() +} + +type GetBucketWebsiteInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteInput) GoString() string { + return s.String() +} + +type GetBucketWebsiteOutput struct { + _ struct{} `type:"structure"` + + ErrorDocument *ErrorDocument `type:"structure"` + + IndexDocument *IndexDocument `type:"structure"` + + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s GetBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteOutput) GoString() string { + return s.String() +} + +type GetObjectAclInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclInput) GoString() string { + return s.String() +} + +type GetObjectAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclOutput) GoString() string { + return s.String() +} + +type GetObjectInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). 
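+ //
+ // An illustrative sketch of a conditional download using this field (svc is
+ // an assumed, configured client; the bucket, key, and ETag values are
+ // placeholders, not part of the generated API):
+ //
+ //  out, err := svc.GetObject(&GetObjectInput{
+ //      Bucket:  aws.String("mybucket"),
+ //      Key:     aws.String("mykey"),
+ //      IfMatch: aws.String("\"686897696a7c876b7e\""), // quoted ETag
+ //  })
+ //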
+ IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+
+ // Return the object only if it has been modified since the specified time,
+ // otherwise return a 304 (not modified).
+ IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Return the object only if its entity tag (ETag) is different from the one
+ // specified, otherwise return a 304 (not modified).
+ IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+ // Return the object only if it has not been modified since the specified time,
+ // otherwise return a 412 (precondition failed).
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Downloads the specified range bytes of an object. For more information about
+ // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Sets the Cache-Control header of the response.
+ ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"`
+
+ // Sets the Content-Disposition header of the response.
+ ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"`
+
+ // Sets the Content-Encoding header of the response.
+ ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"`
+
+ // Sets the Content-Language header of the response.
+ ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"`
+
+ // Sets the Content-Type header of the response.
+ ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`
+
+ // Sets the Expires header of the response.
+ ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
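+ //
+ // A minimal sketch of computing this value from a raw key held in a []byte
+ // named key (names are illustrative; uses crypto/md5 and encoding/base64
+ // from the standard library):
+ //
+ //  sum := md5.Sum(key)
+ //  keyMD5 := base64.StdEncoding.EncodeToString(sum[:])
+ //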
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectInput) GoString() string { + return s.String() +} + +type GetObjectOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Object data. + Body io.ReadCloser `type:"blob"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + + // The portion of the object returned in the response. + ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. 
+ MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s GetObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectOutput) GoString() string { + return s.String() +} + +type GetObjectTorrentInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s GetObjectTorrentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentInput) GoString() string { + return s.String() +} + +type GetObjectTorrentOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + Body io.ReadCloser `type:"blob"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectTorrentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentOutput) GoString() string { + return s.String() +} + +type Grant struct { + _ struct{} `type:"structure"` + + Grantee *Grantee `type:"structure"` + + // Specifies the permission given to the grantee. + Permission *string `type:"string" enum:"Permission"` +} + +// String returns the string representation +func (s Grant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grant) GoString() string { + return s.String() +} + +type Grantee struct { + _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Screen name of the grantee. + DisplayName *string `type:"string"` + + // Email address of the grantee. + EmailAddress *string `type:"string"` + + // The canonical user ID of the grantee. + ID *string `type:"string"` + + // Type of grantee + Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"` + + // URI of the grantee group. + URI *string `type:"string"` +} + +// String returns the string representation +func (s Grantee) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grantee) GoString() string { + return s.String() +} + +type HeadBucketInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s HeadBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadBucketInput) GoString() string { + return s.String() +} + +type HeadBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s HeadBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadBucketOutput) GoString() string { + return s.String() +} + +type HeadObjectInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). 
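+ //
+ // Sketch of a conditional HEAD using this field (illustrative only; svc is
+ // an assumed, configured client and lastSeen is a previously recorded
+ // time.Time):
+ //
+ //  out, err := svc.HeadObject(&HeadObjectInput{
+ //      Bucket:          aws.String("mybucket"), // placeholder
+ //      Key:             aws.String("mykey"),    // placeholder
+ //      IfModifiedSince: aws.Time(lastSeen),
+ //  })
+ //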
+ IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Return the object only if its entity tag (ETag) is different from the one
+ // specified, otherwise return a 304 (not modified).
+ IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+ // Return the object only if it has not been modified since the specified time,
+ // otherwise return a 412 (precondition failed).
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Downloads the specified range bytes of an object. For more information about
+ // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s HeadObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadObjectInput) GoString() string {
+ return s.String()
+}
+
+type HeadObjectOutput struct {
+ _ struct{} `type:"structure"`
+
+ AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. 
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s HeadObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadObjectOutput) GoString() string { + return s.String() +} + +type IndexDocument struct { + _ struct{} `type:"structure"` + + // A suffix that is appended to a request that is for a directory on the website + // endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ + // the data that is returned will be for the object with the key name images/index.html) + // The suffix must not be empty and must not include a slash character. + Suffix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s IndexDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IndexDocument) GoString() string { + return s.String() +} + +type Initiator struct { + _ struct{} `type:"structure"` + + // Name of the Principal. + DisplayName *string `type:"string"` + + // If the principal is an AWS account, it provides the Canonical User ID. If + // the principal is an IAM User, it provides a user ARN value. + ID *string `type:"string"` +} + +// String returns the string representation +func (s Initiator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Initiator) GoString() string { + return s.String() +} + +// Container for object key name prefix and suffix filtering rules. +type KeyFilter struct { + _ struct{} `type:"structure"` + + // A list of containers for key value pair that defines the criteria for the + // filter rule. + FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s KeyFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyFilter) GoString() string { + return s.String() +} + +// Container for specifying the AWS Lambda notification configuration. +type LambdaFunctionConfiguration struct { + _ struct{} `type:"structure"` + + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Container for object key name filtering rules. For information about key + // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. 
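+ //
+ // A sketch of a prefix-only filter built from the types in this package
+ // (the prefix value is a placeholder):
+ //
+ //  filter := &NotificationConfigurationFilter{
+ //      Key: &KeyFilter{
+ //          FilterRules: []*FilterRule{{
+ //              Name:  aws.String("prefix"),
+ //              Value: aws.String("images/"),
+ //          }},
+ //      },
+ //  }
+ //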
+ Filter *NotificationConfigurationFilter `type:"structure"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Lambda cloud function ARN that Amazon S3 can invoke when it detects events + // of the specified type. + LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` +} + +// String returns the string representation +func (s LambdaFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaFunctionConfiguration) GoString() string { + return s.String() +} + +type LifecycleConfiguration struct { + _ struct{} `type:"structure"` + + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s LifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleConfiguration) GoString() string { + return s.String() +} + +type LifecycleExpiration struct { + _ struct{} `type:"structure"` + + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` +} + +// String returns the string representation +func (s LifecycleExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleExpiration) GoString() string { + return s.String() +} + +type LifecycleRule struct { + _ struct{} `type:"structure"` + + Expiration *LifecycleExpiration `type:"structure"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` + + // Prefix identifying one or more objects to which the rule applies. + Prefix *string `type:"string" required:"true"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. 
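+ //
+ // For example, an enabled rule that expires objects under a placeholder
+ // prefix after 30 days might look like (illustrative sketch):
+ //
+ //  rule := &LifecycleRule{
+ //      ID:         aws.String("expire-logs"), // placeholder
+ //      Prefix:     aws.String("logs/"),       // placeholder
+ //      Status:     aws.String("Enabled"),
+ //      Expiration: &LifecycleExpiration{Days: aws.Int64(30)},
+ //  }
+ //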
+ Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s LifecycleRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRule) GoString() string { + return s.String() +} + +type ListBucketsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListBucketsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsInput) GoString() string { + return s.String() +} + +type ListBucketsOutput struct { + _ struct{} `type:"structure"` + + Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s ListBucketsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketsOutput) GoString() string { + return s.String() +} + +type ListMultipartUploadsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Together with upload-id-marker, this parameter specifies the multipart upload + // after which listing should begin. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of multipart uploads, from 1 to 1,000, to return + // in the response body. 1,000 is the maximum number of uploads that can be + // returned in a response. + MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` + + // Lists in-progress uploads only for those keys that begin with the specified + // prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter + // is ignored. + UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListMultipartUploadsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsInput) GoString() string { + return s.String() +} + +type ListMultipartUploadsOutput struct { + _ struct{} `type:"structure"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. 
+ EncodingType *string `type:"string" enum:"EncodingType"` + + // Indicates whether the returned list of multipart uploads is truncated. A + // value of true indicates that the list was truncated. The list can be truncated + // if the number of multipart uploads exceeds the limit allowed or specified + // by max uploads. + IsTruncated *bool `type:"boolean"` + + // The key at or after which the listing began. + KeyMarker *string `type:"string"` + + // Maximum number of multipart uploads that could have been included in the + // response. + MaxUploads *int64 `type:"integer"` + + // When a list is truncated, this element specifies the value that should be + // used for the key-marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // When a list is truncated, this element specifies the value that should be + // used for the upload-id-marker request parameter in a subsequent request. + NextUploadIdMarker *string `type:"string"` + + // When a prefix is provided in the request, this field contains the specified + // prefix. The result contains only keys starting with the specified prefix. + Prefix *string `type:"string"` + + // Upload ID after which listing began. + UploadIdMarker *string `type:"string"` + + Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListMultipartUploadsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsOutput) GoString() string { + return s.String() +} + +type ListObjectVersionsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Specifies the key to start with when listing objects in a bucket. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Specifies the object version you want to start listing from. 
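+ //
+ // Sketch of manual pagination with the marker fields (illustrative; svc is
+ // an assumed, configured client and the bucket name is a placeholder):
+ //
+ //  in := &ListObjectVersionsInput{Bucket: aws.String("mybucket")}
+ //  for {
+ //      out, err := svc.ListObjectVersions(in)
+ //      if err != nil {
+ //          break
+ //      }
+ //      // ... consume out.Versions and out.DeleteMarkers ...
+ //      if out.IsTruncated == nil || !*out.IsTruncated {
+ //          break
+ //      }
+ //      in.KeyMarker = out.NextKeyMarker
+ //      in.VersionIdMarker = out.NextVersionIdMarker
+ //  }
+ //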
+ VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListObjectVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsInput) GoString() string { + return s.String() +} + +type ListObjectVersionsOutput struct { + _ struct{} `type:"structure"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether or not Amazon S3 returned all of the results + // that satisfied the search criteria. If your results were truncated, you can + // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker + // response parameters as a starting place in another request to return the + // rest of the results. + IsTruncated *bool `type:"boolean"` + + // Marks the last Key returned in a truncated response. + KeyMarker *string `type:"string"` + + MaxKeys *int64 `type:"integer"` + + Name *string `type:"string"` + + // Use this value for the key marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // Use this value for the next version id marker parameter in a subsequent request. + NextVersionIdMarker *string `type:"string"` + + Prefix *string `type:"string"` + + VersionIdMarker *string `type:"string"` + + Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListObjectVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsOutput) GoString() string { + return s.String() +} + +type ListObjectsInput struct { + _ struct{} `type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Specifies the key to start with when listing objects in a bucket. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. 
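+ //
+ // For example, listing only keys under a placeholder prefix with the
+ // paginated helper (an illustrative sketch; svc is an assumed client):
+ //
+ //  err := svc.ListObjectsPages(&ListObjectsInput{
+ //      Bucket: aws.String("mybucket"), // placeholder
+ //      Prefix: aws.String("photos/"),  // placeholder
+ //  }, func(page *ListObjectsOutput, lastPage bool) bool {
+ //      // ... consume page.Contents ...
+ //      return true // keep fetching pages
+ //  })
+ //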
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsInput) GoString() string {
+ return s.String()
+}
+
+type ListObjectsOutput struct {
+ _ struct{} `type:"structure"`
+
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ Contents []*Object `type:"list" flattened:"true"`
+
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // A flag that indicates whether or not Amazon S3 returned all of the results
+ // that satisfied the search criteria.
+ IsTruncated *bool `type:"boolean"`
+
+ Marker *string `type:"string"`
+
+ MaxKeys *int64 `type:"integer"`
+
+ Name *string `type:"string"`
+
+ // When the response is truncated (the IsTruncated element value in the response
+ // is true), you can use the key name in this field as the marker in the subsequent
+ // request to get the next set of objects. Amazon S3 lists objects in alphabetical
+ // order. Note: This element is returned only if you have the delimiter request
+ // parameter specified. If the response does not include NextMarker and it is
+ // truncated, you can use the value of the last Key in the response as the marker
+ // in the subsequent request to get the next set of object keys.
+ NextMarker *string `type:"string"`
+
+ Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsOutput) GoString() string {
+ return s.String()
+}
+
+type ListPartsInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Sets the maximum number of parts to return.
+ MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"`
+
+ // Specifies the part after which listing should begin. Only parts with higher
+ // part numbers will be listed.
+ PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Upload ID identifying the multipart upload whose parts are being listed.
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListPartsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPartsInput) GoString() string {
+ return s.String()
+}
+
+type ListPartsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the bucket to which the multipart upload was initiated.
+ Bucket *string `type:"string"`
+
+ // Identifies who initiated the multipart upload.
+ Initiator *Initiator `type:"structure"` + + // Indicates whether the returned list of parts is truncated. + IsTruncated *bool `type:"boolean"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // Maximum number of parts that were allowed in the response. + MaxParts *int64 `type:"integer"` + + // When a list is truncated, this element specifies the last part in the list, + // as well as the value to use for the part-number-marker request parameter + // in a subsequent request. + NextPartNumberMarker *int64 `type:"integer"` + + Owner *Owner `type:"structure"` + + // Part number after which listing begins. + PartNumberMarker *int64 `type:"integer"` + + Parts []*Part `locationName:"Part" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID identifying the multipart upload whose parts are being listed. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s ListPartsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsOutput) GoString() string { + return s.String() +} + +type LoggingEnabled struct { + _ struct{} `type:"structure"` + + // Specifies the bucket where you want Amazon S3 to store server access logs. + // You can have your logs delivered to any bucket that you own, including the + // same bucket that is being logged. You can also configure multiple buckets + // to deliver their logs to the same target bucket. In this case you should + // choose a different TargetPrefix for each source bucket so that the delivered + // log files can be distinguished by key. + TargetBucket *string `type:"string"` + + TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + + // This element lets you specify a prefix for the keys that the log files will + // be stored under. + TargetPrefix *string `type:"string"` +} + +// String returns the string representation +func (s LoggingEnabled) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingEnabled) GoString() string { + return s.String() +} + +type MultipartUpload struct { + _ struct{} `type:"structure"` + + // Date and time at which the multipart upload was initiated. + Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` + + // Key of the object for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + Owner *Owner `type:"structure"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID that identifies the multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s MultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MultipartUpload) GoString() string { + return s.String() +} + +// Specifies when noncurrent object versions expire. Upon expiration, Amazon +// S3 permanently deletes the noncurrent object versions. 
You set this lifecycle +// configuration action on a bucket that has versioning enabled (or suspended) +// to request that Amazon S3 delete noncurrent object versions at a specific +// period in the object's lifetime. +type NoncurrentVersionExpiration struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (/AmazonS3/latest/dev/s3-access-control.html) in the Amazon Simple Storage + // Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` +} + +// String returns the string representation +func (s NoncurrentVersionExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionExpiration) GoString() string { + return s.String() +} + +// Container for the transition rule that describes when noncurrent objects +// transition to the STANDARD_IA or GLACIER storage class. If your bucket is +// versioning-enabled (or versioning is suspended), you can set this action +// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA +// or GLACIER storage class at a specific period in the object's lifetime. +type NoncurrentVersionTransition struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (/AmazonS3/latest/dev/s3-access-control.html) in the Amazon Simple Storage + // Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s NoncurrentVersionTransition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionTransition) GoString() string { + return s.String() +} + +// Container for specifying the notification configuration of the bucket. If +// this element is empty, notifications are turned off on the bucket. 
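+//
+// For example, putting an empty configuration disables notifications (an
+// illustrative sketch; svc is an assumed, configured client and the bucket
+// name is a placeholder):
+//
+//  _, err := svc.PutBucketNotificationConfiguration(
+//      &PutBucketNotificationConfigurationInput{
+//          Bucket:                    aws.String("mybucket"),
+//          NotificationConfiguration: &NotificationConfiguration{},
+//      })
+//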
+type NotificationConfiguration struct { + _ struct{} `type:"structure"` + + LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` + + QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` + + TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s NotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfiguration) GoString() string { + return s.String() +} + +type NotificationConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + + QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + + TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationDeprecated) GoString() string { + return s.String() +} + +// Container for object key name filtering rules. For information about key +// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// in the Amazon Simple Storage Service Developer Guide. +type NotificationConfigurationFilter struct { + _ struct{} `type:"structure"` + + // Container for object key name prefix and suffix filtering rules. + Key *KeyFilter `locationName:"S3Key" type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationFilter) GoString() string { + return s.String() +} + +type Object struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + Key *string `min:"1" type:"string"` + + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectStorageClass"` +} + +// String returns the string representation +func (s Object) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Object) GoString() string { + return s.String() +} + +type ObjectIdentifier struct { + _ struct{} `type:"structure"` + + // Key name of the object to delete. + Key *string `min:"1" type:"string" required:"true"` + + // VersionId for the specific version of the object to delete. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectIdentifier) GoString() string { + return s.String() +} + +type ObjectVersion struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. 
+ LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + // Size in bytes of the object. + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectVersion) GoString() string { + return s.String() +} + +type Owner struct { + _ struct{} `type:"structure"` + + DisplayName *string `type:"string"` + + ID *string `type:"string"` +} + +// String returns the string representation +func (s Owner) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Owner) GoString() string { + return s.String() +} + +type Part struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Date and time at which the part was uploaded. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Part number identifying the part. This is a positive integer between 1 and + // 10,000. + PartNumber *int64 `type:"integer"` + + // Size of the uploaded part data. + Size *int64 `type:"integer"` +} + +// String returns the string representation +func (s Part) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Part) GoString() string { + return s.String() +} + +type PutBucketAclInput struct { + _ struct{} `type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. 
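+ //
+ // Grant headers like this one can be replaced by a canned ACL; an
+ // illustrative sketch (svc is an assumed client, the bucket a placeholder):
+ //
+ //  _, err := svc.PutBucketAcl(&PutBucketAclInput{
+ //      Bucket: aws.String("mybucket"),
+ //      ACL:    aws.String("private"),
+ //  })
+ //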
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +} + +// String returns the string representation +func (s PutBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclInput) GoString() string { + return s.String() +} + +type PutBucketAclOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclOutput) GoString() string { + return s.String() +} + +type PutBucketCorsInput struct { + _ struct{} `type:"structure" payload:"CORSConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsInput) GoString() string { + return s.String() +} + +type PutBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsOutput) GoString() string { + return s.String() +} + +type PutBucketLifecycleConfigurationInput struct { + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +type PutBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketLifecycleInput struct { + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleInput) GoString() string { + return s.String() +} + +type PutBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleOutput) GoString() string { + return s.String() +} + +type PutBucketLoggingInput struct { + _ struct{} `type:"structure" payload:"BucketLoggingStatus"` + + Bucket *string `location:"uri" 
locationName:"Bucket" type:"string" required:"true"` + + BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingInput) GoString() string { + return s.String() +} + +type PutBucketLoggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationConfigurationInput struct { + _ struct{} `type:"structure" payload:"NotificationConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for specifying the notification configuration of the bucket. If + // this element is empty, notifications are turned off on the bucket. + NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationInput) GoString() string { + return s.String() +} + +type PutBucketNotificationConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationInput struct { + _ struct{} `type:"structure" payload:"NotificationConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketNotificationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationInput) GoString() string { + return s.String() +} + +type PutBucketNotificationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationOutput) GoString() string { + return s.String() +} + +type PutBucketPolicyInput struct { + _ struct{} `type:"structure" payload:"Policy"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The bucket policy as a JSON document. 
+ Policy *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyInput) GoString() string { + return s.String() +} + +type PutBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyOutput) GoString() string { + return s.String() +} + +type PutBucketReplicationInput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for replication rules. You can add as many as 1,000 rules. Total + // replication configuration size can be up to 2 MB. + ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationInput) GoString() string { + return s.String() +} + +type PutBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationOutput) GoString() string { + return s.String() +} + +type PutBucketRequestPaymentInput struct { + _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentInput) GoString() string { + return s.String() +} + +type PutBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +type PutBucketTaggingInput struct { + _ struct{} `type:"structure" payload:"Tagging"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingInput) GoString() string { + return s.String() +} + +type PutBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingOutput) GoString() string { + return s.String() +} + +type PutBucketVersioningInput struct { + _ 
struct{} `type:"structure" payload:"VersioningConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningInput) GoString() string { + return s.String() +} + +type PutBucketVersioningOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningOutput) GoString() string { + return s.String() +} + +type PutBucketWebsiteInput struct { + _ struct{} `type:"structure" payload:"WebsiteConfiguration"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteInput) GoString() string { + return s.String() +} + +type PutBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteOutput) GoString() string { + return s.String() +} + +type PutObjectAclInput struct { + _ struct{} `type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s PutObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclInput) GoString() string { + return s.String() +} + +type PutObjectAclOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclOutput) GoString() string { + return s.String() +} + +type PutObjectInput struct { + _ struct{} `type:"structure" payload:"Body"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Object data. + Body io.ReadSeeker `type:"blob"` + + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. 
+	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header.
+	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+	// requests for an object protected by AWS KMS will fail if not made via SSL
+	// or using SigV4. Documentation on configuring any of the officially supported
+	// AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+	// The Server-side encryption algorithm used when storing this object in S3
+	// (e.g., AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+	// The type of storage to use for the object. Defaults to 'STANDARD'.
+	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+	// If the bucket is configured as a website, redirects requests for this object
+	// to another object in the same bucket or to an external URL. Amazon S3 stores
+	// the value of this header in the object metadata.
+	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectInput) GoString() string {
+	return s.String()
+}
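+
+// Editor's note: an illustrative sketch (not part of the upstream generated
+// file) of a minimal PutObject call. It assumes a client built with the usual
+// session helper; the bucket and key names are hypothetical.
+//
+//	svc := s3.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))
+//	_, err := svc.PutObject(&s3.PutObjectInput{
+//		Bucket:      aws.String("example-bucket"), // hypothetical
+//		Key:         aws.String("example-key"),    // hypothetical
+//		Body:        bytes.NewReader([]byte("hello world")),
+//		ContentType: aws.String("text/plain"),
+//	})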
+type PutObjectOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Entity tag for the uploaded object.
+	ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+	// If the object expiration is configured, this will contain the expiration
+	// date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
+	Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+	// If server-side encryption with a customer-provided encryption key was requested,
+	// the response will include this header confirming the encryption algorithm
+	// used.
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// If server-side encryption with a customer-provided encryption key was requested,
+	// the response will include this header to provide round trip message integrity
+	// verification of the customer-provided encryption key.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// If present, specifies the ID of the AWS Key Management Service (KMS) master
+	// encryption key that was used for the object.
+	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+	// The Server-side encryption algorithm used when storing this object in S3
+	// (e.g., AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+	// Version of the object.
+	VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectOutput) GoString() string {
+	return s.String()
+}
+
+// Container for specifying a configuration when you want Amazon S3 to publish
+// events to an Amazon Simple Queue Service (Amazon SQS) queue.
+type QueueConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+	// Container for object key name filtering rules. For information about key
+	// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+	// in the Amazon Simple Storage Service Developer Guide.
+	Filter *NotificationConfigurationFilter `type:"structure"`
+
+	// Optional unique identifier for configurations in a notification configuration.
+	// If you don't provide one, Amazon S3 will assign an ID.
+	Id *string `type:"string"`
+
+	// Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects
+	// events of the specified type.
+	QueueArn *string `locationName:"Queue" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s QueueConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueueConfiguration) GoString() string {
+	return s.String()
+}
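+
+// Editor's note: an illustrative sketch (not part of the upstream generated
+// file) of attaching a QueueConfiguration to a bucket; the bucket name and
+// queue ARN are hypothetical, and svc is an *s3.S3 client as above.
+//
+//	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
+//		Bucket: aws.String("example-bucket"), // hypothetical
+//		NotificationConfiguration: &s3.NotificationConfiguration{
+//			QueueConfigurations: []*s3.QueueConfiguration{{
+//				Events:   []*string{aws.String(s3.EventS3ObjectCreated)},
+//				QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:example-queue"), // hypothetical
+//			}},
+//		},
+//	})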
+type QueueConfigurationDeprecated struct {
+	_ struct{} `type:"structure"`
+
+	// Bucket event for which to send notifications.
+	Event *string `type:"string" enum:"Event"`
+
+	Events []*string `locationName:"Event" type:"list" flattened:"true"`
+
+	// Optional unique identifier for configurations in a notification configuration.
+	// If you don't provide one, Amazon S3 will assign an ID.
+	Id *string `type:"string"`
+
+	Queue *string `type:"string"`
+}
+
+// String returns the string representation
+func (s QueueConfigurationDeprecated) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueueConfigurationDeprecated) GoString() string {
+	return s.String()
+}
+
+type Redirect struct {
+	_ struct{} `type:"structure"`
+
+	// The host name to use in the redirect request.
+	HostName *string `type:"string"`
+
+	// The HTTP redirect code to use on the response. Not required if one of the
+	// siblings is present.
+	HttpRedirectCode *string `type:"string"`
+
+	// Protocol to use (http, https) when redirecting requests. The default is the
+	// protocol that is used in the original request.
+	Protocol *string `type:"string" enum:"Protocol"`
+
+	// The object key prefix to use in the redirect request. For example, to redirect
+	// requests for all pages with prefix docs/ (objects in the docs/ folder) to
+	// documents/, you can set a condition block with KeyPrefixEquals set to docs/
+	// and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required
+	// if one of the siblings is present. Can be present only if ReplaceKeyWith
+	// is not provided.
+	ReplaceKeyPrefixWith *string `type:"string"`
+
+	// The specific object key to use in the redirect request. For example, redirect
+	// request to error.html. Not required if one of the siblings is present. Can
+	// be present only if ReplaceKeyPrefixWith is not provided.
+	ReplaceKeyWith *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Redirect) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Redirect) GoString() string {
+	return s.String()
+}
+
+type RedirectAllRequestsTo struct {
+	_ struct{} `type:"structure"`
+
+	// Name of the host where requests will be redirected.
+	HostName *string `type:"string" required:"true"`
+
+	// Protocol to use (http, https) when redirecting requests. The default is the
+	// protocol that is used in the original request.
+	Protocol *string `type:"string" enum:"Protocol"`
+}
+
+// String returns the string representation
+func (s RedirectAllRequestsTo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RedirectAllRequestsTo) GoString() string {
+	return s.String()
+}
+
+// Container for replication rules. You can add as many as 1,000 rules. Total
+// replication configuration size can be up to 2 MB.
+type ReplicationConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating
+	// the objects.
+	Role *string `type:"string" required:"true"`
+
+	// Container for information about a particular replication rule. Replication
+	// configuration must have at least one rule and can contain up to 1,000 rules.
+	Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s ReplicationConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicationConfiguration) GoString() string {
+	return s.String()
+}
+
+type ReplicationRule struct {
+	_ struct{} `type:"structure"`
+
+	Destination *Destination `type:"structure" required:"true"`
+
+	// Unique identifier for the rule. The value cannot be longer than 255 characters.
+	ID *string `type:"string"`
+
+	// Object key name prefix identifying one or more objects to which the rule applies.
+	// Maximum prefix length can be up to 1,024 characters. Overlapping prefixes
+	// are not supported.
+	Prefix *string `type:"string" required:"true"`
+
+	// The rule is ignored if status is not Enabled.
+	Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"`
+}
+
+// String returns the string representation
+func (s ReplicationRule) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicationRule) GoString() string {
+	return s.String()
+}
+
+type RequestPaymentConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies who pays for the download and request fees.
+	Payer *string `type:"string" required:"true" enum:"Payer"`
+}
+
+// String returns the string representation
+func (s RequestPaymentConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RequestPaymentConfiguration) GoString() string {
+	return s.String()
+}
+
+type RestoreObjectInput struct {
+	_ struct{} `type:"structure" payload:"RestoreRequest"`
+
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"`
+
+	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s RestoreObjectInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreObjectInput) GoString() string {
+	return s.String()
+}
+
+type RestoreObjectOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s RestoreObjectOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreObjectOutput) GoString() string {
+	return s.String()
+}
+
+type RestoreRequest struct {
+	_ struct{} `type:"structure"`
+
+	// Lifetime of the active copy in days.
+	Days *int64 `type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s RestoreRequest) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreRequest) GoString() string {
+	return s.String()
+}
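+
+// Editor's note: an illustrative sketch (not part of the upstream generated
+// file) of restoring an archived (GLACIER) object for ten days via
+// RestoreRequest; bucket and key are hypothetical, and svc is an *s3.S3
+// client as above.
+//
+//	_, err := svc.RestoreObject(&s3.RestoreObjectInput{
+//		Bucket:         aws.String("example-bucket"), // hypothetical
+//		Key:            aws.String("example-key"),    // hypothetical
+//		RestoreRequest: &s3.RestoreRequest{Days: aws.Int64(10)},
+//	})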
+type RoutingRule struct {
+	_ struct{} `type:"structure"`
+
+	// A container for describing a condition that must be met for the specified
+	// redirect to apply. For example, 1. If request is for pages in the /docs folder,
+	// redirect to the /documents folder. 2. If request results in HTTP error 4xx,
+	// redirect request to another host where you might process the error.
+	Condition *Condition `type:"structure"`
+
+	// Container for redirect information. You can redirect requests to another
+	// host, to another page, or with another protocol. In the event of an error,
+	// you can specify a different error code to return.
+	Redirect *Redirect `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s RoutingRule) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RoutingRule) GoString() string {
+	return s.String()
+}
+
+type Rule struct {
+	_ struct{} `type:"structure"`
+
+	Expiration *LifecycleExpiration `type:"structure"`
+
+	// Unique identifier for the rule. The value cannot be longer than 255 characters.
+	ID *string `type:"string"`
+
+	// Specifies when noncurrent object versions expire. Upon expiration, Amazon
+	// S3 permanently deletes the noncurrent object versions. You set this lifecycle
+	// configuration action on a bucket that has versioning enabled (or suspended)
+	// to request that Amazon S3 delete noncurrent object versions at a specific
+	// period in the object's lifetime.
+	NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
+
+	// Container for the transition rule that describes when noncurrent objects
+	// transition to the STANDARD_IA or GLACIER storage class. If your bucket is
+	// versioning-enabled (or versioning is suspended), you can set this action
+	// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA
+	// or GLACIER storage class at a specific period in the object's lifetime.
+	NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"`
+
+	// Prefix identifying one or more objects to which the rule applies.
+	Prefix *string `type:"string" required:"true"`
+
+	// If 'Enabled', the rule is currently being applied. If 'Disabled', the rule
+	// is not currently being applied.
+	Status *string `type:"string" required:"true" enum:"ExpirationStatus"`
+
+	Transition *Transition `type:"structure"`
+}
+
+// String returns the string representation
+func (s Rule) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Rule) GoString() string {
+	return s.String()
+}
+
+type Tag struct {
+	_ struct{} `type:"structure"`
+
+	// Name of the tag.
+	Key *string `min:"1" type:"string" required:"true"`
+
+	// Value of the tag.
+	Value *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+	return s.String()
+}
+
+type Tagging struct {
+	_ struct{} `type:"structure"`
+
+	TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s Tagging) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tagging) GoString() string {
+	return s.String()
+}
+
+type TargetGrant struct {
+	_ struct{} `type:"structure"`
+
+	Grantee *Grantee `type:"structure"`
+
+	// Logging permissions assigned to the Grantee for the bucket.
+	Permission *string `type:"string" enum:"BucketLogsPermission"`
+}
+
+// String returns the string representation
+func (s TargetGrant) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TargetGrant) GoString() string {
+	return s.String()
+}
+
+// Container for specifying the configuration when you want Amazon S3 to publish
+// events to an Amazon Simple Notification Service (Amazon SNS) topic.
+type TopicConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+	// Container for object key name filtering rules. For information about key
+	// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+	// in the Amazon Simple Storage Service Developer Guide.
+	Filter *NotificationConfigurationFilter `type:"structure"`
+
+	// Optional unique identifier for configurations in a notification configuration.
+	// If you don't provide one, Amazon S3 will assign an ID.
+	Id *string `type:"string"`
+
+	// Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects
+	// events of the specified type.
+	TopicArn *string `locationName:"Topic" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s TopicConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TopicConfiguration) GoString() string {
+	return s.String()
+}
+
+type TopicConfigurationDeprecated struct {
+	_ struct{} `type:"structure"`
+
+	// Bucket event for which to send notifications.
+	Event *string `type:"string" enum:"Event"`
+
+	Events []*string `locationName:"Event" type:"list" flattened:"true"`
+
+	// Optional unique identifier for configurations in a notification configuration.
+	// If you don't provide one, Amazon S3 will assign an ID.
+	Id *string `type:"string"`
+
+	// Amazon SNS topic to which Amazon S3 will publish a message to report the
+	// specified events for the bucket.
+	Topic *string `type:"string"`
+}
+
+// String returns the string representation
+func (s TopicConfigurationDeprecated) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TopicConfigurationDeprecated) GoString() string {
+	return s.String()
+}
+
+type Transition struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates at what date the object is to be moved or deleted. Should be in
+	// GMT ISO 8601 format.
+	Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// Indicates the lifetime, in days, of the objects that are subject to the rule.
+	// The value must be a non-zero positive integer.
+	Days *int64 `type:"integer"`
+
+	// The class of storage used to store the object.
+	StorageClass *string `type:"string" enum:"TransitionStorageClass"`
+}
+
+// String returns the string representation
+func (s Transition) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Transition) GoString() string {
+	return s.String()
+}
+
+type UploadPartCopyInput struct {
+	_ struct{} `type:"structure"`
+
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// The name of the source bucket and key name of the source object, separated
+	// by a slash (/). Must be URL-encoded.
+	CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
+
+	// Copies the object if its entity tag (ETag) matches the specified tag.
+	CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
+
+	// Copies the object if it has been modified since the specified time.
+	CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+	// Copies the object if its entity tag (ETag) is different than the specified
+	// ETag.
+	CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
+
+	// Copies the object if it hasn't been modified since the specified time.
+	CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+	// The range of bytes to copy from the source object. The range value must use
+	// the form bytes=first-last, where the first and last are the zero-based byte
+	// offsets to copy. For example, bytes=0-9 indicates that you want to copy the
+	// first ten bytes of the source. You can copy a range only if the source object
+	// is greater than 5 GB.
+	CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"`
+
+	// Specifies the algorithm to use when decrypting the source object (e.g., AES256).
+	CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+	// the source object. The encryption key provided in this header must be one
+	// that was used when the source object was created.
+	CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
+	CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// Part number of part being copied. This is a positive integer between 1 and
+	// 10,000.
+	PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header. This must be the same encryption key specified in the initiate multipart
+	// upload request.
+	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// Upload ID identifying the multipart upload whose part is being copied.
+	UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UploadPartCopyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartCopyInput) GoString() string {
+	return s.String()
+}
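+
+// Editor's note: an illustrative sketch (not part of the upstream generated
+// file) of copying a byte range of an existing object as one part of a
+// multipart upload; all names and the upload ID are hypothetical, and svc is
+// an *s3.S3 client as above.
+//
+//	_, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
+//		Bucket:          aws.String("dest-bucket"),        // hypothetical
+//		Key:             aws.String("dest-key"),           // hypothetical
+//		CopySource:      aws.String("src-bucket/src-key"), // hypothetical
+//		CopySourceRange: aws.String("bytes=0-1048575"),
+//		PartNumber:      aws.Int64(1),
+//		UploadId:        aws.String("EXAMPLEUPLOADID"), // from CreateMultipartUpload
+//	})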
+type UploadPartCopyOutput struct {
+	_ struct{} `type:"structure" payload:"CopyPartResult"`
+
+	CopyPartResult *CopyPartResult `type:"structure"`
+
+	// The version of the source object that was copied, if you have enabled versioning
+	// on the source bucket.
+	CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+	// If server-side encryption with a customer-provided encryption key was requested,
+	// the response will include this header confirming the encryption algorithm
+	// used.
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// If server-side encryption with a customer-provided encryption key was requested,
+	// the response will include this header to provide round trip message integrity
+	// verification of the customer-provided encryption key.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// If present, specifies the ID of the AWS Key Management Service (KMS) master
+	// encryption key that was used for the object.
+	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+	// The Server-side encryption algorithm used when storing this object in S3
+	// (e.g., AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+}
+
+// String returns the string representation
+func (s UploadPartCopyOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartCopyOutput) GoString() string {
+	return s.String()
+}
+
+type UploadPartInput struct {
+	_ struct{} `type:"structure" payload:"Body"`
+
+	Body io.ReadSeeker `type:"blob"`
+
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Size of the body in bytes. This parameter is useful when the size of the
+	// body cannot be determined automatically.
+	ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"`
+
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// Part number of part being uploaded. This is a positive integer between 1
+	// and 10,000.
+	PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// header. This must be the same encryption key specified in the initiate multipart
+	// upload request.
+	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// Upload ID identifying the multipart upload whose part is being uploaded.
+	UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UploadPartInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartInput) GoString() string {
+	return s.String()
+}
+
+type UploadPartOutput struct {
+	_ struct{} `type:"structure"`
+
+	// Entity tag for the uploaded object.
+	ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+	// If server-side encryption with a customer-provided encryption key was requested,
+	// the response will include this header confirming the encryption algorithm
+	// used.
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// If server-side encryption with a customer-provided encryption key was requested,
+	// the response will include this header to provide round trip message integrity
+	// verification of the customer-provided encryption key.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// If present, specifies the ID of the AWS Key Management Service (KMS) master
+	// encryption key that was used for the object.
+	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+	// The Server-side encryption algorithm used when storing this object in S3
+	// (e.g., AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+}
+
+// String returns the string representation
+func (s UploadPartOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartOutput) GoString() string {
+	return s.String()
+}
+
+type VersioningConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies whether MFA delete is enabled in the bucket versioning configuration.
+	// This element is only returned if the bucket has been configured with MFA
+	// delete. If the bucket has never been so configured, this element is not returned.
+	MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"`
+
+	// The versioning state of the bucket.
+	Status *string `type:"string" enum:"BucketVersioningStatus"`
+}
+
+// String returns the string representation
+func (s VersioningConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VersioningConfiguration) GoString() string {
+	return s.String()
+}
+
+type WebsiteConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	ErrorDocument *ErrorDocument `type:"structure"`
+
+	IndexDocument *IndexDocument `type:"structure"`
+
+	RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
+
+	RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
+}
+
+// String returns the string representation
+func (s WebsiteConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s WebsiteConfiguration) GoString() string {
+	return s.String()
+}
+
+const (
+	// @enum BucketCannedACL
+	BucketCannedACLPrivate = "private"
+	// @enum BucketCannedACL
+	BucketCannedACLPublicRead = "public-read"
+	// @enum BucketCannedACL
+	BucketCannedACLPublicReadWrite = "public-read-write"
+	// @enum BucketCannedACL
+	BucketCannedACLAuthenticatedRead = "authenticated-read"
+)
+
+const (
+	// @enum BucketLocationConstraint
+	BucketLocationConstraintEu = "EU"
+	// @enum BucketLocationConstraint
+	BucketLocationConstraintEuWest1 = "eu-west-1"
+	// @enum BucketLocationConstraint
+	BucketLocationConstraintUsWest1 = "us-west-1"
+	// @enum BucketLocationConstraint
+	BucketLocationConstraintUsWest2 = "us-west-2"
+	// @enum BucketLocationConstraint
+	BucketLocationConstraintApSoutheast1 = "ap-southeast-1"
+	// @enum BucketLocationConstraint
+	BucketLocationConstraintApSoutheast2 = "ap-southeast-2"
+	// @enum BucketLocationConstraint
+	BucketLocationConstraintApNortheast1 = "ap-northeast-1"
+	// @enum BucketLocationConstraint
+	BucketLocationConstraintSaEast1 = "sa-east-1"
+	// @enum BucketLocationConstraint
+	BucketLocationConstraintCnNorth1 = "cn-north-1"
+	// @enum BucketLocationConstraint
+	BucketLocationConstraintEuCentral1 = "eu-central-1"
+)
+
+const (
+	// @enum BucketLogsPermission
+	BucketLogsPermissionFullControl = "FULL_CONTROL"
+	// @enum BucketLogsPermission
+	BucketLogsPermissionRead = "READ"
+	// @enum BucketLogsPermission
+	BucketLogsPermissionWrite = "WRITE"
+)
+
+const (
+	// @enum BucketVersioningStatus
+	BucketVersioningStatusEnabled = "Enabled"
+	// @enum BucketVersioningStatus
+	BucketVersioningStatusSuspended = "Suspended"
+)
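+
+// Editor's note: an illustrative sketch (not part of the upstream generated
+// file) showing how the @enum string constants in this file are passed through
+// aws.String, here when creating a bucket outside the classic region; the
+// bucket name is hypothetical, and svc is an *s3.S3 client as above.
+//
+//	_, err := svc.CreateBucket(&s3.CreateBucketInput{
+//		Bucket: aws.String("example-bucket"), // hypothetical
+//		ACL:    aws.String(s3.BucketCannedACLPrivate),
+//		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
+//			LocationConstraint: aws.String(s3.BucketLocationConstraintEuWest1),
+//		},
+//	})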
+
+// Requests Amazon S3 to encode the object keys in the response and specifies
+// the encoding method to use. An object key may contain any Unicode character;
+// however, the XML 1.0 parser cannot parse some characters, such as characters
+// with an ASCII value from 0 to 10. For characters that are not supported in
+// XML 1.0, you can add this parameter to request that Amazon S3 encode the
+// keys in the response.
+const (
+	// @enum EncodingType
+	EncodingTypeUrl = "url"
+)
+
+// Bucket event for which to send notifications.
+const (
+	// @enum Event
+	EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
+	// @enum Event
+	EventS3ObjectCreated = "s3:ObjectCreated:*"
+	// @enum Event
+	EventS3ObjectCreatedPut = "s3:ObjectCreated:Put"
+	// @enum Event
+	EventS3ObjectCreatedPost = "s3:ObjectCreated:Post"
+	// @enum Event
+	EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy"
+	// @enum Event
+	EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload"
+	// @enum Event
+	EventS3ObjectRemoved = "s3:ObjectRemoved:*"
+	// @enum Event
+	EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete"
+	// @enum Event
+	EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
+)
+
+const (
+	// @enum ExpirationStatus
+	ExpirationStatusEnabled = "Enabled"
+	// @enum ExpirationStatus
+	ExpirationStatusDisabled = "Disabled"
+)
+
+const (
+	// @enum FilterRuleName
+	FilterRuleNamePrefix = "prefix"
+	// @enum FilterRuleName
+	FilterRuleNameSuffix = "suffix"
+)
+
+const (
+	// @enum MFADelete
+	MFADeleteEnabled = "Enabled"
+	// @enum MFADelete
+	MFADeleteDisabled = "Disabled"
+)
+
+const (
+	// @enum MFADeleteStatus
+	MFADeleteStatusEnabled = "Enabled"
+	// @enum MFADeleteStatus
+	MFADeleteStatusDisabled = "Disabled"
+)
+
+const (
+	// @enum MetadataDirective
+	MetadataDirectiveCopy = "COPY"
+	// @enum MetadataDirective
+	MetadataDirectiveReplace = "REPLACE"
+)
+
+const (
+	// @enum ObjectCannedACL
+	ObjectCannedACLPrivate = "private"
+	// @enum ObjectCannedACL
+	ObjectCannedACLPublicRead = "public-read"
+	// @enum ObjectCannedACL
+	ObjectCannedACLPublicReadWrite = "public-read-write"
+	// @enum ObjectCannedACL
+	ObjectCannedACLAuthenticatedRead = "authenticated-read"
+	// @enum ObjectCannedACL
+	ObjectCannedACLAwsExecRead = "aws-exec-read"
+	// @enum ObjectCannedACL
+	ObjectCannedACLBucketOwnerRead = "bucket-owner-read"
+	// @enum ObjectCannedACL
+	ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control"
+)
+
+const (
+	// @enum ObjectStorageClass
+	ObjectStorageClassStandard = "STANDARD"
+	// @enum ObjectStorageClass
+	ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY"
+	// @enum ObjectStorageClass
+	ObjectStorageClassGlacier = "GLACIER"
+)
+
+const (
+	// @enum ObjectVersionStorageClass
+	ObjectVersionStorageClassStandard = "STANDARD"
+)
+
+const (
+	// @enum Payer
+	PayerRequester = "Requester"
+	// @enum Payer
+	PayerBucketOwner = "BucketOwner"
+)
+
+const (
+	// @enum Permission
+	PermissionFullControl = "FULL_CONTROL"
+	// @enum Permission
+	PermissionWrite = "WRITE"
+	// @enum Permission
+	PermissionWriteAcp = "WRITE_ACP"
+	// @enum Permission
+	PermissionRead = "READ"
+	// @enum Permission
+	PermissionReadAcp = "READ_ACP"
+)
+
+const (
+	// @enum Protocol
+	ProtocolHttp = "http"
+	// @enum Protocol
+	ProtocolHttps = "https"
+)
+
+const (
+	// @enum ReplicationRuleStatus
+	ReplicationRuleStatusEnabled = "Enabled"
+	// @enum ReplicationRuleStatus
+	ReplicationRuleStatusDisabled = "Disabled"
+)
+
+const (
+	// @enum ReplicationStatus
+	ReplicationStatusComplete = "COMPLETE"
+	// @enum ReplicationStatus
+	ReplicationStatusPending = "PENDING"
+	// @enum ReplicationStatus
+	ReplicationStatusFailed = "FAILED"
+	// @enum ReplicationStatus
+	ReplicationStatusReplica = "REPLICA"
+)
+
+// If present, indicates that the requester was successfully charged for the
+// request.
+const (
+	// @enum RequestCharged
+	RequestChargedRequester = "requester"
+)
+
+// Confirms that the requester knows that she or he will be charged for the
+// request. Bucket owners need not specify this parameter in their requests.
+// Documentation on downloading objects from requester pays buckets can be found
+// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+const (
+	// @enum RequestPayer
+	RequestPayerRequester = "requester"
+)
+
+const (
+	// @enum ServerSideEncryption
+	ServerSideEncryptionAes256 = "AES256"
+	// @enum ServerSideEncryption
+	ServerSideEncryptionAwsKms = "aws:kms"
+)
+
+const (
+	// @enum StorageClass
+	StorageClassStandard = "STANDARD"
+	// @enum StorageClass
+	StorageClassReducedRedundancy = "REDUCED_REDUNDANCY"
+	// @enum StorageClass
+	StorageClassStandardIa = "STANDARD_IA"
+)
+
+const (
+	// @enum TransitionStorageClass
+	TransitionStorageClassGlacier = "GLACIER"
+	// @enum TransitionStorageClass
+	TransitionStorageClassStandardIa = "STANDARD_IA"
+)
+
+const (
+	// @enum Type
+	TypeCanonicalUser = "CanonicalUser"
+	// @enum Type
+	TypeAmazonCustomerByEmail = "AmazonCustomerByEmail"
+	// @enum Type
+	TypeGroup = "Group"
+)
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
new file mode 100644
index 00000000..c3a2702d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
@@ -0,0 +1,43 @@
+package s3
+
+import (
+	"io/ioutil"
+	"regexp"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`)
+
+func buildGetBucketLocation(r *request.Request) {
+	if r.DataFilled() {
+		out := r.Data.(*GetBucketLocationOutput)
+		b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+		if err != nil {
+			r.Error = awserr.New("SerializationError", "failed reading response body", err)
+			return
+		}
+
+		match := reBucketLocation.FindSubmatch(b)
+		if len(match) > 1 {
+			loc := string(match[1])
+			out.LocationConstraint = &loc
+		}
+	}
+}
+
+func populateLocationConstraint(r *request.Request) {
+	if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" {
+		in := r.Params.(*CreateBucketInput)
+		if in.CreateBucketConfiguration == nil {
+			r.Params = awsutil.CopyOf(r.Params)
+			in = r.Params.(*CreateBucketInput)
+			in.CreateBucketConfiguration = &CreateBucketConfiguration{
+				LocationConstraint: r.Config.Region,
+			}
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/content_md5.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/content_md5.go
new file mode 100644
index 00000000..9fc5df94
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/content_md5.go
@@ -0,0 +1,36 @@
+package s3
+
+import (
+	"crypto/md5"
+	"encoding/base64"
+	"io"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
+// require it.
+func contentMD5(r *request.Request) {
+	h := md5.New()
+
+	// hash the body. seek back to the first position after reading to reset
+	// the body for transmission. copy errors may be assumed to be from the
+	// body.
+	_, err := io.Copy(h, r.Body)
+	if err != nil {
+		r.Error = awserr.New("ContentMD5", "failed to read body", err)
+		return
+	}
+	_, err = r.Body.Seek(0, 0)
+	if err != nil {
+		r.Error = awserr.New("ContentMD5", "failed to seek body", err)
+		return
+	}
+
+	// encode the md5 checksum in base64 and set the request header.
+	sum := h.Sum(nil)
+	sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
+	base64.StdEncoding.Encode(sum64, sum)
+	r.HTTPRequest.Header.Set("Content-MD5", string(sum64))
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations.go
new file mode 100644
index 00000000..c227f242
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations.go
@@ -0,0 +1,37 @@
+package s3
+
+import (
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+func init() {
+	initClient = func(c *client.Client) {
+		// Support building custom host-style bucket endpoints
+		c.Handlers.Build.PushFront(updateHostWithBucket)
+
+		// Require SSL when using SSE keys
+		c.Handlers.Validate.PushBack(validateSSERequiresSSL)
+		c.Handlers.Build.PushBack(computeSSEKeys)
+
+		// S3 uses custom error unmarshaling logic
+		c.Handlers.UnmarshalError.Clear()
+		c.Handlers.UnmarshalError.PushBack(unmarshalError)
+	}
+
+	initRequest = func(r *request.Request) {
+		switch r.Operation.Name {
+		case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration:
+			// These S3 operations require Content-MD5 to be set
+			r.Handlers.Build.PushBack(contentMD5)
+		case opGetBucketLocation:
+			// GetBucketLocation has custom parsing logic
+			r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
+		case opCreateBucket:
+			// Auto-populate LocationConstraint with current region
+			r.Handlers.Validate.PushFront(populateLocationConstraint)
+		case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
+			r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarshalError)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
new file mode 100644
index 00000000..b7cf4f9c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
@@ -0,0 +1,60 @@
+package s3
+
+import (
+	"regexp"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
+
+// dnsCompatibleBucketName returns true if the bucket name is DNS compatible.
+// Buckets created outside of the classic region MUST be DNS compatible.
+func dnsCompatibleBucketName(bucket string) bool {
+	return reDomain.MatchString(bucket) &&
+		!reIPAddress.MatchString(bucket) &&
+		!strings.Contains(bucket, "..")
+}
+
+// hostStyleBucketName returns true if the request should put the bucket in
+// the host. This is false if S3ForcePathStyle is explicitly set or if the
+// bucket is not DNS compatible.
+func hostStyleBucketName(r *request.Request, bucket string) bool { + if aws.BoolValue(r.Config.S3ForcePathStyle) { + return false + } + + // Bucket might be DNS compatible but dots in the hostname will fail + // certificate validation, so do not use host-style. + if r.HTTPRequest.URL.Scheme == "https" && strings.Contains(bucket, ".") { + return false + } + + // GetBucketLocation should be able to be called from any region within + // a partition, and return the associated region of the bucket. + if r.Operation.Name == opGetBucketLocation { + return false + } + + // Use host-style if the bucket is DNS compatible + return dnsCompatibleBucketName(bucket) +} + +func updateHostWithBucket(r *request.Request) { + b, _ := awsutil.ValuesAtPath(r.Params, "Bucket") + if len(b) == 0 { + return + } + + if bucket := b[0].(*string); aws.StringValue(bucket) != "" && hostStyleBucketName(r, *bucket) { + r.HTTPRequest.URL.Host = *bucket + "." + r.HTTPRequest.URL.Host + r.HTTPRequest.URL.Path = strings.Replace(r.HTTPRequest.URL.Path, "/{Bucket}", "", -1) + if r.HTTPRequest.URL.Path == "" { + r.HTTPRequest.URL.Path = "/" + } + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go new file mode 100644 index 00000000..9e66afb3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go @@ -0,0 +1,246 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package s3iface provides an interface for the Amazon Simple Storage Service. +package s3iface + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" +) + +// S3API is the interface type for s3.S3. +type S3API interface { + AbortMultipartUploadRequest(*s3.AbortMultipartUploadInput) (*request.Request, *s3.AbortMultipartUploadOutput) + + AbortMultipartUpload(*s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) + + CompleteMultipartUploadRequest(*s3.CompleteMultipartUploadInput) (*request.Request, *s3.CompleteMultipartUploadOutput) + + CompleteMultipartUpload(*s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) + + CopyObjectRequest(*s3.CopyObjectInput) (*request.Request, *s3.CopyObjectOutput) + + CopyObject(*s3.CopyObjectInput) (*s3.CopyObjectOutput, error) + + CreateBucketRequest(*s3.CreateBucketInput) (*request.Request, *s3.CreateBucketOutput) + + CreateBucket(*s3.CreateBucketInput) (*s3.CreateBucketOutput, error) + + CreateMultipartUploadRequest(*s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput) + + CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) + + DeleteBucketRequest(*s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput) + + DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error) + + DeleteBucketCorsRequest(*s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput) + + DeleteBucketCors(*s3.DeleteBucketCorsInput) (*s3.DeleteBucketCorsOutput, error) + + DeleteBucketLifecycleRequest(*s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput) + + DeleteBucketLifecycle(*s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error) + + DeleteBucketPolicyRequest(*s3.DeleteBucketPolicyInput) (*request.Request, *s3.DeleteBucketPolicyOutput) + + DeleteBucketPolicy(*s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error) + + 
DeleteBucketReplicationRequest(*s3.DeleteBucketReplicationInput) (*request.Request, *s3.DeleteBucketReplicationOutput) + + DeleteBucketReplication(*s3.DeleteBucketReplicationInput) (*s3.DeleteBucketReplicationOutput, error) + + DeleteBucketTaggingRequest(*s3.DeleteBucketTaggingInput) (*request.Request, *s3.DeleteBucketTaggingOutput) + + DeleteBucketTagging(*s3.DeleteBucketTaggingInput) (*s3.DeleteBucketTaggingOutput, error) + + DeleteBucketWebsiteRequest(*s3.DeleteBucketWebsiteInput) (*request.Request, *s3.DeleteBucketWebsiteOutput) + + DeleteBucketWebsite(*s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error) + + DeleteObjectRequest(*s3.DeleteObjectInput) (*request.Request, *s3.DeleteObjectOutput) + + DeleteObject(*s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) + + DeleteObjectsRequest(*s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput) + + DeleteObjects(*s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error) + + GetBucketAclRequest(*s3.GetBucketAclInput) (*request.Request, *s3.GetBucketAclOutput) + + GetBucketAcl(*s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error) + + GetBucketCorsRequest(*s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput) + + GetBucketCors(*s3.GetBucketCorsInput) (*s3.GetBucketCorsOutput, error) + + GetBucketLifecycleRequest(*s3.GetBucketLifecycleInput) (*request.Request, *s3.GetBucketLifecycleOutput) + + GetBucketLifecycle(*s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error) + + GetBucketLifecycleConfigurationRequest(*s3.GetBucketLifecycleConfigurationInput) (*request.Request, *s3.GetBucketLifecycleConfigurationOutput) + + GetBucketLifecycleConfiguration(*s3.GetBucketLifecycleConfigurationInput) (*s3.GetBucketLifecycleConfigurationOutput, error) + + GetBucketLocationRequest(*s3.GetBucketLocationInput) (*request.Request, *s3.GetBucketLocationOutput) + + GetBucketLocation(*s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error) + + GetBucketLoggingRequest(*s3.GetBucketLoggingInput) (*request.Request, *s3.GetBucketLoggingOutput) + + GetBucketLogging(*s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error) + + GetBucketNotificationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfigurationDeprecated) + + GetBucketNotification(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error) + + GetBucketNotificationConfigurationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfiguration) + + GetBucketNotificationConfiguration(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfiguration, error) + + GetBucketPolicyRequest(*s3.GetBucketPolicyInput) (*request.Request, *s3.GetBucketPolicyOutput) + + GetBucketPolicy(*s3.GetBucketPolicyInput) (*s3.GetBucketPolicyOutput, error) + + GetBucketReplicationRequest(*s3.GetBucketReplicationInput) (*request.Request, *s3.GetBucketReplicationOutput) + + GetBucketReplication(*s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error) + + GetBucketRequestPaymentRequest(*s3.GetBucketRequestPaymentInput) (*request.Request, *s3.GetBucketRequestPaymentOutput) + + GetBucketRequestPayment(*s3.GetBucketRequestPaymentInput) (*s3.GetBucketRequestPaymentOutput, error) + + GetBucketTaggingRequest(*s3.GetBucketTaggingInput) (*request.Request, *s3.GetBucketTaggingOutput) + + GetBucketTagging(*s3.GetBucketTaggingInput) (*s3.GetBucketTaggingOutput, error) + + GetBucketVersioningRequest(*s3.GetBucketVersioningInput) 
(*request.Request, *s3.GetBucketVersioningOutput) + + GetBucketVersioning(*s3.GetBucketVersioningInput) (*s3.GetBucketVersioningOutput, error) + + GetBucketWebsiteRequest(*s3.GetBucketWebsiteInput) (*request.Request, *s3.GetBucketWebsiteOutput) + + GetBucketWebsite(*s3.GetBucketWebsiteInput) (*s3.GetBucketWebsiteOutput, error) + + GetObjectRequest(*s3.GetObjectInput) (*request.Request, *s3.GetObjectOutput) + + GetObject(*s3.GetObjectInput) (*s3.GetObjectOutput, error) + + GetObjectAclRequest(*s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput) + + GetObjectAcl(*s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) + + GetObjectTorrentRequest(*s3.GetObjectTorrentInput) (*request.Request, *s3.GetObjectTorrentOutput) + + GetObjectTorrent(*s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error) + + HeadBucketRequest(*s3.HeadBucketInput) (*request.Request, *s3.HeadBucketOutput) + + HeadBucket(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error) + + HeadObjectRequest(*s3.HeadObjectInput) (*request.Request, *s3.HeadObjectOutput) + + HeadObject(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error) + + ListBucketsRequest(*s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput) + + ListBuckets(*s3.ListBucketsInput) (*s3.ListBucketsOutput, error) + + ListMultipartUploadsRequest(*s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput) + + ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) + + ListMultipartUploadsPages(*s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool) error + + ListObjectVersionsRequest(*s3.ListObjectVersionsInput) (*request.Request, *s3.ListObjectVersionsOutput) + + ListObjectVersions(*s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error) + + ListObjectVersionsPages(*s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool) error + + ListObjectsRequest(*s3.ListObjectsInput) (*request.Request, *s3.ListObjectsOutput) + + ListObjects(*s3.ListObjectsInput) (*s3.ListObjectsOutput, error) + + ListObjectsPages(*s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool) error + + ListPartsRequest(*s3.ListPartsInput) (*request.Request, *s3.ListPartsOutput) + + ListParts(*s3.ListPartsInput) (*s3.ListPartsOutput, error) + + ListPartsPages(*s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool) error + + PutBucketAclRequest(*s3.PutBucketAclInput) (*request.Request, *s3.PutBucketAclOutput) + + PutBucketAcl(*s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error) + + PutBucketCorsRequest(*s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput) + + PutBucketCors(*s3.PutBucketCorsInput) (*s3.PutBucketCorsOutput, error) + + PutBucketLifecycleRequest(*s3.PutBucketLifecycleInput) (*request.Request, *s3.PutBucketLifecycleOutput) + + PutBucketLifecycle(*s3.PutBucketLifecycleInput) (*s3.PutBucketLifecycleOutput, error) + + PutBucketLifecycleConfigurationRequest(*s3.PutBucketLifecycleConfigurationInput) (*request.Request, *s3.PutBucketLifecycleConfigurationOutput) + + PutBucketLifecycleConfiguration(*s3.PutBucketLifecycleConfigurationInput) (*s3.PutBucketLifecycleConfigurationOutput, error) + + PutBucketLoggingRequest(*s3.PutBucketLoggingInput) (*request.Request, *s3.PutBucketLoggingOutput) + + PutBucketLogging(*s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error) + + PutBucketNotificationRequest(*s3.PutBucketNotificationInput) (*request.Request, *s3.PutBucketNotificationOutput) + + PutBucketNotification(*s3.PutBucketNotificationInput) 
(*s3.PutBucketNotificationOutput, error) + + PutBucketNotificationConfigurationRequest(*s3.PutBucketNotificationConfigurationInput) (*request.Request, *s3.PutBucketNotificationConfigurationOutput) + + PutBucketNotificationConfiguration(*s3.PutBucketNotificationConfigurationInput) (*s3.PutBucketNotificationConfigurationOutput, error) + + PutBucketPolicyRequest(*s3.PutBucketPolicyInput) (*request.Request, *s3.PutBucketPolicyOutput) + + PutBucketPolicy(*s3.PutBucketPolicyInput) (*s3.PutBucketPolicyOutput, error) + + PutBucketReplicationRequest(*s3.PutBucketReplicationInput) (*request.Request, *s3.PutBucketReplicationOutput) + + PutBucketReplication(*s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error) + + PutBucketRequestPaymentRequest(*s3.PutBucketRequestPaymentInput) (*request.Request, *s3.PutBucketRequestPaymentOutput) + + PutBucketRequestPayment(*s3.PutBucketRequestPaymentInput) (*s3.PutBucketRequestPaymentOutput, error) + + PutBucketTaggingRequest(*s3.PutBucketTaggingInput) (*request.Request, *s3.PutBucketTaggingOutput) + + PutBucketTagging(*s3.PutBucketTaggingInput) (*s3.PutBucketTaggingOutput, error) + + PutBucketVersioningRequest(*s3.PutBucketVersioningInput) (*request.Request, *s3.PutBucketVersioningOutput) + + PutBucketVersioning(*s3.PutBucketVersioningInput) (*s3.PutBucketVersioningOutput, error) + + PutBucketWebsiteRequest(*s3.PutBucketWebsiteInput) (*request.Request, *s3.PutBucketWebsiteOutput) + + PutBucketWebsite(*s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error) + + PutObjectRequest(*s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput) + + PutObject(*s3.PutObjectInput) (*s3.PutObjectOutput, error) + + PutObjectAclRequest(*s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput) + + PutObjectAcl(*s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error) + + RestoreObjectRequest(*s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput) + + RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error) + + UploadPartRequest(*s3.UploadPartInput) (*request.Request, *s3.UploadPartOutput) + + UploadPart(*s3.UploadPartInput) (*s3.UploadPartOutput, error) + + UploadPartCopyRequest(*s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput) + + UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error) +} + +var _ S3API = (*s3.S3)(nil) diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go new file mode 100644 index 00000000..229c0d63 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/doc.go @@ -0,0 +1,3 @@ +// Package s3manager provides utilities to upload and download objects from +// S3 concurrently. Helpful for when working with large objects. 
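+//
+// The Uploader and Downloader types in this package split an object into
+// PartSize byte ranges and transfer up to Concurrency parts in parallel.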
+package s3manager
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
new file mode 100644
index 00000000..8d6d9310
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go
@@ -0,0 +1,354 @@
+package s3manager
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/aws/aws-sdk-go/service/s3/s3iface"
+)
+
+// DefaultDownloadPartSize is the default range of bytes to get at a time when
+// using Download().
+const DefaultDownloadPartSize = 1024 * 1024 * 5
+
+// DefaultDownloadConcurrency is the default number of goroutines to spin up
+// when using Download().
+const DefaultDownloadConcurrency = 5
+
+// The Downloader structure that calls Download(). It is safe to call Download()
+// on this structure for multiple objects and across concurrent goroutines.
+// Mutating the Downloader's properties is not safe to be done concurrently.
+type Downloader struct {
+	// The buffer size (in bytes) to use when buffering data into chunks and
+	// sending them as parts to S3. The minimum allowed part size is 5MB, and
+	// if this value is set to zero, the DefaultPartSize value will be used.
+	PartSize int64
+
+	// The number of goroutines to spin up in parallel when sending parts.
+	// If this is set to zero, the DefaultConcurrency value will be used.
+	Concurrency int
+
+	// An S3 client to use when performing downloads.
+	S3 s3iface.S3API
+}
+
+// NewDownloader creates a new Downloader instance to download objects from
+// S3 in concurrent chunks. Pass in additional functional options to customize
+// the downloader behavior. Requires a client.ConfigProvider in order to create
+// a S3 service client. The session.Session satisfies the client.ConfigProvider
+// interface.
+//
+// Example:
+//	// The session the S3 Downloader will use
+//	sess := session.New()
+//
+//	// Create a downloader with the session and default options
+//	downloader := s3manager.NewDownloader(sess)
+//
+//	// Create a downloader with the session and custom options
+//	downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) {
+//		d.PartSize = 64 * 1024 * 1024 // 64MB per part
+//	})
+func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader {
+	d := &Downloader{
+		S3:          s3.New(c),
+		PartSize:    DefaultDownloadPartSize,
+		Concurrency: DefaultDownloadConcurrency,
+	}
+	for _, option := range options {
+		option(d)
+	}
+
+	return d
+}
+
+// NewDownloaderWithClient creates a new Downloader instance to download
+// objects from S3 in concurrent chunks. Pass in additional functional
+// options to customize the downloader behavior. Requires a S3 service client
+// to make S3 API calls.
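+//
+// This is useful when the application already has a configured S3 client and
+// wants the Downloader to reuse it rather than construct its own.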
+//
+// Example:
+//	// The S3 client the S3 Downloader will use
+//	s3Svc := s3.New(session.New())
+//
+//	// Create a downloader with the s3 client and default options
+//	downloader := s3manager.NewDownloaderWithClient(s3Svc)
+//
+//	// Create a downloader with the s3 client and custom options
+//	downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Downloader) {
+//		d.PartSize = 64 * 1024 * 1024 // 64MB per part
+//	})
+func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader {
+	d := &Downloader{
+		S3:          svc,
+		PartSize:    DefaultDownloadPartSize,
+		Concurrency: DefaultDownloadConcurrency,
+	}
+	for _, option := range options {
+		option(d)
+	}
+
+	return d
+}
+
+// Download downloads an object in S3 and writes the payload into w using
+// concurrent GET requests.
+//
+// Additional functional options can be provided to configure the individual
+// download. These options are copies of the Downloader instance Download is
+// called from. Modifying the options will not impact the original Downloader
+// instance.
+//
+// It is safe to call this method concurrently across goroutines.
+//
+// The w io.WriterAt can be satisfied by an os.File to do multipart concurrent
+// downloads, or an in-memory []byte wrapper using aws.WriteAtBuffer.
+func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) {
+	impl := downloader{w: w, in: input, ctx: d}
+
+	for _, option := range options {
+		option(&impl.ctx)
+	}
+
+	return impl.download()
+}
+
+// downloader is the implementation structure used internally by Downloader.
+type downloader struct {
+	ctx Downloader
+
+	in *s3.GetObjectInput
+	w  io.WriterAt
+
+	wg sync.WaitGroup
+	m  sync.Mutex
+
+	pos        int64
+	totalBytes int64
+	written    int64
+	err        error
+}
+
+// init initializes the downloader with default options.
+func (d *downloader) init() {
+	d.totalBytes = -1
+
+	if d.ctx.Concurrency == 0 {
+		d.ctx.Concurrency = DefaultDownloadConcurrency
+	}
+
+	if d.ctx.PartSize == 0 {
+		d.ctx.PartSize = DefaultDownloadPartSize
+	}
+}
+
+// download performs the implementation of the object download across ranged
+// GETs.
+func (d *downloader) download() (n int64, err error) {
+	d.init()
+
+	// Spin off first worker to check additional header information
+	d.getChunk()
+
+	if total := d.getTotalBytes(); total >= 0 {
+		// Spin up workers
+		ch := make(chan dlchunk, d.ctx.Concurrency)
+
+		for i := 0; i < d.ctx.Concurrency; i++ {
+			d.wg.Add(1)
+			go d.downloadPart(ch)
+		}
+
+		// Assign work
+		for d.getErr() == nil {
+			if d.pos >= total {
+				break // We're finished queueing chunks
+			}
+
+			// Queue the next range of bytes to read.
+			ch <- dlchunk{w: d.w, start: d.pos, size: d.ctx.PartSize}
+			d.pos += d.ctx.PartSize
+		}
+
+		// Wait for completion
+		close(ch)
+		d.wg.Wait()
+	} else {
+		// Checking if we read anything new
+		for d.err == nil {
+			d.getChunk()
+		}
+
+		// We expect a 416 error letting us know we are done downloading the
+		// total bytes. Since we do not know the content's length, this will
+		// keep grabbing chunks of data until the range of bytes specified in
+		// the request is out of range of the content. Once this happens, a
+		// 416 should occur.
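+		// For example, if the object turns out to be 10 MiB long, the request
+		// for "bytes=10485760-15728639" comes back as 416 Requested Range Not
+		// Satisfiable, which is treated below as normal termination.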
+		e, ok := d.err.(awserr.RequestFailure)
+		if ok && e.StatusCode() == http.StatusRequestedRangeNotSatisfiable {
+			d.err = nil
+		}
+	}
+
+	// Return error
+	return d.written, d.err
+}
+
+// downloadPart is an individual goroutine worker reading from the ch channel
+// and performing a GetObject request on the data with a given byte range.
+//
+// If this is the first worker, this operation also resolves the total number
+// of bytes to be read so that the worker manager knows when it is finished.
+func (d *downloader) downloadPart(ch chan dlchunk) {
+	defer d.wg.Done()
+	for {
+		chunk, ok := <-ch
+		if !ok {
+			break
+		}
+		d.downloadChunk(chunk)
+	}
+}
+
+// getChunk grabs a chunk of data from the body.
+// Not thread safe. Should only be used when grabbing data on a single thread.
+func (d *downloader) getChunk() {
+	chunk := dlchunk{w: d.w, start: d.pos, size: d.ctx.PartSize}
+	d.pos += d.ctx.PartSize
+	d.downloadChunk(chunk)
+}
+
+// downloadChunk downloads the chunk from S3
+func (d *downloader) downloadChunk(chunk dlchunk) {
+	if d.getErr() != nil {
+		return
+	}
+	// Get the next byte range of data
+	in := &s3.GetObjectInput{}
+	awsutil.Copy(in, d.in)
+	rng := fmt.Sprintf("bytes=%d-%d",
+		chunk.start, chunk.start+chunk.size-1)
+	in.Range = &rng
+
+	req, resp := d.ctx.S3.GetObjectRequest(in)
+	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
+	err := req.Send()
+
+	if err != nil {
+		d.setErr(err)
+	} else {
+		d.setTotalBytes(resp) // Set total if not yet set.
+
+		n, err := io.Copy(&chunk, resp.Body)
+		resp.Body.Close()
+
+		if err != nil {
+			d.setErr(err)
+		}
+		d.incrWritten(n)
+	}
+}
+
+// getTotalBytes is a thread-safe getter for retrieving the total byte status.
+func (d *downloader) getTotalBytes() int64 {
+	d.m.Lock()
+	defer d.m.Unlock()
+
+	return d.totalBytes
+}
+
+// setTotalBytes is a thread-safe setter for setting the total byte status.
+// Will extract the object's total bytes from the Content-Range if the file
+// will be chunked, or from Content-Length. Content-Length is used when the
+// response does not include a Content-Range, meaning the object was not
+// chunked. This occurs when the full file fits within the PartSize directive.
+func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) {
+	d.m.Lock()
+	defer d.m.Unlock()
+
+	if d.totalBytes >= 0 {
+		return
+	}
+
+	if resp.ContentRange == nil {
+		// ContentRange is nil when the full file content is provided, and
+		// is not chunked. Use ContentLength instead.
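+		// In that case the object arrived in a single piece, so the
+		// Content-Length header already reflects the object's total size.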
+ if resp.ContentLength != nil { + d.totalBytes = *resp.ContentLength + return + } + } else { + parts := strings.Split(*resp.ContentRange, "/") + + total := int64(-1) + var err error + // Checking for whether or not a numbered total exists + // If one does not exist, we will assume the total to be -1, undefined, + // and sequentially download each chunk until hitting a 416 error + totalStr := parts[len(parts)-1] + if totalStr != "*" { + total, err = strconv.ParseInt(totalStr, 10, 64) + if err != nil { + d.err = err + return + } + } + + d.totalBytes = total + } +} + +func (d *downloader) incrWritten(n int64) { + d.m.Lock() + defer d.m.Unlock() + + d.written += n +} + +// getErr is a thread-safe getter for the error object +func (d *downloader) getErr() error { + d.m.Lock() + defer d.m.Unlock() + + return d.err +} + +// setErr is a thread-safe setter for the error object +func (d *downloader) setErr(e error) { + d.m.Lock() + defer d.m.Unlock() + + d.err = e +} + +// dlchunk represents a single chunk of data to write by the worker routine. +// This structure also implements an io.SectionReader style interface for +// io.WriterAt, effectively making it an io.SectionWriter (which does not +// exist). +type dlchunk struct { + w io.WriterAt + start int64 + size int64 + cur int64 +} + +// Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start +// position to its end (or EOF). +func (c *dlchunk) Write(p []byte) (n int, err error) { + if c.cur >= c.size { + return 0, io.EOF + } + + n, err = c.w.WriteAt(p, c.start+c.cur) + c.cur += int64(n) + + return +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go new file mode 100644 index 00000000..b7d0a125 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface/interface.go @@ -0,0 +1,23 @@ +// Package s3manageriface provides an interface for the s3manager package +package s3manageriface + +import ( + "io" + + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" +) + +// DownloaderAPI is the interface type for s3manager.Downloader. +type DownloaderAPI interface { + Download(io.WriterAt, *s3.GetObjectInput, ...func(*s3manager.Downloader)) (int64, error) +} + +var _ DownloaderAPI = (*s3manager.Downloader)(nil) + +// UploaderAPI is the interface type for s3manager.Uploader. +type UploaderAPI interface { + Upload(*s3manager.UploadInput, ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error) +} + +var _ UploaderAPI = (*s3manager.Uploader)(nil) diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go new file mode 100644 index 00000000..44ee75a1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go @@ -0,0 +1,661 @@ +package s3manager + +import ( + "bytes" + "fmt" + "io" + "sort" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3iface" +) + +// MaxUploadParts is the maximum allowed number of parts in a multi-part upload +// on Amazon S3. 
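+//
+// At the default 5MB part size this caps a single upload at roughly 48 GiB;
+// the uploader grows the part size automatically when a larger total size is
+// known up front.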
+const MaxUploadParts = 10000
+
+// MinUploadPartSize is the minimum allowed part size when uploading a part to
+// Amazon S3.
+const MinUploadPartSize int64 = 1024 * 1024 * 5
+
+// DefaultUploadPartSize is the default part size to buffer chunks of a
+// payload into.
+const DefaultUploadPartSize = MinUploadPartSize
+
+// DefaultUploadConcurrency is the default number of goroutines to spin up when
+// using Upload().
+const DefaultUploadConcurrency = 5
+
+// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned
+// will satisfy this interface when a multipart upload failed to upload all
+// chunks to S3. In the case of a failure the UploadID is needed to operate on
+// the chunks, if any, which were uploaded.
+//
+// Example:
+//
+//	u := s3manager.NewUploader(opts)
+//	output, err := u.Upload(input)
+//	if err != nil {
+//		if multierr, ok := err.(MultiUploadFailure); ok {
+//			// Process error and its associated uploadID
+//			fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID())
+//		} else {
+//			// Process error generically
+//			fmt.Println("Error:", err.Error())
+//		}
+//	}
+//
+type MultiUploadFailure interface {
+	awserr.Error
+
+	// Returns the upload id for the S3 multipart upload that failed.
+	UploadID() string
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the multiUploadError struct and not conflict with the error.Error() method.
+type awsError awserr.Error
+
+// A multiUploadError wraps the upload ID of a failed s3 multipart upload.
+// Composed of BaseError for code, message, and original error
+//
+// Should be used for an error that occurred failing a S3 multipart upload,
+// and an upload ID is available. If an upload ID is not available, a more
+// relevant error type, such as a plain awserr.Error, should be used instead.
+type multiUploadError struct {
+	awsError
+
+	// ID for multipart upload which failed.
+	uploadID string
+}
+
+// Error returns the string representation of the error.
+//
+// See apierr.BaseError ErrorWithExtra for output format
+//
+// Satisfies the error interface.
+func (m multiUploadError) Error() string {
+	extra := fmt.Sprintf("upload id: %s", m.uploadID)
+	return awserr.SprintError(m.Code(), m.Message(), extra, m.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (m multiUploadError) String() string {
+	return m.Error()
+}
+
+// UploadID returns the id of the S3 upload which failed.
+func (m multiUploadError) UploadID() string {
+	return m.uploadID
+}
+
+// UploadInput contains all input for upload requests to Amazon S3.
+type UploadInput struct {
+	// The canned ACL to apply to the object.
+	ACL *string `location:"header" locationName:"x-amz-acl" type:"string"`
+
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Specifies caching behavior along the request/reply chain.
+	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+	// Specifies presentational information for the object.
+	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+	// Specifies what content encodings have been applied to the object and thus
+	// what decoding mechanisms must be applied to obtain the media-type referenced
+	// by the Content-Type header field.
+	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+	// The language the content is in.
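+	// For example, "en-US".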
+	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+	// A standard MIME type describing the format of the object data.
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	// The date and time at which the object is no longer cacheable.
+	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+	// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+	// Allows grantee to read the object data and its metadata.
+	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+	// Allows grantee to read the object ACL.
+	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+	// Allows grantee to write the ACL for the applicable object.
+	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+	Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
+
+	// A map of metadata to store with the object in S3.
+	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256,
+	// aws:kms).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key. The key must be appropriate for use with
+	// the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm
+	// header.
+	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+	// Amazon S3 uses this header for a message integrity check to ensure the encryption
+	// key was transmitted without error.
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+	// requests for an object protected by AWS KMS will fail if not made via SSL
+	// or using SigV4. Documentation on configuring any of the officially supported
+	// AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+	SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+	// The Server-side encryption algorithm used when storing this object in S3
+	// (e.g., AES256, aws:kms).
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
+
+	// The type of storage to use for the object. Defaults to 'STANDARD'.
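+	// Valid values correspond to the StorageClass enum constants in the s3
+	// package: STANDARD, REDUCED_REDUNDANCY, and STANDARD_IA.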
+	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string"`
+
+	// If the bucket is configured as a website, redirects requests for this object
+	// to another object in the same bucket or to an external URL. Amazon S3 stores
+	// the value of this header in the object metadata.
+	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+
+	// The readable body payload to send to S3.
+	Body io.Reader
+}
+
+// UploadOutput represents a response from the Upload() call.
+type UploadOutput struct {
+	// The URL where the object was uploaded to.
+	Location string
+
+	// The version of the object that was uploaded. Will only be populated if
+	// the S3 Bucket is versioned. If the bucket is not versioned this field
+	// will not be set.
+	VersionID *string
+
+	// The ID for a multipart upload to S3. In the case of an error the error
+	// can be cast to the MultiUploadFailure interface to extract the upload ID.
+	UploadID string
+}
+
+// The Uploader structure that calls Upload(). It is safe to call Upload()
+// on this structure for multiple objects and across concurrent goroutines.
+// Mutating the Uploader's properties is not safe to be done concurrently.
+type Uploader struct {
+	// The buffer size (in bytes) to use when buffering data into chunks and
+	// sending them as parts to S3. The minimum allowed part size is 5MB, and
+	// if this value is set to zero, the DefaultPartSize value will be used.
+	PartSize int64
+
+	// The number of goroutines to spin up in parallel when sending parts.
+	// If this is set to zero, the DefaultConcurrency value will be used.
+	Concurrency int
+
+	// Setting this value to true will cause the SDK to avoid calling
+	// AbortMultipartUpload on a failure, leaving all successfully uploaded
+	// parts on S3 for manual recovery.
+	//
+	// Note that storing parts of an incomplete multipart upload counts towards
+	// space usage on S3 and will add additional costs if not cleaned up.
+	LeavePartsOnError bool
+
+	// MaxUploadParts is the max number of parts which will be uploaded to S3.
+	// Will be used to calculate the partsize of the object to be uploaded.
+	// E.g., a 5GB file, with MaxUploadParts set to 100, will be uploaded as
+	// 100 parts of 50MB each, subject to the overall limit of MaxUploadParts
+	// (10,000 parts).
+	MaxUploadParts int
+
+	// The client to use when uploading to S3.
+	S3 s3iface.S3API
+}
+
+// NewUploader creates a new Uploader instance to upload objects to S3. Pass in
+// additional functional options to customize the uploader's behavior. Requires a
+// client.ConfigProvider in order to create a S3 service client. The session.Session
+// satisfies the client.ConfigProvider interface.
+//
+// Example:
+//	// The session the S3 Uploader will use
+//	sess := session.New()
+//
+//	// Create an uploader with the session and default options
+//	uploader := s3manager.NewUploader(sess)
+//
+//	// Create an uploader with the session and custom options
+//	uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
+//		u.PartSize = 64 * 1024 * 1024 // 64MB per part
+//	})
+func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader {
+	u := &Uploader{
+		S3:                s3.New(c),
+		PartSize:          DefaultUploadPartSize,
+		Concurrency:       DefaultUploadConcurrency,
+		LeavePartsOnError: false,
+		MaxUploadParts:    MaxUploadParts,
+	}
+
+	for _, option := range options {
+		option(u)
+	}
+
+	return u
+}
+
+// NewUploaderWithClient creates a new Uploader instance to upload objects to S3. Pass in
+// additional functional options to customize the uploader's behavior. Requires
+// a S3 service client to make S3 API calls.
+//
+// Example:
+//	// S3 service client the Upload manager will use.
+//	s3Svc := s3.New(session.New())
+//
+//	// Create an uploader with S3 client and default options
+//	uploader := s3manager.NewUploaderWithClient(s3Svc)
+//
+//	// Create an uploader with S3 client and custom options
+//	uploader := s3manager.NewUploaderWithClient(s3Svc, func(u *s3manager.Uploader) {
+//		u.PartSize = 64 * 1024 * 1024 // 64MB per part
+//	})
func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader {
+	u := &Uploader{
+		S3:                svc,
+		PartSize:          DefaultUploadPartSize,
+		Concurrency:       DefaultUploadConcurrency,
+		LeavePartsOnError: false,
+		MaxUploadParts:    MaxUploadParts,
+	}
+
+	for _, option := range options {
+		option(u)
+	}
+
+	return u
+}
+
+// Upload uploads an object to S3, intelligently buffering large files into
+// smaller chunks and sending them in parallel across multiple goroutines. You
+// can configure the buffer size and concurrency through the Uploader's parameters.
+//
+// Additional functional options can be provided to configure the individual
+// upload. These options are copies of the Uploader instance Upload is called from.
+// Modifying the options will not impact the original Uploader instance.
+//
+// It is safe to call this method concurrently across goroutines.
+//
+// Example:
+//	// Upload input parameters
+//	upParams := &s3manager.UploadInput{
+//		Bucket: &bucketName,
+//		Key:    &keyName,
+//		Body:   file,
+//	}
+//
+//	// Perform an upload.
+//	result, err := uploader.Upload(upParams)
+//
+//	// Perform upload with options different from those in the Uploader.
+//	result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) {
+//		u.PartSize = 10 * 1024 * 1024 // 10MB part size
+//		u.LeavePartsOnError = true    // Don't delete the parts if the upload fails.
+//	})
+func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) {
+	i := uploader{in: input, ctx: u}
+
+	for _, option := range options {
+		option(&i.ctx)
+	}
+
+	return i.upload()
+}
+
+// internal structure to manage an upload to S3.
+type uploader struct {
+	ctx Uploader
+
+	in *UploadInput
+
+	readerPos int64 // current reader position
+	totalSize int64 // set to -1 if the size is not known
+}
+
+// internal logic for deciding whether to upload a single part or use a
+// multipart upload.
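+// A payload that fits in a single PartSize buffer is sent with one PutObject
+// request; anything larger becomes a multipart upload.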
+func (u *uploader) upload() (*UploadOutput, error) {
+	u.init()
+
+	if u.ctx.PartSize < MinUploadPartSize {
+		msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize)
+		return nil, awserr.New("ConfigError", msg, nil)
+	}
+
+	// Do one read to determine if we have more than one part
+	buf, err := u.nextReader()
+	if err == io.EOF || err == io.ErrUnexpectedEOF { // single part
+		return u.singlePart(buf)
+	} else if err != nil {
+		return nil, awserr.New("ReadRequestBody", "read upload data failed", err)
+	}
+
+	mu := multiuploader{uploader: u}
+	return mu.upload(buf)
+}
+
+// init will initialize all default options.
+func (u *uploader) init() {
+	if u.ctx.Concurrency == 0 {
+		u.ctx.Concurrency = DefaultUploadConcurrency
+	}
+	if u.ctx.PartSize == 0 {
+		u.ctx.PartSize = DefaultUploadPartSize
+	}
+
+	// Try to get the total size for some optimizations
+	u.initSize()
+}
+
+// initSize tries to detect the total stream size, setting u.totalSize. If
+// the size is not known, totalSize is set to -1.
+func (u *uploader) initSize() {
+	u.totalSize = -1
+
+	switch r := u.in.Body.(type) {
+	case io.Seeker:
+		pos, _ := r.Seek(0, 1)
+		defer r.Seek(pos, 0)
+
+		n, err := r.Seek(0, 2)
+		if err != nil {
+			return
+		}
+		u.totalSize = n
+
+		// Try to adjust partSize if it is too small and account for
+		// integer division truncation.
+		if u.totalSize/u.ctx.PartSize >= int64(u.ctx.MaxUploadParts) {
+			// Add one to the part size to account for remainders
+			// during the size calculation, e.g. an odd number of bytes.
+			u.ctx.PartSize = (u.totalSize / int64(u.ctx.MaxUploadParts)) + 1
+		}
+	}
+}
+
+// nextReader returns a seekable reader representing the next packet of data.
+// This operation increases the shared u.readerPos counter, but note that it
+// does not need to be wrapped in a mutex because nextReader is only called
+// from the main thread.
+func (u *uploader) nextReader() (io.ReadSeeker, error) {
+	switch r := u.in.Body.(type) {
+	case io.ReaderAt:
+		var err error
+
+		n := u.ctx.PartSize
+		if u.totalSize >= 0 {
+			bytesLeft := u.totalSize - u.readerPos
+
+			if bytesLeft == 0 {
+				err = io.EOF
+				n = bytesLeft
+			} else if bytesLeft <= u.ctx.PartSize {
+				err = io.ErrUnexpectedEOF
+				n = bytesLeft
+			}
+		}
+
+		buf := io.NewSectionReader(r, u.readerPos, n)
+		u.readerPos += n
+
+		return buf, err
+
+	default:
+		packet := make([]byte, u.ctx.PartSize)
+		n, err := io.ReadFull(u.in.Body, packet)
+		u.readerPos += int64(n)
+
+		return bytes.NewReader(packet[0:n]), err
+	}
+}
+
+// singlePart contains upload logic for uploading a single chunk via
+// a regular PutObject request. Multipart requests require at least two
+// parts, or at least 5MB of data.
+func (u *uploader) singlePart(buf io.ReadSeeker) (*UploadOutput, error) {
+	params := &s3.PutObjectInput{}
+	awsutil.Copy(params, u.in)
+	params.Body = buf
+
+	req, out := u.ctx.S3.PutObjectRequest(params)
+	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
+	if err := req.Send(); err != nil {
+		return nil, err
+	}
+
+	url := req.HTTPRequest.URL.String()
+	return &UploadOutput{
+		Location:  url,
+		VersionID: out.VersionId,
+	}, nil
+}
+
+// internal structure to manage a specific multipart upload to S3.
+type multiuploader struct {
+	*uploader
+	wg       sync.WaitGroup
+	m        sync.Mutex
+	err      error
+	uploadID string
+	parts    completedParts
+}
+
+// keeps track of a single chunk of data being sent to S3.
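+// The num field carries the S3 part number, which starts at 1 and may not
+// exceed MaxUploadParts.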
+type chunk struct {
+	buf io.ReadSeeker
+	num int64
+}
+
+// completedParts is a wrapper to make parts sortable by their part number,
+// since S3 requires this list to be sent in sorted order.
+type completedParts []*s3.CompletedPart
+
+func (a completedParts) Len() int           { return len(a) }
+func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber }
+
+// upload will perform a multipart upload using the firstBuf buffer containing
+// the first chunk of data.
+func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) {
+	params := &s3.CreateMultipartUploadInput{}
+	awsutil.Copy(params, u.in)
+
+	// Create the multipart upload
+	req, resp := u.ctx.S3.CreateMultipartUploadRequest(params)
+	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
+	if err := req.Send(); err != nil {
+		return nil, err
+	}
+	u.uploadID = *resp.UploadId
+
+	// Create the workers
+	ch := make(chan chunk, u.ctx.Concurrency)
+	for i := 0; i < u.ctx.Concurrency; i++ {
+		u.wg.Add(1)
+		go u.readChunk(ch)
+	}
+
+	// Send part 1 to the workers
+	var num int64 = 1
+	ch <- chunk{buf: firstBuf, num: num}
+
+	// Read and queue the rest of the parts
+	for u.geterr() == nil {
+		// This upload exceeded the maximum number of supported parts, error now.
+		if num > int64(u.ctx.MaxUploadParts) || num > int64(MaxUploadParts) {
+			var msg string
+			if num > int64(u.ctx.MaxUploadParts) {
+				msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+					u.ctx.MaxUploadParts)
+			} else {
+				msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
+					MaxUploadParts)
+			}
+			u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
+			break
+		}
+		num++
+
+		buf, err := u.nextReader()
+		if err == io.EOF {
+			break
+		}
+
+		ch <- chunk{buf: buf, num: num}
+
+		if err != nil && err != io.ErrUnexpectedEOF {
+			u.seterr(awserr.New(
+				"ReadRequestBody",
+				"read multipart upload data failed",
+				err))
+			break
+		}
+	}
+
+	// Close the channel, wait for workers, and complete upload
+	close(ch)
+	u.wg.Wait()
+	complete := u.complete()
+
+	if err := u.geterr(); err != nil {
+		return nil, &multiUploadError{
+			awsError: awserr.New(
+				"MultipartUpload",
+				"upload multipart failed",
+				err),
+			uploadID: u.uploadID,
+		}
+	}
+	return &UploadOutput{
+		Location:  *complete.Location,
+		VersionID: complete.VersionId,
+		UploadID:  u.uploadID,
+	}, nil
+}
+
+// readChunk runs in worker goroutines to pull chunks off of the ch channel
+// and send() them as UploadPart requests.
+func (u *multiuploader) readChunk(ch chan chunk) {
+	defer u.wg.Done()
+	for {
+		data, ok := <-ch
+
+		if !ok {
+			break
+		}
+
+		if u.geterr() == nil {
+			if err := u.send(data); err != nil {
+				u.seterr(err)
+			}
+		}
+	}
+}
+
+// send performs an UploadPart request and keeps track of the completed
+// part information.
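+//
+// The ETag S3 returns for each part must be echoed back when the upload is
+// completed, which is why it is recorded here along with the part number.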
+func (u *multiuploader) send(c chunk) error {
+	req, resp := u.ctx.S3.UploadPartRequest(&s3.UploadPartInput{
+		Bucket:     u.in.Bucket,
+		Key:        u.in.Key,
+		Body:       c.buf,
+		UploadId:   &u.uploadID,
+		PartNumber: &c.num,
+	})
+	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
+	if err := req.Send(); err != nil {
+		return err
+	}
+
+	n := c.num
+	completed := &s3.CompletedPart{ETag: resp.ETag, PartNumber: &n}
+
+	u.m.Lock()
+	u.parts = append(u.parts, completed)
+	u.m.Unlock()
+
+	return nil
+}
+
+// geterr is a thread-safe getter for the error object
+func (u *multiuploader) geterr() error {
+	u.m.Lock()
+	defer u.m.Unlock()
+
+	return u.err
+}
+
+// seterr is a thread-safe setter for the error object
+func (u *multiuploader) seterr(e error) {
+	u.m.Lock()
+	defer u.m.Unlock()
+
+	u.err = e
+}
+
+// fail will abort the multipart upload unless LeavePartsOnError is set to true.
+func (u *multiuploader) fail() {
+	if u.ctx.LeavePartsOnError {
+		return
+	}
+
+	req, _ := u.ctx.S3.AbortMultipartUploadRequest(&s3.AbortMultipartUploadInput{
+		Bucket:   u.in.Bucket,
+		Key:      u.in.Key,
+		UploadId: &u.uploadID,
+	})
+	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
+	req.Send()
+}
+
+// complete successfully completes a multipart upload and returns the response.
+func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
+	if u.geterr() != nil {
+		u.fail()
+		return nil
+	}
+
+	// Parts must be sorted in PartNumber order.
+	sort.Sort(u.parts)
+
+	req, resp := u.ctx.S3.CompleteMultipartUploadRequest(&s3.CompleteMultipartUploadInput{
+		Bucket:          u.in.Bucket,
+		Key:             u.in.Key,
+		UploadId:        &u.uploadID,
+		MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts},
+	})
+	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
+	if err := req.Send(); err != nil {
+		u.seterr(err)
+		u.fail()
+	}
+
+	return resp
+}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/service.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/service.go
new file mode 100644
index 00000000..cf01da53
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/service.go
@@ -0,0 +1,86 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package s3
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/restxml"
+	"github.com/aws/aws-sdk-go/private/signer/v4"
+)
+
+// S3 is a client for Amazon S3.
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type S3 struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "s3"
+
+// New creates a new instance of the S3 client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//	// Create a S3 client from just a session.
+// svc := s3.New(mySession) +// +// // Create a S3 client with additional configuration +// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *S3 { + svc := &S3{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2006-03-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a S3 operation and runs any +// custom request initialization. +func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/sse.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/sse.go new file mode 100644 index 00000000..268ea2fb --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/sse.go @@ -0,0 +1,44 @@ +package s3 + +import ( + "crypto/md5" + "encoding/base64" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil) + +func validateSSERequiresSSL(r *request.Request) { + if r.HTTPRequest.URL.Scheme != "https" { + p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey") + if len(p) > 0 { + r.Error = errSSERequiresSSL + } + } +} + +func computeSSEKeys(r *request.Request) { + headers := []string{ + "x-amz-server-side-encryption-customer-key", + "x-amz-copy-source-server-side-encryption-customer-key", + } + + for _, h := range headers { + md5h := h + "-md5" + if key := r.HTTPRequest.Header.Get(h); key != "" { + // Base64-encode the value + b64v := base64.StdEncoding.EncodeToString([]byte(key)) + r.HTTPRequest.Header.Set(h, b64v) + + // Add MD5 if it wasn't computed + if r.HTTPRequest.Header.Get(md5h) == "" { + sum := md5.Sum([]byte(key)) + b64sum := base64.StdEncoding.EncodeToString(sum[:]) + r.HTTPRequest.Header.Set(md5h, b64sum) + } + } + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/statusok_error.go new file mode 100644 index 00000000..ce65fcda --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -0,0 +1,36 @@ +package s3 + +import ( + "bytes" + "io/ioutil" + "net/http" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +func copyMultipartStatusOKUnmarhsalError(r *request.Request) { + b, err 
:= ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "unable to read response body", err) + return + } + body := bytes.NewReader(b) + r.HTTPResponse.Body = aws.ReadSeekCloser(body) + defer r.HTTPResponse.Body.(aws.ReaderSeekerCloser).Seek(0, 0) + + if body.Len() == 0 { + // If there is no body don't attempt to parse the body. + return + } + + unmarshalError(r) + if err, ok := r.Error.(awserr.Error); ok && err != nil { + if err.Code() == "SerializationError" { + r.Error = nil + return + } + r.HTTPResponse.StatusCode = http.StatusServiceUnavailable + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go new file mode 100644 index 00000000..cd600482 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go @@ -0,0 +1,57 @@ +package s3 + +import ( + "encoding/xml" + "fmt" + "io" + "net/http" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"Error"` + Code string `xml:"Code"` + Message string `xml:"Message"` +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + if r.HTTPResponse.StatusCode == http.StatusMovedPermanently { + r.Error = awserr.NewRequestFailure( + awserr.New("BucketRegionError", + fmt.Sprintf("incorrect region, the bucket is not in '%s' region", + aws.StringValue(r.Config.Region)), + nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + if r.HTTPResponse.ContentLength == 0 { + // No body, use status code to generate an awserr.Error + r.Error = awserr.NewRequestFailure( + awserr.New(strings.Replace(r.HTTPResponse.Status, " ", "", -1), r.HTTPResponse.Status, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + resp := &xmlErrorResponse{} + err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) + if err != nil && err != io.EOF { + r.Error = awserr.New("SerializationError", "failed to decode S3 XML error response", nil) + } else { + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/waiters.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/waiters.go new file mode 100644 index 00000000..4879fca8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/waiters.go @@ -0,0 +1,117 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package s3 + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error { + waiterCfg := waiter.Config{ + Operation: "HeadBucket", + Delay: 5, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 403, + }, + { + State: "retry", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error { + waiterCfg := waiter.Config{ + Operation: "HeadBucket", + Delay: 5, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error { + waiterCfg := waiter.Config{ + Operation: "HeadObject", + Delay: 5, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "retry", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + +func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error { + waiterCfg := waiter.Config{ + Operation: "HeadObject", + Delay: 5, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 404, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/Godeps/_workspace/src/github.com/docker/goamz/cloudfront/cloudfront.go b/Godeps/_workspace/src/github.com/docker/goamz/cloudfront/cloudfront.go deleted file mode 100644 index 0bffe88a..00000000 --- a/Godeps/_workspace/src/github.com/docker/goamz/cloudfront/cloudfront.go +++ /dev/null @@ -1,143 +0,0 @@ -package cloudfront - -import ( - "crypto" - "crypto/rsa" - "crypto/sha1" - "encoding/base64" - "encoding/json" - "fmt" - "github.com/docker/goamz/aws" - "net/url" - "strconv" - "strings" - "time" -) - -type CloudFront struct { - BaseURL string - keyPairId string - key *rsa.PrivateKey -} - -var base64Replacer = strings.NewReplacer("=", "_", "+", "-", "/", "~") - -func NewKeyLess(auth aws.Auth, baseurl string) *CloudFront { - return &CloudFront{keyPairId: auth.AccessKey, BaseURL: baseurl} -} - -func New(baseurl string, key *rsa.PrivateKey, keyPairId string) *CloudFront { - return &CloudFront{ - BaseURL: baseurl, - keyPairId: keyPairId, - key: key, - } -} - -type epochTime struct { - EpochTime int64 `json:"AWS:EpochTime"` -} - -type condition struct { - DateLessThan epochTime -} - -type statement struct { - Resource string - Condition condition -} - -type policy struct { - Statement []statement -} - -func buildPolicy(resource string, expireTime time.Time) ([]byte, error) { - p := &policy{ - Statement: []statement{ - statement{ - Resource: resource, - Condition: condition{ - DateLessThan: epochTime{ - EpochTime: expireTime.Truncate(time.Millisecond).Unix(), - }, - }, - }, - }, - } - - return json.Marshal(p) -} - -func (cf *CloudFront) generateSignature(policy []byte) (string, error) { - hash := sha1.New() - _, err := hash.Write(policy) - if err != nil { 
- return "", err - } - - hashed := hash.Sum(nil) - var signed []byte - if cf.key.Validate() == nil { - signed, err = rsa.SignPKCS1v15(nil, cf.key, crypto.SHA1, hashed) - if err != nil { - return "", err - } - } else { - signed = hashed - } - encoded := base64Replacer.Replace(base64.StdEncoding.EncodeToString(signed)) - - return encoded, nil -} - -// Creates a signed url using RSAwithSHA1 as specified by -// http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-canned-policy.html#private-content-canned-policy-creating-signature -func (cf *CloudFront) CannedSignedURL(path, queryString string, expires time.Time) (string, error) { - resource := cf.BaseURL + path - if queryString != "" { - resource = path + "?" + queryString - } - - policy, err := buildPolicy(resource, expires) - if err != nil { - return "", err - } - - signature, err := cf.generateSignature(policy) - if err != nil { - return "", err - } - - // TOOD: Do this once - uri, err := url.Parse(cf.BaseURL) - if err != nil { - return "", err - } - - uri.RawQuery = queryString - if queryString != "" { - uri.RawQuery += "&" - } - - expireTime := expires.Truncate(time.Millisecond).Unix() - - uri.Path = path - uri.RawQuery += fmt.Sprintf("Expires=%d&Signature=%s&Key-Pair-Id=%s", expireTime, signature, cf.keyPairId) - - return uri.String(), nil -} - -func (cloudfront *CloudFront) SignedURL(path, querystrings string, expires time.Time) string { - policy := `{"Statement":[{"Resource":"` + path + "?" + querystrings + `,"Condition":{"DateLessThan":{"AWS:EpochTime":` + strconv.FormatInt(expires.Truncate(time.Millisecond).Unix(), 10) + `}}}]}` - - hash := sha1.New() - hash.Write([]byte(policy)) - b := hash.Sum(nil) - he := base64.StdEncoding.EncodeToString(b) - - policySha1 := he - - url := cloudfront.BaseURL + path + "?" + querystrings + "&Expires=" + strconv.FormatInt(expires.Unix(), 10) + "&Signature=" + policySha1 + "&Key-Pair-Id=" + cloudfront.keyPairId - - return url -} diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/.gitignore b/Godeps/_workspace/src/github.com/go-ini/ini/.gitignore new file mode 100644 index 00000000..7adca943 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/.gitignore @@ -0,0 +1,4 @@ +testdata/conf_out.ini +ini.sublime-project +ini.sublime-workspace +testdata/conf_reflect.ini diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/LICENSE b/Godeps/_workspace/src/github.com/go-ini/ini/LICENSE new file mode 100644 index 00000000..37ec93a1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. 
+ +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. 
If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/README.md b/Godeps/_workspace/src/github.com/go-ini/ini/README.md
new file mode 100644
index 00000000..c52f63a4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-ini/ini/README.md
@@ -0,0 +1,590 @@
+ini [![Build Status](https://drone.io/github.com/go-ini/ini/status.png)](https://drone.io/github.com/go-ini/ini/latest) [![](http://gocover.io/_badge/github.com/go-ini/ini)](http://gocover.io/github.com/go-ini/ini)
+===
+
+![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
+
+Package ini provides INI file read and write functionality in Go.
+
+[简体中文](README_ZH.md)
+
+## Features
+
+- Load multiple data sources (`[]byte` or file) with overwrites.
+- Read with recursive values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multi-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+To use a tagged revision:
+
+    go get gopkg.in/ini.v1
+
+To use the latest changes:
+
+    go get github.com/go-ini/ini
+
+### Testing
+
+If you want to test on your machine, please add the `-t` flag:
+
+    go get -t gopkg.in/ini.v1
+
+## Getting Started
+
+### Loading from data sources
+
+A **Data Source** is either raw data of type `[]byte` or a file name of type `string`, and you can load **as many** data sources **as** you want. Passing any other type simply returns an error.
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename")
+```
+
+Or start with an empty object:
+
+```go
+cfg := ini.Empty()
+```
+
+When you cannot decide how many data sources to load at the beginning, you are still able to **Append()** them later.
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+### Working with sections
+
+To get a section, you would need to:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+As a shortcut for the default section, just give an empty string as the name:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+When you're pretty sure the section exists, the following code can make your life easier:
+
+```go
+section := cfg.Section("")
+```
+
+What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you.
+
+To create a new section:
+
+```go
+err := cfg.NewSection("new section")
+```
+
+To get a list of sections or section names:
+
+```go
+sections := cfg.Sections()
+names := cfg.SectionStrings()
+```
+
+### Working with keys
+
+To get a key under a section:
+
+```go
+key, err := cfg.Section("").GetKey("key name")
+```
+
+The same rule applies to key operations:
+
+```go
+key := cfg.Section("").Key("key name")
+```
+
+To check if a key exists:
+
+```go
+yes := cfg.Section("").HasKey("key name")
+```
+
+To create a new key:
+
+```go
+err := cfg.Section("").NewKey("name", "value")
+```
+
+To get a list of keys or key names:
+
+```go
+keys := cfg.Section("").Keys()
+names := cfg.Section("").KeyStrings()
+```
+
+To get a cloned hash of keys and their corresponding values:
+
+```go
+hash := cfg.GetSection("").KeysHash()
+```
+
+### Working with values
+
+To get a string value:
+
+```go
+val := cfg.Section("").Key("key name").String()
+```
+
+To validate a key value on the fly:
+
+```go
+val := cfg.Section("").Key("key name").Validate(func(in string) string {
+	if len(in) == 0 {
+		return "default"
+	}
+	return in
+})
+```
+
+If you do not want any auto-transformation (such as recursive reads) of the values, you can get the raw value directly (this way you get much better performance):
+
+```go
+val := cfg.Section("").Key("key name").Value()
+```
+
+To check if a raw value exists:
+
+```go
+yes := cfg.Section("").HasValue("test value")
+```
+
+To get a value with a specific type:
+
+```go
+// For boolean values:
+// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
+// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
+v, err = cfg.Section("").Key("BOOL").Bool()
+v, err = cfg.Section("").Key("FLOAT64").Float64()
+v, err = cfg.Section("").Key("INT").Int()
+v, err = cfg.Section("").Key("INT64").Int64()
+v, err = cfg.Section("").Key("UINT").Uint()
+v, err = cfg.Section("").Key("UINT64").Uint64()
+v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
+v, err = cfg.Section("").Key("TIME").Time() // RFC3339
+
+v = cfg.Section("").Key("BOOL").MustBool()
+v = cfg.Section("").Key("FLOAT64").MustFloat64()
+v = cfg.Section("").Key("INT").MustInt()
+v = cfg.Section("").Key("INT64").MustInt64()
+v = cfg.Section("").Key("UINT").MustUint()
+v = cfg.Section("").Key("UINT64").MustUint64()
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
+v = cfg.Section("").Key("TIME").MustTime() // RFC3339
+
+// Methods starting with Must also accept one argument as the default value
+// to use when the key is not found or the value fails to parse to the given type.
+// The exception is MustString, which always requires a default value.
+
+v = cfg.Section("").Key("String").MustString("default")
+v = cfg.Section("").Key("BOOL").MustBool(true)
+v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
+v = cfg.Section("").Key("INT").MustInt(10)
+v = cfg.Section("").Key("INT64").MustInt64(99)
+v = cfg.Section("").Key("UINT").MustUint(3)
+v = cfg.Section("").Key("UINT64").MustUint64(6)
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
+v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
+```
+
+What if my value is three lines long?
+
+```ini
+[advance]
+ADDRESS = """404 road,
+NotFound, State, 5000
+Earth"""
+```
+
+Not a problem!
+
+```go
+cfg.Section("advance").Key("ADDRESS").String()
+
+/* --- start ---
+404 road,
+NotFound, State, 5000
+Earth
+------ end --- */
+```
+
+That's cool, how about continuation lines?
+
+```ini
+[advance]
+two_lines = how about \
+    continuation lines?
+lots_of_lines = 1 \
+    2 \
+    3 \
+    4
+```
+
+Piece of cake!
+
+```go
+cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
+cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
+```
+
+Note that quotes (single or double) around values will be stripped:
+
+```ini
+foo = "some value" // foo: some value
+bar = 'some value' // bar: some value
+```
+
+That's all? Hmm, no.
+
+#### Helper methods for working with values
+
+To get a value from given candidates:
+
+```go
+v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
+v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
+v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
+v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
+v = cfg.Section("").Key("UINT").InUint(4, []uint{3, 6, 9})
+v = cfg.Section("").Key("UINT64").InUint64(8, []uint64{3, 6, 9})
+v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
+v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
+```
+
+The default value will be returned if the key's value is not among the candidates you give, and the default value does not need to be one of the candidates.
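+
+For instance (a minimal sketch, assuming a config whose `INT` key holds `15`):
+
+```go
+cfg, _ := ini.Load([]byte("INT = 15"))
+// 15 is not among the candidates, so the default 5 is returned.
+v := cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
+```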
+
+To validate a value within a given range:
+
+```go
+vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
+vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
+vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
+vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
+vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
+vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
+vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
+```
+
+To auto-split a value into a slice:
+
+```go
+vals = cfg.Section("").Key("STRINGS").Strings(",")
+vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
+vals = cfg.Section("").Key("INTS").Ints(",")
+vals = cfg.Section("").Key("INT64S").Int64s(",")
+vals = cfg.Section("").Key("UINTS").Uints(",")
+vals = cfg.Section("").Key("UINT64S").Uint64s(",")
+vals = cfg.Section("").Key("TIMES").Times(",")
+```
+
+### Save your configuration
+
+Finally, it's time to save your configuration somewhere.
+
+A typical way to save the configuration is writing it to a file:
+
+```go
+// ...
+err = cfg.SaveTo("my.ini")
+err = cfg.SaveToIndent("my.ini", "\t")
+```
+
+Another way to save is writing to an `io.Writer`:
+
+```go
+// ...
+cfg.WriteTo(writer)
+cfg.WriteToIndent(writer, "\t")
+```
+
+## Advanced Usage
+
+### Recursive Values
+
+For all key values, there is a special syntax `%(<name>)s`, where `<name>` is the name of a key in the same section or in the default section; `%(<name>)s` will be replaced by the corresponding value (an empty string if the key is not found). You can use this syntax with at most 99 levels of recursion.
+
+```ini
+NAME = ini
+
+[author]
+NAME = Unknwon
+GITHUB = https://github.com/%(NAME)s
+
+[package]
+FULL_NAME = github.com/go-ini/%(NAME)s
+```
+
+```go
+cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
+cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
+```
+
+### Parent-child Sections
+
+You can use `.` in a section name to indicate a parent-child relationship between two or more sections. If a key is not found in the child section, the library will look it up in the parent section, and so on, until there is no parent section left.
+
+```ini
+NAME = ini
+VERSION = v1
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+```
+
+```go
+cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
+```
+
+### Auto-increment Key Names
+
+If a key name is `-` in the data source, it is treated as special syntax for an auto-increment key name starting from 1, and every section has its own independent counter.
+
+```ini
+[features]
+-: Support read/write comments of keys and sections
+-: Support auto-increment of key names
+-: Support load multiple files to overwrite key values
+```
+
+```go
+cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
+```
+
+### Map To Struct
+
+Want a more object-oriented way to play with INI? Cool.
+
+```ini
+Name = Unknwon
+age = 21
+Male = true
+Born = 1993-01-01T20:17:05Z
+
+[Note]
+Content = Hi is a good man!
+Cities = HangZhou, Boston
+```
+
+```go
+type Note struct {
+	Content string
+	Cities  []string
+}
+
+type Person struct {
+	Name string
+	Age  int `ini:"age"`
+	Male bool
+	Born time.Time
+	Note
+	Created time.Time `ini:"-"`
+}
+
+func main() {
+	cfg, err := ini.Load("path/to/ini")
+	// ...
+	p := new(Person)
+	err = cfg.MapTo(p)
+	// ...
+
+	// Things can be simpler.
+	err = ini.MapTo(p, "path/to/ini")
+	// ...
+
+	// Just map a section? Fine.
+	n := new(Note)
+	err = cfg.Section("Note").MapTo(n)
+	// ...
+}
+```
+
+Can I have a default value for a field? Absolutely.
+
+Assign it before you map to the struct. The value will be kept as-is if the key is not present or has the wrong type.
+
+```go
+// ...
+p := &Person{
+	Name: "Joe",
+}
+// ...
+```
+
+It's really cool, but what's the point if you can't give me my file back from the struct?
+
+### Reflect From Struct
+
+Why not?
+
+```go
+type Embeded struct {
+	Dates  []time.Time `delim:"|"`
+	Places []string
+	None   []int
+}
+
+type Author struct {
+	Name      string `ini:"NAME"`
+	Male      bool
+	Age       int
+	GPA       float64
+	NeverMind string `ini:"-"`
+	*Embeded
+}
+
+func main() {
+	a := &Author{"Unknwon", true, 21, 2.8, "",
+		&Embeded{
+			[]time.Time{time.Now(), time.Now()},
+			[]string{"HangZhou", "Boston"},
+			[]int{},
+		}}
+	cfg := ini.Empty()
+	err = ini.ReflectFrom(cfg, a)
+	// ...
+}
+```
+
+So, what do I get?
+
+```ini
+NAME = Unknwon
+Male = true
+Age = 21
+GPA = 2.8
+
+[Embeded]
+Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
+Places = HangZhou,Boston
+None =
+```
+
+#### Name Mapper
+
+To save you time and make your code cleaner, this library supports a [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field names and actual section and key names.
+
+There are two built-in name mappers:
+
+- `AllCapsUnderscore`: converts field names to the `ALL_CAPS_UNDERSCORE` format before matching sections or keys.
+- `TitleUnderscore`: converts field names to the `title_underscore` format before matching sections or keys.
+
+To use them:
+
+```go
+type Info struct {
+	PackageName string
+}
+
+func main() {
+	err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
+	// ...
+
+	cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+	// ...
+	info := new(Info)
+	cfg.NameMapper = ini.AllCapsUnderscore
+	err = cfg.MapTo(info)
+	// ...
+}
+```
+
+The same name mapper rules apply to the `ini.ReflectFromWithMapper` function.
+
+#### Other Notes On Map/Reflect
+
+Any embedded struct is treated as a section by default, and there are no automatic parent-child relations in the map/reflect feature:
+
+```go
+type Child struct {
+	Age string
+}
+
+type Parent struct {
+	Name string
+	Child
+}
+
+type Config struct {
+	City string
+	Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+
+[Child]
+Age = 21
+```
+
+What if, yes, I'm paranoid, and I want the embedded struct to be in the same section? Well, all roads lead to Rome.
+
+```go
+type Child struct {
+	Age string
+}
+
+type Parent struct {
+	Name string
+	Child `ini:"Parent"`
+}
+
+type Config struct {
+	City string
+	Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+Age = 21
+```
+
+## Getting Help
+
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+- [File An Issue](https://github.com/go-ini/ini/issues/new)
+
+## FAQs
+
+### What does the `BlockMode` field do?
+
+By default, the library lets you read and write values, so it needs a lock to make sure your data is safe. But in cases where you are certain you only read data through the library, you can set `cfg.BlockMode = false` to make read operations about **50-70%** faster.
+
+### Why another INI library?
+
+Many people are using my other INI library, [goconfig](https://github.com/Unknwon/goconfig); the reason for this one is that I wanted to write more Go-style code. Also, when you set `cfg.BlockMode = false`, this one is about **10-30%** faster.
+
+To make those changes I had to break the API, so it was safer to start over in a new place and use `gopkg.in` to version the package from now on. (PS: a shorter import path, too.)
+
+## License
+
+This project is under the Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/README_ZH.md b/Godeps/_workspace/src/github.com/go-ini/ini/README_ZH.md
new file mode 100644
index 00000000..ce6d8eb4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-ini/ini/README_ZH.md
@@ -0,0 +1,577 @@
+This package provides INI file read and write functionality in Go.
+
+## Features
+
+- Load multiple data sources (`[]byte` or file) with overwrites.
+- Read recursive values.
+- Read parent-child sections.
+- Read auto-increment key names.
+- Read multi-line values.
+- Tons of helper methods.
+- Convert values directly to Go types on read.
+- Read and **write** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Sections and keys keep their original order when the file is saved.
+
+## Installation
+
+To use a specific tagged version:
+
+    go get gopkg.in/ini.v1
+
+To use the latest version:
+
+    go get github.com/go-ini/ini
+
+### Testing
+
+If you want to run the tests on your own machine, add the `-t` flag:
+
+    go get -t gopkg.in/ini.v1
+
+## Getting Started
+
+### Loading from data sources
+
+A **data source** is either raw data of type `[]byte` or a file path of type `string`. You can load **any number** of data sources. Passing a data source of any other type returns an error directly.
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename")
+```
+
+Or start from an empty file:
+
+```go
+cfg := ini.Empty()
+```
+
+When you cannot decide at the beginning which data sources to load, you can still use **Append()** to load them when needed.
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+### Working with sections (Section)
+
+To get a specific section:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+If you want the default section, use an empty string instead of the section name:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+When you are certain that a section exists, you can use this shortcut:
+
+```go
+section := cfg.Section("")
+```
+
+What happens if you guessed wrong and the section does not actually exist? No worries, it automatically creates and returns the corresponding section object for you.
+
+To create a section:
+
+```go
+err := cfg.NewSection("new section")
+```
+
+To get all section objects or names:
+
+```go
+sections := cfg.Sections()
+names := cfg.SectionStrings()
+```
+
+### Working with keys (Key)
+
+To get a key under a section:
+
+```go
+key, err := cfg.Section("").GetKey("key name")
+```
+
+As with sections, you can also get a key directly and skip the error handling:
+
+```go
+key := cfg.Section("").Key("key name")
+```
+
+To check whether a key exists:
+
+```go
+yes := cfg.Section("").HasKey("key name")
+```
+
+To create a new key:
+
+```go
+err := cfg.Section("").NewKey("name", "value")
+```
+
+To get all keys or key names under a section:
+
+```go
+keys := cfg.Section("").Keys()
+names := cfg.Section("").KeyStrings()
+```
+
+To get a clone of all key-value pairs under a section:
+
+```go
+hash := cfg.GetSection("").KeysHash()
+```
+
+### Working with values (Value)
+
+To get a value of type string:
+
+```go
+val := cfg.Section("").Key("key name").String()
+```
+
+To process and validate the value with a custom function while reading it:
+
+```go
+val := cfg.Section("").Key("key name").Validate(func(in string) string {
+	if len(in) == 0 {
+		return "default"
+	}
+	return in
+})
+```
+
+If you do not need any automatic transformation of the value (such as recursive reads), you can get the raw value directly (this gives the best performance):
+
+```go
+val := cfg.Section("").Key("key name").Value()
+```
+
+To check whether a raw value exists:
+
+```go
+yes := cfg.Section("").HasValue("test value")
+```
+
+To get values of other types:
+
+```go
+// Rules for booleans:
+// true when the value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
+// false when the value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
+v, err = cfg.Section("").Key("BOOL").Bool()
+v, err = cfg.Section("").Key("FLOAT64").Float64()
+v, err = cfg.Section("").Key("INT").Int()
+v, err = cfg.Section("").Key("INT64").Int64()
+v, err = cfg.Section("").Key("UINT").Uint()
+v, err = cfg.Section("").Key("UINT64").Uint64()
+v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
+v, err = cfg.Section("").Key("TIME").Time() // RFC3339
+
+v = cfg.Section("").Key("BOOL").MustBool()
+v = cfg.Section("").Key("FLOAT64").MustFloat64()
+v = cfg.Section("").Key("INT").MustInt()
+v = cfg.Section("").Key("INT64").MustInt64()
+v = cfg.Section("").Key("UINT").MustUint()
+v = cfg.Section("").Key("UINT64").MustUint64()
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
+v = cfg.Section("").Key("TIME").MustTime() // RFC3339
+
+// Methods starting with Must accept one argument of the same type
+// as a default value; when the key does not exist or conversion fails,
+// that default value is returned directly.
+// However, the MustString method always requires a default value.
+
+v = cfg.Section("").Key("String").MustString("default")
+v = cfg.Section("").Key("BOOL").MustBool(true)
+v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
+v = cfg.Section("").Key("INT").MustInt(10)
+v = cfg.Section("").Key("INT64").MustInt64(99)
+v = cfg.Section("").Key("UINT").MustUint(3)
+v = cfg.Section("").Key("UINT64").MustUint64(6)
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
+v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
+```
+
+What if my value spans several lines?
+
+```ini
+[advance]
+ADDRESS = """404 road,
+NotFound, State, 5000
+Earth"""
+```
+
+Huh? That's trivial!
+
+```go
+cfg.Section("advance").Key("ADDRESS").String()
+
+/* --- start ---
+404 road,
+NotFound, State, 5000
+Earth
+------ end --- */
+```
+
+Awesome! But what if a single line cannot hold the content and I want to continue on a second line?
+
+```ini
+[advance]
+two_lines = how about \
+    continuation lines?
+lots_of_lines = 1 \
+    2 \
+    3 \
+    4
+```
+
+Piece of cake!
+
+```go
+cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
+cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
+```
+
+Note that quotes around values are stripped automatically:
+
+```ini
+foo = "some value" // foo: some value
+bar = 'some value' // bar: some value
+```
+
+Is that all? Haha, of course not.
+
+#### Helper methods for working with values
+
+To supply candidate values when getting a key value:
+
+```go
+v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
+v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
+v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
+v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
+v = cfg.Section("").Key("UINT").InUint(4, []uint{3, 6, 9})
+v = cfg.Section("").Key("UINT64").InUint64(8, []uint64{3, 6, 9})
+v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
+v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
+```
+
+If the retrieved value is not one of the candidates, the default value is returned instead; the default value does not need to be one of the candidates.
+
+To validate that a value lies within a given range:
+
+```go
+vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
+vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
+vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
+vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
+vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
+vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
+vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
+```
+
+To auto-split a value into a slice:
+
+```go
+vals = cfg.Section("").Key("STRINGS").Strings(",")
+vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
+vals = cfg.Section("").Key("INTS").Ints(",")
+vals = cfg.Section("").Key("INT64S").Int64s(",")
+vals = cfg.Section("").Key("UINTS").Uints(",")
+vals = cfg.Section("").Key("UINT64S").Uint64s(",")
+vals = cfg.Section("").Key("TIMES").Times(",")
+```
+
+### Saving the configuration
+
+The moment has finally come: time to save the configuration.
+
+The basic approach is to write the configuration out to a file:
+
+```go
+// ...
+err = cfg.SaveTo("my.ini")
+err = cfg.SaveToIndent("my.ini", "\t")
+```
+
+A more advanced approach is to write it to any object implementing the `io.Writer` interface:
+
+```go
+// ...
+cfg.WriteTo(writer)
+cfg.WriteToIndent(writer, "\t")
+```
+
+### Advanced Usage
+
+#### Recursive values
+
+While key values are being retrieved, the special syntax `%(<name>)s` is applied, where `<name>` can be a key name in the same section or in the default section. The string `%(<name>)s` is replaced by the corresponding key value, or by an empty string if the named key does not exist. You can nest up to 99 levels of recursion.
+
+```ini
+NAME = ini
+
+[author]
+NAME = Unknwon
+GITHUB = https://github.com/%(NAME)s
+
+[package]
+FULL_NAME = github.com/go-ini/%(NAME)s
+```
+
+```go
+cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
+cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
+```
+
+#### Parent-child sections
+
+You can use `.` in section names to express a parent-child relationship between two or more sections. If a key does not exist in a child section, the lookup is retried in its parent section, and so on until there is no parent section left.
+
+```ini
+NAME = ini
+VERSION = v1
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+```
+
+```go
+cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
+```
+
+#### Auto-increment key names
+
+If a key name in the data source is `-`, the key is treated as using the special auto-increment syntax. The counter starts at 1, and each section has its own independent counter.
+
+```ini
+[features]
+-: Support read/write comments of keys and sections
+-: Support auto-increment of key names
+-: Support load multiple files to overwrite key values
+```
+
+```go
+cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
+```
+
+### Mapping to structs
+
+Want to play with INI in a more object-oriented way? Good idea.
+
+```ini
+Name = Unknwon
+age = 21
+Male = true
+Born = 1993-01-01T20:17:05Z
+
+[Note]
+Content = Hi is a good man!
+Cities = HangZhou, Boston
+```
+
+```go
+type Note struct {
+	Content string
+	Cities  []string
+}
+
+type Person struct {
+	Name string
+	Age  int `ini:"age"`
+	Male bool
+	Born time.Time
+	Note
+	Created time.Time `ini:"-"`
+}
+
+func main() {
+	cfg, err := ini.Load("path/to/ini")
+	// ...
+	p := new(Person)
+	err = cfg.MapTo(p)
+	// ...
+
+	// Things can be this simple.
+	err = ini.MapTo(p, "path/to/ini")
+	// ...
+
+	// Hmm? Only need to map one section?
+	n := new(Note)
+	err = cfg.Section("Note").MapTo(n)
+	// ...
+}
+```
+
+How do you give a struct field a default value? Easy: just assign the field a value before mapping. If the key is not found or has the wrong type, that value stays unchanged.
+
+```go
+// ...
+p := &Person{
+	Name: "Joe",
+}
+// ...
+```
+
+Playing with INI like this is really cool! But what good is it if you can't give me my original configuration file back from the struct?
+
+### Reflecting from structs
+
+Well, did I say you can't?
+
+```go
+type Embeded struct {
+	Dates  []time.Time `delim:"|"`
+	Places []string
+	None   []int
+}
+
+type Author struct {
+	Name      string `ini:"NAME"`
+	Male      bool
+	Age       int
+	GPA       float64
+	NeverMind string `ini:"-"`
+	*Embeded
+}
+
+func main() {
+	a := &Author{"Unknwon", true, 21, 2.8, "",
+		&Embeded{
+			[]time.Time{time.Now(), time.Now()},
+			[]string{"HangZhou", "Boston"},
+			[]int{},
+		}}
+	cfg := ini.Empty()
+	err = ini.ReflectFrom(cfg, a)
+	// ...
+}
+```
+
+Look, a miracle happened.
+
+```ini
+NAME = Unknwon
+Male = true
+Age = 21
+GPA = 2.8
+
+[Embeded]
+Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
+Places = HangZhou,Boston
+None =
+```
+
+#### Name mappers (NameMapper)
+
+To save you time and simplify your code, this library supports name mappers of type [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper), which handle the mapping between struct field names and actual section and key names.
+
+There are currently two built-in mappers:
+
+- `AllCapsUnderscore`: converts field names to the `ALL_CAPS_UNDERSCORE` format before matching section and key names.
+- `TitleUnderscore`: converts field names to the `title_underscore` format before matching section and key names.
+
+Usage:
+
+```go
+type Info struct {
+	PackageName string
+}
+
+func main() {
+	err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
+	// ...
+
+	cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+	// ...
+	info := new(Info)
+	cfg.NameMapper = ini.AllCapsUnderscore
+	err = cfg.MapTo(info)
+	// ...
+}
+```
+
+The same rules apply when using the `ini.ReflectFromWithMapper` function.
+
+#### Other notes on map/reflect
+
+Any embedded struct is treated as a separate section by default, and no parent-child section relationship is created automatically:
+
+```go
+type Child struct {
+	Age string
+}
+
+type Parent struct {
+	Name string
+	Child
+}
+
+type Config struct {
+	City string
+	Parent
+}
+```
+
+Example configuration file:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+
+[Child]
+Age = 21
+```
+
+Fine, but what if I insist that the embedded struct live in the same section? Well, all roads lead to Rome.
+
+```go
+type Child struct {
+	Age string
+}
+
+type Parent struct {
+	Name string
+	Child `ini:"Parent"`
+}
+
+type Config struct {
+	City string
+	Parent
+}
+```
+
+Example configuration file:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+Age = 21
+```
+
+## Getting Help
+
+- [API documentation](https://gowalker.org/gopkg.in/ini.v1)
+- [File an issue](https://github.com/go-ini/ini/issues/new)
+
+## FAQs
+
+### What is the `BlockMode` field?
+
+By default, this library uses a locking mechanism during read and write operations to keep your data safe. But in some cases you are certain that you only perform reads; you can then set `cfg.BlockMode = false` to speed up read operations by roughly **50-70%**.
+
+### Why write another INI library?
+
+Many people use my [goconfig](https://github.com/Unknwon/goconfig) to work with INI files, but I wanted more Go-style code. Also, when you set `cfg.BlockMode = false`, this library is roughly **10-30%** faster.
+
+To make those changes I had to break the API, so starting a new repository was the safest approach. Besides, this library uses `gopkg.in` directly for versioned releases. (The real truth: the import path got shorter.)
diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/ini.go b/Godeps/_workspace/src/github.com/go-ini/ini/ini.go
new file mode 100644
index 00000000..1a27f068
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-ini/ini/ini.go
@@ -0,0 +1,1027 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package ini provides INI file read and write functionality in Go.
+package ini
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	DEFAULT_SECTION = "DEFAULT"
+	// Maximum allowed depth when recursively substituting variable names.
+	_DEPTH_VALUES = 99
+
+	_VERSION = "1.8.6"
+)
+
+func Version() string {
+	return _VERSION
+}
+
+var (
+	LineBreak = "\n"
+
+	// Variable regexp pattern: %(variable)s
+	varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
+
+	// Write spaces around "=" to look better.
+	PrettyFormat = true
+)
+
+func init() {
+	if runtime.GOOS == "windows" {
+		LineBreak = "\r\n"
+	}
+}
+
+func inSlice(str string, s []string) bool {
+	for _, v := range s {
+		if str == v {
+			return true
+		}
+	}
+	return false
+}
+
+// dataSource is an interface that returns file content.
+type dataSource interface {
+	ReadCloser() (io.ReadCloser, error)
+}
+
+type sourceFile struct {
+	name string
+}
+
+func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
+	return os.Open(s.name)
+}
+
+type bytesReadCloser struct {
+	reader io.Reader
+}
+
+func (rc *bytesReadCloser) Read(p []byte) (n int, err error) {
+	return rc.reader.Read(p)
+}
+
+func (rc *bytesReadCloser) Close() error {
+	return nil
+}
+
+type sourceData struct {
+	data []byte
+}
+
+func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
+	return &bytesReadCloser{bytes.NewReader(s.data)}, nil
+}
+
+//  ____  __.
+// |    |/ _|____ ___.__.
+// |      <_/ __ <   |  |
+// |    |  \  ___/\___  |
+// |____|__ \___  > ____|
+//         \/   \/\/
+
+// Key represents a key under a section.
+type Key struct {
+	s          *Section
+	Comment    string
+	name       string
+	value      string
+	isAutoIncr bool
+}
+
+// Name returns name of key.
+func (k *Key) Name() string {
+	return k.name
+}
+
+// Value returns raw value of key for performance purpose.
+func (k *Key) Value() string {
+	return k.value
+}
+
+// String returns string representation of value.
+func (k *Key) String() string {
+	val := k.value
+	if strings.Index(val, "%") == -1 {
+		return val
+	}
+
+	for i := 0; i < _DEPTH_VALUES; i++ {
+		vr := varPattern.FindString(val)
+		if len(vr) == 0 {
+			break
+		}
+
+		// Take off leading '%(' and trailing ')s'.
+		noption := strings.TrimLeft(vr, "%(")
+		noption = strings.TrimRight(noption, ")s")
+
+		// Search in the same section.
+		nk, err := k.s.GetKey(noption)
+		if err != nil {
+			// Search again in default section.
+			nk, _ = k.s.f.Section("").GetKey(noption)
+		}
+
+		// Substitute with the value of the referenced key.
+		val = strings.Replace(val, vr, nk.value, -1)
+	}
+	return val
+}
+
+// Validate accepts a validate function which can
+// return a modified result as the key value.
+func (k *Key) Validate(fn func(string) string) string {
+	return fn(k.String())
+}
+
+// parseBool returns the boolean value represented by the string.
+//
+// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
+// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
+// Any other value returns an error.
+func parseBool(str string) (value bool, err error) {
+	switch str {
+	case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
+		return true, nil
+	case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
+		return false, nil
+	}
+	return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
+}
+
+// Bool returns bool type value.
+func (k *Key) Bool() (bool, error) {
+	return parseBool(k.String())
+}
+
+// Float64 returns float64 type value.
+func (k *Key) Float64() (float64, error) {
+	return strconv.ParseFloat(k.String(), 64)
+}
+
+// Int returns int type value.
+func (k *Key) Int() (int, error) {
+	return strconv.Atoi(k.String())
+}
+
+// Int64 returns int64 type value.
+func (k *Key) Int64() (int64, error) {
+	return strconv.ParseInt(k.String(), 10, 64)
+}
+
+// Uint returns uint type value.
+func (k *Key) Uint() (uint, error) {
+	u, e := strconv.ParseUint(k.String(), 10, 64)
+	return uint(u), e
+}
+
+// Uint64 returns uint64 type value.
+func (k *Key) Uint64() (uint64, error) {
+	return strconv.ParseUint(k.String(), 10, 64)
+}
+
+// Duration returns time.Duration type value.
+func (k *Key) Duration() (time.Duration, error) {
+	return time.ParseDuration(k.String())
+}
+
+// TimeFormat parses with given format and returns time.Time type value.
+func (k *Key) TimeFormat(format string) (time.Time, error) {
+	return time.Parse(format, k.String())
+}
+
+// Time parses with RFC3339 format and returns time.Time type value.
+func (k *Key) Time() (time.Time, error) {
+	return k.TimeFormat(time.RFC3339)
+}
+
+// MustString returns default value if key value is empty.
+func (k *Key) MustString(defaultVal string) string {
+	val := k.String()
+	if len(val) == 0 {
+		return defaultVal
+	}
+	return val
+}
+
+// MustBool always returns value without error,
+// it returns false if error occurs.
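+// For example, a key value of "yes" yields true, while an unparsable value
+// yields the given default, or false when no default is provided.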
+func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
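+// For example, with candidates []int64{10, 20, 30}, a stored value of 15
+// is not a candidate, so the default is returned instead.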
+func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + vals := strings.Split(str, delim) + for i := range vals { + vals[i] = strings.TrimSpace(vals[i]) + } + return vals +} + +// Float64s returns list of float64 divided by given delimiter. +func (k *Key) Float64s(delim string) []float64 { + strs := k.Strings(delim) + vals := make([]float64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseFloat(strs[i], 64) + } + return vals +} + +// Ints returns list of int divided by given delimiter. 
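+// For example, a key value of "1, 2, 3" with delim "," yields []int{1, 2, 3};
+// entries that fail to parse become 0.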
+func (k *Key) Ints(delim string) []int { + strs := k.Strings(delim) + vals := make([]int, len(strs)) + for i := range strs { + vals[i], _ = strconv.Atoi(strs[i]) + } + return vals +} + +// Int64s returns list of int64 divided by given delimiter. +func (k *Key) Int64s(delim string) []int64 { + strs := k.Strings(delim) + vals := make([]int64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseInt(strs[i], 10, 64) + } + return vals +} + +// Uints returns list of uint divided by given delimiter. +func (k *Key) Uints(delim string) []uint { + strs := k.Strings(delim) + vals := make([]uint, len(strs)) + for i := range strs { + u, _ := strconv.ParseUint(strs[i], 10, 0) + vals[i] = uint(u) + } + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. +func (k *Key) Uint64s(delim string) []uint64 { + strs := k.Strings(delim) + vals := make([]uint64, len(strs)) + for i := range strs { + vals[i], _ = strconv.ParseUint(strs[i], 10, 64) + } + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) TimesFormat(format, delim string) []time.Time { + strs := k.Strings(delim) + vals := make([]time.Time, len(strs)) + for i := range strs { + vals[i], _ = time.Parse(format, strs[i]) + } + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} + +// _________ __ .__ +// / _____/ ____ _____/ |_|__| ____ ____ +// \_____ \_/ __ \_/ ___\ __\ |/ _ \ / \ +// / \ ___/\ \___| | | ( <_> ) | \ +// /_______ /\___ >\___ >__| |__|\____/|___| / +// \/ \/ \/ \/ + +// Section represents a config section. +type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string +} + +func newSection(f *File, name string) *Section { + return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)} +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + s.keys[name].value = val + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = &Key{s, "", name, val, false} + s.keysHash[name] = val + return s.keys[name], nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + // FIXME: change to section level lock? + if s.f.BlockMode { + s.f.lock.RLock() + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. 
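+ // For example, for section "package.sub" the lookup falls back to
+ // "package", walking up one segment at a time until no parent remains.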
+ sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } else { + break + } + } + return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) HasKey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// Haskey is a backwards-compatible name for HasKey. +func (s *Section) Haskey(name string) bool { + return s.HasKey(name) +} + +// HasValue returns true if section contains given raw value. +func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. + key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := map[string]string{} + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + return + } + } +} + +// ___________.__.__ +// \_ _____/|__| | ____ +// | __) | | | _/ __ \ +// | \ | | |_\ ___/ +// \___ / |__|____/\___ > +// \/ \/ + +// File represents a combination of a or more INI file(s) in memory. +type File struct { + // Should make things safe, but sometimes doesn't matter. + BlockMode bool + // Make sure data is safe in multiple goroutines. + lock sync.RWMutex + + // Allow combination of multiple data sources. + dataSources []dataSource + // Actual data is stored here. + sections map[string]*Section + + // To keep data in order. + sectionList []string + + NameMapper +} + +// newFile initializes File object with given data sources. +func newFile(dataSources []dataSource) *File { + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string]*Section), + sectionList: make([]string, 0, 10), + } +} + +func parseDataSource(source interface{}) (dataSource, error) { + switch s := source.(type) { + case string: + return sourceFile{s}, nil + case []byte: + return &sourceData{s}, nil + default: + return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) + } +} + +// Load loads and parses from INI data sources. +// Arguments can be mixed of file name with string type, or raw data in []byte. 
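+// For example (with a hypothetical file name):
+//	cfg, err := Load("app.ini", []byte("more = data"))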
+func Load(source interface{}, others ...interface{}) (_ *File, err error) {
+	sources := make([]dataSource, len(others)+1)
+	sources[0], err = parseDataSource(source)
+	if err != nil {
+		return nil, err
+	}
+	for i := range others {
+		sources[i+1], err = parseDataSource(others[i])
+		if err != nil {
+			return nil, err
+		}
+	}
+	f := newFile(sources)
+	if err = f.Reload(); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// Empty returns an empty file object.
+func Empty() *File {
+	// Ignore error here, we're sure our data is good.
+	f, _ := Load([]byte(""))
+	return f
+}
+
+// NewSection creates a new section.
+func (f *File) NewSection(name string) (*Section, error) {
+	if len(name) == 0 {
+		return nil, errors.New("error creating new section: empty section name")
+	}
+
+	if f.BlockMode {
+		f.lock.Lock()
+		defer f.lock.Unlock()
+	}
+
+	if inSlice(name, f.sectionList) {
+		return f.sections[name], nil
+	}
+
+	f.sectionList = append(f.sectionList, name)
+	f.sections[name] = newSection(f, name)
+	return f.sections[name], nil
+}
+
+// NewSections creates a list of sections.
+func (f *File) NewSections(names ...string) (err error) {
+	for _, name := range names {
+		if _, err = f.NewSection(name); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// GetSection returns section by given name.
+func (f *File) GetSection(name string) (*Section, error) {
+	if len(name) == 0 {
+		name = DEFAULT_SECTION
+	}
+
+	if f.BlockMode {
+		f.lock.RLock()
+		defer f.lock.RUnlock()
+	}
+
+	sec := f.sections[name]
+	if sec == nil {
+		return nil, fmt.Errorf("error when getting section: section '%s' does not exist", name)
+	}
+	return sec, nil
+}
+
+// Section assumes the named section exists and creates a new one when it does not.
+func (f *File) Section(name string) *Section {
+	sec, err := f.GetSection(name)
+	if err != nil {
+		// Note: It's OK here because the only possible error is an empty section name,
+		// but if it's empty, this piece of code won't be executed.
+		sec, _ = f.NewSection(name)
+		return sec
+	}
+	return sec
+}
+
+// Sections returns a list of all sections.
+func (f *File) Sections() []*Section {
+	sections := make([]*Section, len(f.sectionList))
+	for i := range f.sectionList {
+		sections[i] = f.Section(f.sectionList[i])
+	}
+	return sections
+}
+
+// SectionStrings returns list of section names.
+func (f *File) SectionStrings() []string {
+	list := make([]string, len(f.sectionList))
+	copy(list, f.sectionList)
+	return list
+}
+
+// DeleteSection deletes a section.
+func (f *File) DeleteSection(name string) {
+	if f.BlockMode {
+		f.lock.Lock()
+		defer f.lock.Unlock()
+	}
+
+	if len(name) == 0 {
+		name = DEFAULT_SECTION
+	}
+
+	for i, s := range f.sectionList {
+		if s == name {
+			f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
+			delete(f.sections, name)
+			return
+		}
+	}
+}
+
+func (f *File) reload(s dataSource) error {
+	r, err := s.ReadCloser()
+	if err != nil {
+		return err
+	}
+	defer r.Close()
+
+	return f.parse(r)
+}
+
+// Reload reloads and parses all data sources.
+func (f *File) Reload() (err error) {
+	for _, s := range f.dataSources {
+		if err = f.reload(s); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Append appends one or more data sources and reloads automatically.
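+//
+// A short sketch (the file name below is hypothetical):
+//
+//	cfg, _ := ini.Load([]byte("key = value"))
+//	if err := cfg.Append("override.ini"); err != nil {
+//		// handle error
+//	}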
+func (f *File) Append(source interface{}, others ...interface{}) error {
+	ds, err := parseDataSource(source)
+	if err != nil {
+		return err
+	}
+	f.dataSources = append(f.dataSources, ds)
+	for _, s := range others {
+		ds, err = parseDataSource(s)
+		if err != nil {
+			return err
+		}
+		f.dataSources = append(f.dataSources, ds)
+	}
+	return f.Reload()
+}
+
+// WriteToIndent writes file content into io.Writer with the given value indentation.
+func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
+	equalSign := "="
+	if PrettyFormat {
+		equalSign = " = "
+	}
+
+	// Use a buffer to make sure the target is safe until encoding finishes.
+	buf := bytes.NewBuffer(nil)
+	for i, sname := range f.sectionList {
+		sec := f.Section(sname)
+		if len(sec.Comment) > 0 {
+			if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
+				sec.Comment = "; " + sec.Comment
+			}
+			if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil {
+				return 0, err
+			}
+		}
+
+		if i > 0 {
+			if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
+				return 0, err
+			}
+		} else {
+			// Write nothing if default section is empty.
+			if len(sec.keyList) == 0 {
+				continue
+			}
+		}
+
+		for _, kname := range sec.keyList {
+			key := sec.Key(kname)
+			if len(key.Comment) > 0 {
+				if len(indent) > 0 && sname != DEFAULT_SECTION {
+					buf.WriteString(indent)
+				}
+				if key.Comment[0] != '#' && key.Comment[0] != ';' {
+					key.Comment = "; " + key.Comment
+				}
+				if _, err = buf.WriteString(key.Comment + LineBreak); err != nil {
+					return 0, err
+				}
+			}
+
+			if len(indent) > 0 && sname != DEFAULT_SECTION {
+				buf.WriteString(indent)
+			}
+
+			switch {
+			case key.isAutoIncr:
+				kname = "-"
+			case strings.ContainsAny(kname, "\"=:"):
+				kname = "`" + kname + "`"
+			case strings.Contains(kname, "`"):
+				kname = `"""` + kname + `"""`
+			}
+
+			val := key.value
+			// In case key value contains "\n", "`", "\"", "#" or ";".
+			if strings.ContainsAny(val, "\n`") {
+				val = `"""` + val + `"""`
+			} else if strings.ContainsAny(val, "#;") {
+				val = "`" + val + "`"
+			}
+			if _, err = buf.WriteString(kname + equalSign + val + LineBreak); err != nil {
+				return 0, err
+			}
+		}
+
+		// Put a line between sections.
+		if _, err = buf.WriteString(LineBreak); err != nil {
+			return 0, err
+		}
+	}
+
+	return buf.WriteTo(w)
+}
+
+// WriteTo writes file content into io.Writer.
+func (f *File) WriteTo(w io.Writer) (int64, error) {
+	return f.WriteToIndent(w, "")
+}
+
+// SaveToIndent writes content to the file system with the given value indentation.
+func (f *File) SaveToIndent(filename, indent string) error {
+	// Note: Because os.Create truncates the target, it's safer to save to a
+	// temporary location first and rename after we are done.
+	tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp"
+	defer os.Remove(tmpPath)
+
+	fw, err := os.Create(tmpPath)
+	if err != nil {
+		return err
+	}
+
+	if _, err = f.WriteToIndent(fw, indent); err != nil {
+		fw.Close()
+		return err
+	}
+	fw.Close()
+
+	// Remove old file and rename the new one.
+	os.Remove(filename)
+	return os.Rename(tmpPath, filename)
+}
+
+// SaveTo writes content to the file system.
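+//
+// A minimal sketch (the output path below is hypothetical):
+//
+//	if err := cfg.SaveTo("out.ini"); err != nil {
+//		// handle error
+//	}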
+func (f *File) SaveTo(filename string) error { + return f.SaveToIndent(filename, "") +} diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/parser.go b/Godeps/_workspace/src/github.com/go-ini/ini/parser.go new file mode 100644 index 00000000..1c1bf91f --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-ini/ini/parser.go @@ -0,0 +1,312 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + "unicode" +) + +type tokenType int + +const ( + _TOKEN_INVALID tokenType = iota + _TOKEN_COMMENT + _TOKEN_SECTION + _TOKEN_KEY +) + +type parser struct { + buf *bufio.Reader + isEOF bool + count int + comment *bytes.Buffer +} + +func newParser(r io.Reader) *parser { + return &parser{ + buf: bufio.NewReader(r), + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of BOM-UTF8 format. +// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } else if mask[0] == 239 && mask[1] == 187 && mask[2] == 191 { + p.buf.Read(mask) + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. 
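+	// For example, a line like `"my=key" = value` quotes the name so that the
+	// '=' inside it is not mistaken for the key-value delimiter (illustrative note).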
+	var keyQuote string
+	if line[0] == '"' {
+		if len(line) > 6 && string(line[0:3]) == `"""` {
+			keyQuote = `"""`
+		} else {
+			keyQuote = `"`
+		}
+	} else if line[0] == '`' {
+		keyQuote = "`"
+	}
+
+	// Extract the key name
+	endIdx := -1
+	if len(keyQuote) > 0 {
+		startIdx := len(keyQuote)
+		// FIXME: fail case -> """"""name"""=value
+		pos := strings.Index(line[startIdx:], keyQuote)
+		if pos == -1 {
+			return "", -1, fmt.Errorf("missing closing key quote: %s", line)
+		}
+		pos += startIdx
+
+		// Find key-value delimiter
+		i := strings.IndexAny(line[pos+startIdx:], "=:")
+		if i < 0 {
+			return "", -1, fmt.Errorf("key-value delimiter not found: %s", line)
+		}
+		endIdx = pos + i
+		return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
+	}
+
+	endIdx = strings.IndexAny(line, "=:")
+	if endIdx < 0 {
+		return "", -1, fmt.Errorf("key-value delimiter not found: %s", line)
+	}
+	return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
+}
+
+func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
+	for {
+		data, err := p.readUntil('\n')
+		if err != nil {
+			return "", err
+		}
+		next := string(data)
+
+		pos := strings.LastIndex(next, valQuote)
+		if pos > -1 {
+			val += next[:pos]
+
+			comment, has := cleanComment([]byte(next[pos:]))
+			if has {
+				p.comment.Write(bytes.TrimSpace(comment))
+			}
+			break
+		}
+		val += next
+		if p.isEOF {
+			return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next)
+		}
+	}
+	return val, nil
+}
+
+func (p *parser) readContinuationLines(val string) (string, error) {
+	for {
+		data, err := p.readUntil('\n')
+		if err != nil {
+			return "", err
+		}
+		next := strings.TrimSpace(string(data))
+
+		if len(next) == 0 {
+			break
+		}
+		val += next
+		if val[len(val)-1] != '\\' {
+			break
+		}
+		val = val[:len(val)-1]
+	}
+	return val, nil
+}
+
+// hasSurroundedQuote checks if and only if the first and last characters
+// are quotes \" or \'.
+// It returns false if any other part also contains the same kind of quote.
+func hasSurroundedQuote(in string, quote byte) bool {
+	return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote &&
+		strings.IndexByte(in[1:], quote) == len(in)-2
+}
+
+func (p *parser) readValue(in []byte) (string, error) {
+	line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
+	if len(line) == 0 {
+		return "", nil
+	}
+
+	var valQuote string
+	if len(line) > 3 && string(line[0:3]) == `"""` {
+		valQuote = `"""`
+	} else if line[0] == '`' {
+		valQuote = "`"
+	}
+
+	if len(valQuote) > 0 {
+		startIdx := len(valQuote)
+		pos := strings.LastIndex(line[startIdx:], valQuote)
+		// Check for multi-line value
+		if pos == -1 {
+			return p.readMultilines(line, line[startIdx:], valQuote)
+		}
+
+		return line[startIdx : pos+startIdx], nil
+	}
+
+	// Won't be able to reach here if value only contains whitespace.
+	line = strings.TrimSpace(line)
+
+	// Check continuation lines
+	if line[len(line)-1] == '\\' {
+		return p.readContinuationLines(line[:len(line)-1])
+	}
+
+	i := strings.IndexAny(line, "#;")
+	if i > -1 {
+		p.comment.WriteString(line[i:])
+		line = strings.TrimSpace(line[:i])
+	}
+
+	// Trim single quotes
+	if hasSurroundedQuote(line, '\'') ||
+		hasSurroundedQuote(line, '"') {
+		line = line[1 : len(line)-1]
+	}
+	return line, nil
+}
+
+// parse parses data through an io.Reader.
+func (f *File) parse(reader io.Reader) (err error) {
+	p := newParser(reader)
+	if err = p.BOM(); err != nil {
+		return fmt.Errorf("BOM: %v", err)
+	}
+
+	// Ignore the error because the default section name is never an empty string.
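+	// All keys read before the first [section] header land in DEFAULT_SECTION.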
+	section, _ := f.NewSection(DEFAULT_SECTION)
+
+	var line []byte
+	for !p.isEOF {
+		line, err = p.readUntil('\n')
+		if err != nil {
+			return err
+		}
+
+		line = bytes.TrimLeftFunc(line, unicode.IsSpace)
+		if len(line) == 0 {
+			continue
+		}
+
+		// Comments
+		if line[0] == '#' || line[0] == ';' {
+			// Note: we keep the trailing line break here because it is needed
+			// when a second comment line is appended; it is cleaned once at
+			// the end when the comment is set on a value.
+			p.comment.Write(line)
+			continue
+		}
+
+		// Section
+		if line[0] == '[' {
+			// Read to the next ']' (TODO: support quoted strings)
+			closeIdx := bytes.IndexByte(line, ']')
+			if closeIdx == -1 {
+				return fmt.Errorf("unclosed section: %s", line)
+			}
+
+			section, err = f.NewSection(string(line[1:closeIdx]))
+			if err != nil {
+				return err
+			}
+
+			comment, has := cleanComment(line[closeIdx+1:])
+			if has {
+				p.comment.Write(comment)
+			}
+
+			section.Comment = strings.TrimSpace(p.comment.String())
+
+			// Reset auto-counter and comments
+			p.comment.Reset()
+			p.count = 1
+			continue
+		}
+
+		kname, offset, err := readKeyName(line)
+		if err != nil {
+			return err
+		}
+
+		// Auto increment.
+		isAutoIncr := false
+		if kname == "-" {
+			isAutoIncr = true
+			kname = "#" + strconv.Itoa(p.count)
+			p.count++
+		}
+
+		key, err := section.NewKey(kname, "")
+		if err != nil {
+			return err
+		}
+		key.isAutoIncr = isAutoIncr
+
+		value, err := p.readValue(line[offset:])
+		if err != nil {
+			return err
+		}
+		key.SetValue(value)
+		key.Comment = strings.TrimSpace(p.comment.String())
+		p.comment.Reset()
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/struct.go b/Godeps/_workspace/src/github.com/go-ini/ini/struct.go
new file mode 100644
index 00000000..3fb92c39
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/go-ini/ini/struct.go
@@ -0,0 +1,351 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"reflect"
+	"time"
+	"unicode"
+)
+
+// NameMapper represents an ini tag name mapper.
+type NameMapper func(string) string
+
+// Built-in name getters.
+var (
+	// AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
+	AllCapsUnderscore NameMapper = func(raw string) string {
+		newstr := make([]rune, 0, len(raw))
+		for i, chr := range raw {
+			if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+				if i > 0 {
+					newstr = append(newstr, '_')
+				}
+			}
+			newstr = append(newstr, unicode.ToUpper(chr))
+		}
+		return string(newstr)
+	}
+	// TitleUnderscore converts to format title_underscore.
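+	// For example, TitleUnderscore("FooBar") yields "foo_bar" (illustrative note).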
+	TitleUnderscore NameMapper = func(raw string) string {
+		newstr := make([]rune, 0, len(raw))
+		for i, chr := range raw {
+			if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+				if i > 0 {
+					newstr = append(newstr, '_')
+				}
+				chr -= ('A' - 'a')
+			}
+			newstr = append(newstr, chr)
+		}
+		return string(newstr)
+	}
+)
+
+func (s *Section) parseFieldName(raw, actual string) string {
+	if len(actual) > 0 {
+		return actual
+	}
+	if s.f.NameMapper != nil {
+		return s.f.NameMapper(raw)
+	}
+	return raw
+}
+
+func parseDelim(actual string) string {
+	if len(actual) > 0 {
+		return actual
+	}
+	return ","
+}
+
+var reflectTime = reflect.TypeOf(time.Now()).Kind()
+
+// setWithProperType sets a proper value to the field based on its type,
+// but it does not return an error when parsing fails, because we want to
+// keep the default value that is already assigned to the struct.
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+	switch t.Kind() {
+	case reflect.String:
+		if len(key.String()) == 0 {
+			return nil
+		}
+		field.SetString(key.String())
+	case reflect.Bool:
+		boolVal, err := key.Bool()
+		if err != nil {
+			return nil
+		}
+		field.SetBool(boolVal)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		durationVal, err := key.Duration()
+		// Skip zero value
+		if err == nil && int(durationVal) > 0 {
+			field.Set(reflect.ValueOf(durationVal))
+			return nil
+		}
+
+		intVal, err := key.Int64()
+		if err != nil || intVal == 0 {
+			return nil
+		}
+		field.SetInt(intVal)
+	// byte is an alias for uint8, so supporting uint8 breaks support for byte
+	case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		durationVal, err := key.Duration()
+		if err == nil {
+			field.Set(reflect.ValueOf(durationVal))
+			return nil
+		}
+
+		uintVal, err := key.Uint64()
+		if err != nil {
+			return nil
+		}
+		field.SetUint(uintVal)
+
+	case reflect.Float64:
+		floatVal, err := key.Float64()
+		if err != nil {
+			return nil
+		}
+		field.SetFloat(floatVal)
+	case reflectTime:
+		timeVal, err := key.Time()
+		if err != nil {
+			return nil
+		}
+		field.Set(reflect.ValueOf(timeVal))
+	case reflect.Slice:
+		vals := key.Strings(delim)
+		numVals := len(vals)
+		if numVals == 0 {
+			return nil
+		}
+
+		sliceOf := field.Type().Elem().Kind()
+
+		var times []time.Time
+		if sliceOf == reflectTime {
+			times = key.Times(delim)
+		}
+
+		slice := reflect.MakeSlice(field.Type(), numVals, numVals)
+		for i := 0; i < numVals; i++ {
+			switch sliceOf {
+			case reflectTime:
+				slice.Index(i).Set(reflect.ValueOf(times[i]))
+			default:
+				slice.Index(i).Set(reflect.ValueOf(vals[i]))
+			}
+		}
+		field.Set(slice)
+	default:
+		return fmt.Errorf("unsupported type '%s'", t)
+	}
+	return nil
+}
+
+func (s *Section) mapTo(val reflect.Value) error {
+	if val.Kind() == reflect.Ptr {
+		val = val.Elem()
+	}
+	typ := val.Type()
+
+	for i := 0; i < typ.NumField(); i++ {
+		field := val.Field(i)
+		tpField := typ.Field(i)
+
+		tag := tpField.Tag.Get("ini")
+		if tag == "-" {
+			continue
+		}
+
+		fieldName := s.parseFieldName(tpField.Name, tag)
+		if len(fieldName) == 0 || !field.CanSet() {
+			continue
+		}
+
+		isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
+		isStruct := tpField.Type.Kind() == reflect.Struct
+		if isAnonymous {
+			field.Set(reflect.New(tpField.Type.Elem()))
+		}
+
+		if isAnonymous || isStruct {
+			if sec, err := s.f.GetSection(fieldName); err == nil {
+				if err = sec.mapTo(field); err != nil {
+					return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+				}
+				continue
+			}
+		}
+
+		if key, err := s.GetKey(fieldName); err == nil {
+			if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+				return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+			}
+		}
+	}
+	return nil
+}
+
+// MapTo maps the section to the given struct.
+func (s *Section) MapTo(v interface{}) error {
+	typ := reflect.TypeOf(v)
+	val := reflect.ValueOf(v)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		val = val.Elem()
+	} else {
+		return errors.New("cannot map to non-pointer struct")
+	}
+
+	return s.mapTo(val)
+}
+
+// MapTo maps the file to the given struct.
+func (f *File) MapTo(v interface{}) error {
+	return f.Section("").MapTo(v)
+}
+
+// MapToWithMapper maps data sources to the given struct with a name mapper.
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+	cfg, err := Load(source, others...)
+	if err != nil {
+		return err
+	}
+	cfg.NameMapper = mapper
+	return cfg.MapTo(v)
+}
+
+// MapTo maps data sources to the given struct.
+func MapTo(v, source interface{}, others ...interface{}) error {
+	return MapToWithMapper(v, nil, source, others...)
+}
+
+// reflectWithProperType does the opposite of setWithProperType.
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+	switch t.Kind() {
+	case reflect.String:
+		key.SetValue(field.String())
+	case reflect.Bool,
+		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+		reflect.Float64,
+		reflectTime:
+		key.SetValue(fmt.Sprint(field))
+	case reflect.Slice:
+		vals := field.Slice(0, field.Len())
+		if field.Len() == 0 {
+			return nil
+		}
+
+		var buf bytes.Buffer
+		isTime := fmt.Sprint(field.Type()) == "[]time.Time"
+		for i := 0; i < field.Len(); i++ {
+			if isTime {
+				buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339))
+			} else {
+				buf.WriteString(fmt.Sprint(vals.Index(i)))
+			}
+			buf.WriteString(delim)
+		}
+		key.SetValue(buf.String()[:buf.Len()-1])
+	default:
+		return fmt.Errorf("unsupported type '%s'", t)
+	}
+	return nil
+}
+
+func (s *Section) reflectFrom(val reflect.Value) error {
+	if val.Kind() == reflect.Ptr {
+		val = val.Elem()
+	}
+	typ := val.Type()
+
+	for i := 0; i < typ.NumField(); i++ {
+		field := val.Field(i)
+		tpField := typ.Field(i)
+
+		tag := tpField.Tag.Get("ini")
+		if tag == "-" {
+			continue
+		}
+
+		fieldName := s.parseFieldName(tpField.Name, tag)
+		if len(fieldName) == 0 || !field.CanSet() {
+			continue
+		}
+
+		if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
+			(tpField.Type.Kind() == reflect.Struct) {
+			// Note: The only error here is that the section doesn't exist.
+			sec, err := s.f.GetSection(fieldName)
+			if err != nil {
+				// Note: fieldName can never be empty here, ignore error.
+				sec, _ = s.f.NewSection(fieldName)
+			}
+			if err = sec.reflectFrom(field); err != nil {
+				return fmt.Errorf("error reflecting field(%s): %v", fieldName, err)
+			}
+			continue
+		}
+
+		// Note: Same reason as for sections above.
+		key, err := s.GetKey(fieldName)
+		if err != nil {
+			key, _ = s.NewKey(fieldName, "")
+		}
+		if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+			return fmt.Errorf("error reflecting field(%s): %v", fieldName, err)
+		}
+	}
+	return nil
+}
+
+// ReflectFrom reflects the section from the given struct.
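+//
+// A brief sketch (the Config type and its field below are hypothetical):
+//
+//	type Config struct {
+//		Name string `ini:"name"`
+//	}
+//	cfg := ini.Empty()
+//	_ = cfg.Section("").ReflectFrom(&Config{Name: "app"})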
+func (s *Section) ReflectFrom(v interface{}) error {
+	typ := reflect.TypeOf(v)
+	val := reflect.ValueOf(v)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		val = val.Elem()
+	} else {
+		return errors.New("cannot reflect from non-pointer struct")
+	}
+
+	return s.reflectFrom(val)
+}
+
+// ReflectFrom reflects the file from the given struct.
+func (f *File) ReflectFrom(v interface{}) error {
+	return f.Section("").ReflectFrom(v)
+}
+
+// ReflectFromWithMapper reflects data sources from the given struct with a name mapper.
+func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
+	cfg.NameMapper = mapper
+	return cfg.ReflectFrom(v)
+}
+
+// ReflectFrom reflects data sources from the given struct.
+func ReflectFrom(cfg *File, v interface{}) error {
+	return ReflectFromWithMapper(cfg, v, nil)
+}
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.gitignore b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.gitignore
new file mode 100644
index 00000000..531fcc11
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.gitignore
@@ -0,0 +1,4 @@
+jpgo
+jmespath-fuzz.zip
+cpu.out
+go-jmespath.test
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.travis.yml b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.travis.yml
new file mode 100644
index 00000000..1f980775
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+
+sudo: false
+
+go:
+  - 1.4
+
+install: go get -v -t ./...
+script: make test
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/LICENSE b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/LICENSE
new file mode 100644
index 00000000..b03310a9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2015 James Saryerwinnie
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/Makefile b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/Makefile
new file mode 100644
index 00000000..a828d284
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/Makefile
@@ -0,0 +1,44 @@
+
+CMD = jpgo
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  test        to run all the tests"
+	@echo "  build       to build the library and jp executable"
+	@echo "  generate    to run codegen"
+
+
+generate:
+	go generate ./...
+
+build:
+	rm -f $(CMD)
+	go build ./...
+	rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
+	mv cmd/$(CMD)/$(CMD) .
+
+test:
+	go test -v ./...
+
+check:
+	go vet ./...
+	@echo "golint ./..."
+	@lint=`golint ./...`; \
+	lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
+	echo "$$lint"; \
+	if [ "$$lint" != "" ]; then exit 1; fi
+
+htmlc:
+	go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov
+
+buildfuzz:
+	go-fuzz-build github.com/jmespath/go-jmespath/fuzz
+
+fuzz: buildfuzz
+	go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata
+
+bench:
+	go test -bench . -cpuprofile cpu.out
+
+pprof-cpu:
+	go tool pprof ./go-jmespath.test ./cpu.out
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/README.md b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/README.md
new file mode 100644
index 00000000..187ef676
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/README.md
@@ -0,0 +1,7 @@
+# go-jmespath - A JMESPath implementation in Go
+
+[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath)
+
+
+
+See http://jmespath.org for more info.
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/api.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/api.go
new file mode 100644
index 00000000..9cfa988b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/api.go
@@ -0,0 +1,49 @@
+package jmespath
+
+import "strconv"
+
+// JMESPath is the representation of a compiled JMESPath query. A JMESPath is
+// safe for concurrent use by multiple goroutines.
+type JMESPath struct {
+	ast  ASTNode
+	intr *treeInterpreter
+}
+
+// Compile parses a JMESPath expression and returns, if successful, a JMESPath
+// object that can be used to match against data.
+func Compile(expression string) (*JMESPath, error) {
+	parser := NewParser()
+	ast, err := parser.Parse(expression)
+	if err != nil {
+		return nil, err
+	}
+	jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
+	return jmespath, nil
+}
+
+// MustCompile is like Compile but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled
+// JMESPaths.
+func MustCompile(expression string) *JMESPath {
+	jmespath, err := Compile(expression)
+	if err != nil {
+		panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
+	}
+	return jmespath
+}
+
+// Search evaluates a compiled JMESPath expression against input data and returns the result.
+func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
+	return jp.intr.Execute(jp.ast, data)
+}
+
+// Search parses and evaluates a JMESPath expression against input data and returns the result.
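+//
+// A minimal sketch (the expression and data below are illustrative):
+//
+//	data := map[string]interface{}{"foo": map[string]interface{}{"bar": 1}}
+//	result, err := jmespath.Search("foo.bar", data)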
+func Search(expression string, data interface{}) (interface{}, error) { + intr := newInterpreter() + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + return intr.Execute(ast, data) +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/astnodetype_string.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/astnodetype_string.go new file mode 100644 index 00000000..1cd2d239 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/astnodetype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type astNodeType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" + +var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} + +func (i astNodeType) String() string { + if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { + return fmt.Sprintf("astNodeType(%d)", i) + } + return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/basic.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/basic.json new file mode 100644 index 00000000..d550e969 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/basic.json @@ -0,0 +1,96 @@ +[{ + "given": + {"foo": {"bar": {"baz": "correct"}}}, + "cases": [ + { + "expression": "foo", + "result": {"bar": {"baz": "correct"}} + }, + { + "expression": "foo.bar", + "result": {"baz": "correct"} + }, + { + "expression": "foo.bar.baz", + "result": "correct" + }, + { + "expression": "foo\n.\nbar\n.baz", + "result": "correct" + }, + { + "expression": "foo.bar.baz.bad", + "result": null + }, + { + "expression": "foo.bar.bad", + "result": null + }, + { + "expression": "foo.bad", + "result": null + }, + { + "expression": "bad", + "result": null + }, + { + "expression": "bad.morebad.morebad", + "result": null + } + ] +}, +{ + "given": + {"foo": {"bar": ["one", "two", "three"]}}, + "cases": [ + { + "expression": "foo", + "result": {"bar": ["one", "two", "three"]} + }, + { + "expression": "foo.bar", + "result": ["one", "two", "three"] + } + ] +}, +{ + "given": ["one", "two", "three"], + "cases": [ + { + "expression": "one", + "result": null + }, + { + "expression": "two", + "result": null + }, + { + "expression": "three", + "result": null + }, + { + "expression": "one.two", + "result": null + } + ] +}, +{ + "given": + {"foo": {"1": ["one", "two", "three"], "-1": "bar"}}, + "cases": [ + { + "expression": "foo.\"1\"", + "result": ["one", "two", "three"] + }, + { + "expression": "foo.\"1\"[0]", + "result": "one" + }, + { + "expression": "foo.\"-1\"", + "result": "bar" + } + ] +} +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/boolean.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/boolean.json new file mode 100644 index 00000000..e3fa196b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/boolean.json @@ -0,0 +1,257 @@ +[ + { + "given": { + "outer": { + "foo": "foo", + "bar": "bar", + "baz": "baz" + } + }, + "cases": [ + { + "expression": 
"outer.foo || outer.bar", + "result": "foo" + }, + { + "expression": "outer.foo||outer.bar", + "result": "foo" + }, + { + "expression": "outer.bar || outer.baz", + "result": "bar" + }, + { + "expression": "outer.bar||outer.baz", + "result": "bar" + }, + { + "expression": "outer.bad || outer.foo", + "result": "foo" + }, + { + "expression": "outer.bad||outer.foo", + "result": "foo" + }, + { + "expression": "outer.foo || outer.bad", + "result": "foo" + }, + { + "expression": "outer.foo||outer.bad", + "result": "foo" + }, + { + "expression": "outer.bad || outer.alsobad", + "result": null + }, + { + "expression": "outer.bad||outer.alsobad", + "result": null + } + ] + }, + { + "given": { + "outer": { + "foo": "foo", + "bool": false, + "empty_list": [], + "empty_string": "" + } + }, + "cases": [ + { + "expression": "outer.empty_string || outer.foo", + "result": "foo" + }, + { + "expression": "outer.nokey || outer.bool || outer.empty_list || outer.empty_string || outer.foo", + "result": "foo" + } + ] + }, + { + "given": { + "True": true, + "False": false, + "Number": 5, + "EmptyList": [], + "Zero": 0 + }, + "cases": [ + { + "expression": "True && False", + "result": false + }, + { + "expression": "False && True", + "result": false + }, + { + "expression": "True && True", + "result": true + }, + { + "expression": "False && False", + "result": false + }, + { + "expression": "True && Number", + "result": 5 + }, + { + "expression": "Number && True", + "result": true + }, + { + "expression": "Number && False", + "result": false + }, + { + "expression": "Number && EmptyList", + "result": [] + }, + { + "expression": "Number && True", + "result": true + }, + { + "expression": "EmptyList && True", + "result": [] + }, + { + "expression": "EmptyList && False", + "result": [] + }, + { + "expression": "True || False", + "result": true + }, + { + "expression": "True || True", + "result": true + }, + { + "expression": "False || True", + "result": true + }, + { + "expression": "False || False", + "result": false + }, + { + "expression": "Number || EmptyList", + "result": 5 + }, + { + "expression": "Number || True", + "result": 5 + }, + { + "expression": "Number || True && False", + "result": 5 + }, + { + "expression": "(Number || True) && False", + "result": false + }, + { + "expression": "Number || (True && False)", + "result": 5 + }, + { + "expression": "!True", + "result": false + }, + { + "expression": "!False", + "result": true + }, + { + "expression": "!Number", + "result": false + }, + { + "expression": "!EmptyList", + "result": true + }, + { + "expression": "True && !False", + "result": true + }, + { + "expression": "True && !EmptyList", + "result": true + }, + { + "expression": "!False && !EmptyList", + "result": true + }, + { + "expression": "!(True && False)", + "result": true + }, + { + "expression": "!Zero", + "result": false + }, + { + "expression": "!!Zero", + "result": true + } + ] + }, + { + "given": { + "one": 1, + "two": 2, + "three": 3 + }, + "cases": [ + { + "expression": "one < two", + "result": true + }, + { + "expression": "one <= two", + "result": true + }, + { + "expression": "one == one", + "result": true + }, + { + "expression": "one == two", + "result": false + }, + { + "expression": "one > two", + "result": false + }, + { + "expression": "one >= two", + "result": false + }, + { + "expression": "one != two", + "result": true + }, + { + "expression": "one < two && three > one", + "result": true + }, + { + "expression": "one < two || three > one", + "result": true + }, + { + 
"expression": "one < two || three < one", + "result": true + }, + { + "expression": "two < one || three < one", + "result": false + } + ] + } +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/current.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/current.json new file mode 100644 index 00000000..0c26248d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/current.json @@ -0,0 +1,25 @@ +[ + { + "given": { + "foo": [{"name": "a"}, {"name": "b"}], + "bar": {"baz": "qux"} + }, + "cases": [ + { + "expression": "@", + "result": { + "foo": [{"name": "a"}, {"name": "b"}], + "bar": {"baz": "qux"} + } + }, + { + "expression": "@.bar", + "result": {"baz": "qux"} + }, + { + "expression": "@.foo[0]", + "result": {"name": "a"} + } + ] + } +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/escape.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/escape.json new file mode 100644 index 00000000..4a62d951 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/escape.json @@ -0,0 +1,46 @@ +[{ + "given": { + "foo.bar": "dot", + "foo bar": "space", + "foo\nbar": "newline", + "foo\"bar": "doublequote", + "c:\\\\windows\\path": "windows", + "/unix/path": "unix", + "\"\"\"": "threequotes", + "bar": {"baz": "qux"} + }, + "cases": [ + { + "expression": "\"foo.bar\"", + "result": "dot" + }, + { + "expression": "\"foo bar\"", + "result": "space" + }, + { + "expression": "\"foo\\nbar\"", + "result": "newline" + }, + { + "expression": "\"foo\\\"bar\"", + "result": "doublequote" + }, + { + "expression": "\"c:\\\\\\\\windows\\\\path\"", + "result": "windows" + }, + { + "expression": "\"/unix/path\"", + "result": "unix" + }, + { + "expression": "\"\\\"\\\"\\\"\"", + "result": "threequotes" + }, + { + "expression": "\"bar\".\"baz\"", + "result": "qux" + } + ] +}] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/filters.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/filters.json new file mode 100644 index 00000000..5b9f52b1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/filters.json @@ -0,0 +1,468 @@ +[ + { + "given": {"foo": [{"name": "a"}, {"name": "b"}]}, + "cases": [ + { + "comment": "Matching a literal", + "expression": "foo[?name == 'a']", + "result": [{"name": "a"}] + } + ] + }, + { + "given": {"foo": [0, 1], "bar": [2, 3]}, + "cases": [ + { + "comment": "Matching a literal", + "expression": "*[?[0] == `0`]", + "result": [[], []] + } + ] + }, + { + "given": {"foo": [{"first": "foo", "last": "bar"}, + {"first": "foo", "last": "foo"}, + {"first": "foo", "last": "baz"}]}, + "cases": [ + { + "comment": "Matching an expression", + "expression": "foo[?first == last]", + "result": [{"first": "foo", "last": "foo"}] + }, + { + "comment": "Verify projection created from filter", + "expression": "foo[?first == last].first", + "result": ["foo"] + } + ] + }, + { + "given": {"foo": [{"age": 20}, + {"age": 25}, + {"age": 30}]}, + "cases": [ + { + "comment": "Greater than with a number", + "expression": "foo[?age > `25`]", + "result": [{"age": 30}] + }, + { + "expression": "foo[?age >= `25`]", + "result": [{"age": 25}, {"age": 30}] + }, + { + "comment": "Greater than with a number", + "expression": "foo[?age > `30`]", + "result": [] + }, + { + "comment": "Greater than with a number", + "expression": "foo[?age < `25`]", + "result": [{"age": 20}] + }, + { + "comment": "Greater 
than with a number", + "expression": "foo[?age <= `25`]", + "result": [{"age": 20}, {"age": 25}] + }, + { + "comment": "Greater than with a number", + "expression": "foo[?age < `20`]", + "result": [] + }, + { + "expression": "foo[?age == `20`]", + "result": [{"age": 20}] + }, + { + "expression": "foo[?age != `20`]", + "result": [{"age": 25}, {"age": 30}] + } + ] + }, + { + "given": {"foo": [{"top": {"name": "a"}}, + {"top": {"name": "b"}}]}, + "cases": [ + { + "comment": "Filter with subexpression", + "expression": "foo[?top.name == 'a']", + "result": [{"top": {"name": "a"}}] + } + ] + }, + { + "given": {"foo": [{"top": {"first": "foo", "last": "bar"}}, + {"top": {"first": "foo", "last": "foo"}}, + {"top": {"first": "foo", "last": "baz"}}]}, + "cases": [ + { + "comment": "Matching an expression", + "expression": "foo[?top.first == top.last]", + "result": [{"top": {"first": "foo", "last": "foo"}}] + }, + { + "comment": "Matching a JSON array", + "expression": "foo[?top == `{\"first\": \"foo\", \"last\": \"bar\"}`]", + "result": [{"top": {"first": "foo", "last": "bar"}}] + } + ] + }, + { + "given": {"foo": [ + {"key": true}, + {"key": false}, + {"key": 0}, + {"key": 1}, + {"key": [0]}, + {"key": {"bar": [0]}}, + {"key": null}, + {"key": [1]}, + {"key": {"a":2}} + ]}, + "cases": [ + { + "expression": "foo[?key == `true`]", + "result": [{"key": true}] + }, + { + "expression": "foo[?key == `false`]", + "result": [{"key": false}] + }, + { + "expression": "foo[?key == `0`]", + "result": [{"key": 0}] + }, + { + "expression": "foo[?key == `1`]", + "result": [{"key": 1}] + }, + { + "expression": "foo[?key == `[0]`]", + "result": [{"key": [0]}] + }, + { + "expression": "foo[?key == `{\"bar\": [0]}`]", + "result": [{"key": {"bar": [0]}}] + }, + { + "expression": "foo[?key == `null`]", + "result": [{"key": null}] + }, + { + "expression": "foo[?key == `[1]`]", + "result": [{"key": [1]}] + }, + { + "expression": "foo[?key == `{\"a\":2}`]", + "result": [{"key": {"a":2}}] + }, + { + "expression": "foo[?`true` == key]", + "result": [{"key": true}] + }, + { + "expression": "foo[?`false` == key]", + "result": [{"key": false}] + }, + { + "expression": "foo[?`0` == key]", + "result": [{"key": 0}] + }, + { + "expression": "foo[?`1` == key]", + "result": [{"key": 1}] + }, + { + "expression": "foo[?`[0]` == key]", + "result": [{"key": [0]}] + }, + { + "expression": "foo[?`{\"bar\": [0]}` == key]", + "result": [{"key": {"bar": [0]}}] + }, + { + "expression": "foo[?`null` == key]", + "result": [{"key": null}] + }, + { + "expression": "foo[?`[1]` == key]", + "result": [{"key": [1]}] + }, + { + "expression": "foo[?`{\"a\":2}` == key]", + "result": [{"key": {"a":2}}] + }, + { + "expression": "foo[?key != `true`]", + "result": [{"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `false`]", + "result": [{"key": true}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `0`]", + "result": [{"key": true}, {"key": false}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `1`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `null`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, 
{"key": [0]}, + {"key": {"bar": [0]}}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `[1]`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": {"a":2}}] + }, + { + "expression": "foo[?key != `{\"a\":2}`]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}] + }, + { + "expression": "foo[?`true` != key]", + "result": [{"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`false` != key]", + "result": [{"key": true}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`0` != key]", + "result": [{"key": true}, {"key": false}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`1` != key]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`null` != key]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": [1]}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`[1]` != key]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": {"a":2}}] + }, + { + "expression": "foo[?`{\"a\":2}` != key]", + "result": [{"key": true}, {"key": false}, {"key": 0}, {"key": 1}, {"key": [0]}, + {"key": {"bar": [0]}}, {"key": null}, {"key": [1]}] + } + ] + }, + { + "given": {"reservations": [ + {"instances": [ + {"foo": 1, "bar": 2}, {"foo": 1, "bar": 3}, + {"foo": 1, "bar": 2}, {"foo": 2, "bar": 1}]}]}, + "cases": [ + { + "expression": "reservations[].instances[?bar==`1`]", + "result": [[{"foo": 2, "bar": 1}]] + }, + { + "expression": "reservations[*].instances[?bar==`1`]", + "result": [[{"foo": 2, "bar": 1}]] + }, + { + "expression": "reservations[].instances[?bar==`1`][]", + "result": [{"foo": 2, "bar": 1}] + } + ] + }, + { + "given": { + "baz": "other", + "foo": [ + {"bar": 1}, {"bar": 2}, {"bar": 3}, {"bar": 4}, {"bar": 1, "baz": 2} + ] + }, + "cases": [ + { + "expression": "foo[?bar==`1`].bar[0]", + "result": [] + } + ] + }, + { + "given": { + "foo": [ + {"a": 1, "b": {"c": "x"}}, + {"a": 1, "b": {"c": "y"}}, + {"a": 1, "b": {"c": "z"}}, + {"a": 2, "b": {"c": "z"}}, + {"a": 1, "baz": 2} + ] + }, + "cases": [ + { + "expression": "foo[?a==`1`].b.c", + "result": ["x", "y", "z"] + } + ] + }, + { + "given": {"foo": [{"name": "a"}, {"name": "b"}, {"name": "c"}]}, + "cases": [ + { + "comment": "Filter with or expression", + "expression": "foo[?name == 'a' || name == 'b']", + "result": [{"name": "a"}, {"name": "b"}] + }, + { + "expression": "foo[?name == 'a' || name == 'e']", + "result": [{"name": "a"}] + }, + { + "expression": "foo[?name == 'a' || name == 'b' || name == 'c']", + "result": [{"name": "a"}, {"name": "b"}, {"name": "c"}] + } + ] + }, + { + "given": {"foo": [{"a": 1, "b": 2}, {"a": 1, "b": 3}]}, + "cases": [ + { + "comment": "Filter with and expression", + "expression": "foo[?a == `1` && b == `2`]", + "result": [{"a": 1, "b": 2}] + }, + { + "expression": "foo[?a == `1` && b == `4`]", + "result": [] + } + ] + }, + { + "given": {"foo": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}]}, + "cases": [ + { 
+ "comment": "Filter with Or and And expressions", + "expression": "foo[?c == `3` || a == `1` && b == `4`]", + "result": [{"a": 1, "b": 2, "c": 3}] + }, + { + "expression": "foo[?b == `2` || a == `3` && b == `4`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?a == `3` && b == `4` || b == `2`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?(a == `3` && b == `4`) || b == `2`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?((a == `3` && b == `4`)) || b == `2`]", + "result": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}] + }, + { + "expression": "foo[?a == `3` && (b == `4` || b == `2`)]", + "result": [{"a": 3, "b": 4}] + }, + { + "expression": "foo[?a == `3` && ((b == `4` || b == `2`))]", + "result": [{"a": 3, "b": 4}] + } + ] + }, + { + "given": {"foo": [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4}]}, + "cases": [ + { + "comment": "Verify precedence of or/and expressions", + "expression": "foo[?a == `1` || b ==`2` && c == `5`]", + "result": [{"a": 1, "b": 2, "c": 3}] + }, + { + "comment": "Parentheses can alter precedence", + "expression": "foo[?(a == `1` || b ==`2`) && c == `5`]", + "result": [] + }, + { + "comment": "Not expressions combined with and/or", + "expression": "foo[?!(a == `1` || b ==`2`)]", + "result": [{"a": 3, "b": 4}] + } + ] + }, + { + "given": { + "foo": [ + {"key": true}, + {"key": false}, + {"key": []}, + {"key": {}}, + {"key": [0]}, + {"key": {"a": "b"}}, + {"key": 0}, + {"key": 1}, + {"key": null}, + {"notkey": true} + ] + }, + "cases": [ + { + "comment": "Unary filter expression", + "expression": "foo[?key]", + "result": [ + {"key": true}, {"key": [0]}, {"key": {"a": "b"}}, + {"key": 0}, {"key": 1} + ] + }, + { + "comment": "Unary not filter expression", + "expression": "foo[?!key]", + "result": [ + {"key": false}, {"key": []}, {"key": {}}, + {"key": null}, {"notkey": true} + ] + }, + { + "comment": "Equality with null RHS", + "expression": "foo[?key == `null`]", + "result": [ + {"key": null}, {"notkey": true} + ] + } + ] + }, + { + "given": { + "foo": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + "cases": [ + { + "comment": "Using @ in a filter expression", + "expression": "foo[?@ < `5`]", + "result": [0, 1, 2, 3, 4] + }, + { + "comment": "Using @ in a filter expression", + "expression": "foo[?`5` > @]", + "result": [0, 1, 2, 3, 4] + }, + { + "comment": "Using @ in a filter expression", + "expression": "foo[?@ == @]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + } + ] + } +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/functions.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/functions.json new file mode 100644 index 00000000..8b8db363 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/functions.json @@ -0,0 +1,825 @@ +[{ + "given": + { + "foo": -1, + "zero": 0, + "numbers": [-1, 3, 4, 5], + "array": [-1, 3, 4, 5, "a", "100"], + "strings": ["a", "b", "c"], + "decimals": [1.01, 1.2, -1.5], + "str": "Str", + "false": false, + "empty_list": [], + "empty_hash": {}, + "objects": {"foo": "bar", "bar": "baz"}, + "null_key": null + }, + "cases": [ + { + "expression": "abs(foo)", + "result": 1 + }, + { + "expression": "abs(foo)", + "result": 1 + }, + { + "expression": "abs(str)", + "error": "invalid-type" + }, + { + "expression": "abs(array[1])", + "result": 3 + }, + { + "expression": "abs(array[1])", + "result": 3 + }, + { + "expression": "abs(`false`)", + "error": 
"invalid-type" + }, + { + "expression": "abs(`-24`)", + "result": 24 + }, + { + "expression": "abs(`-24`)", + "result": 24 + }, + { + "expression": "abs(`1`, `2`)", + "error": "invalid-arity" + }, + { + "expression": "abs()", + "error": "invalid-arity" + }, + { + "expression": "unknown_function(`1`, `2`)", + "error": "unknown-function" + }, + { + "expression": "avg(numbers)", + "result": 2.75 + }, + { + "expression": "avg(array)", + "error": "invalid-type" + }, + { + "expression": "avg('abc')", + "error": "invalid-type" + }, + { + "expression": "avg(foo)", + "error": "invalid-type" + }, + { + "expression": "avg(@)", + "error": "invalid-type" + }, + { + "expression": "avg(strings)", + "error": "invalid-type" + }, + { + "expression": "ceil(`1.2`)", + "result": 2 + }, + { + "expression": "ceil(decimals[0])", + "result": 2 + }, + { + "expression": "ceil(decimals[1])", + "result": 2 + }, + { + "expression": "ceil(decimals[2])", + "result": -1 + }, + { + "expression": "ceil('string')", + "error": "invalid-type" + }, + { + "expression": "contains('abc', 'a')", + "result": true + }, + { + "expression": "contains('abc', 'd')", + "result": false + }, + { + "expression": "contains(`false`, 'd')", + "error": "invalid-type" + }, + { + "expression": "contains(strings, 'a')", + "result": true + }, + { + "expression": "contains(decimals, `1.2`)", + "result": true + }, + { + "expression": "contains(decimals, `false`)", + "result": false + }, + { + "expression": "ends_with(str, 'r')", + "result": true + }, + { + "expression": "ends_with(str, 'tr')", + "result": true + }, + { + "expression": "ends_with(str, 'Str')", + "result": true + }, + { + "expression": "ends_with(str, 'SStr')", + "result": false + }, + { + "expression": "ends_with(str, 'foo')", + "result": false + }, + { + "expression": "ends_with(str, `0`)", + "error": "invalid-type" + }, + { + "expression": "floor(`1.2`)", + "result": 1 + }, + { + "expression": "floor('string')", + "error": "invalid-type" + }, + { + "expression": "floor(decimals[0])", + "result": 1 + }, + { + "expression": "floor(foo)", + "result": -1 + }, + { + "expression": "floor(str)", + "error": "invalid-type" + }, + { + "expression": "length('abc')", + "result": 3 + }, + { + "expression": "length('✓foo')", + "result": 4 + }, + { + "expression": "length('')", + "result": 0 + }, + { + "expression": "length(@)", + "result": 12 + }, + { + "expression": "length(strings[0])", + "result": 1 + }, + { + "expression": "length(str)", + "result": 3 + }, + { + "expression": "length(array)", + "result": 6 + }, + { + "expression": "length(objects)", + "result": 2 + }, + { + "expression": "length(`false`)", + "error": "invalid-type" + }, + { + "expression": "length(foo)", + "error": "invalid-type" + }, + { + "expression": "length(strings[0])", + "result": 1 + }, + { + "expression": "max(numbers)", + "result": 5 + }, + { + "expression": "max(decimals)", + "result": 1.2 + }, + { + "expression": "max(strings)", + "result": "c" + }, + { + "expression": "max(abc)", + "error": "invalid-type" + }, + { + "expression": "max(array)", + "error": "invalid-type" + }, + { + "expression": "max(decimals)", + "result": 1.2 + }, + { + "expression": "max(empty_list)", + "result": null + }, + { + "expression": "merge(`{}`)", + "result": {} + }, + { + "expression": "merge(`{}`, `{}`)", + "result": {} + }, + { + "expression": "merge(`{\"a\": 1}`, `{\"b\": 2}`)", + "result": {"a": 1, "b": 2} + }, + { + "expression": "merge(`{\"a\": 1}`, `{\"a\": 2}`)", + "result": {"a": 2} + }, + { + "expression": "merge(`{\"a\": 1, 
\"b\": 2}`, `{\"a\": 2, \"c\": 3}`, `{\"d\": 4}`)", + "result": {"a": 2, "b": 2, "c": 3, "d": 4} + }, + { + "expression": "min(numbers)", + "result": -1 + }, + { + "expression": "min(decimals)", + "result": -1.5 + }, + { + "expression": "min(abc)", + "error": "invalid-type" + }, + { + "expression": "min(array)", + "error": "invalid-type" + }, + { + "expression": "min(empty_list)", + "result": null + }, + { + "expression": "min(decimals)", + "result": -1.5 + }, + { + "expression": "min(strings)", + "result": "a" + }, + { + "expression": "type('abc')", + "result": "string" + }, + { + "expression": "type(`1.0`)", + "result": "number" + }, + { + "expression": "type(`2`)", + "result": "number" + }, + { + "expression": "type(`true`)", + "result": "boolean" + }, + { + "expression": "type(`false`)", + "result": "boolean" + }, + { + "expression": "type(`null`)", + "result": "null" + }, + { + "expression": "type(`[0]`)", + "result": "array" + }, + { + "expression": "type(`{\"a\": \"b\"}`)", + "result": "object" + }, + { + "expression": "type(@)", + "result": "object" + }, + { + "expression": "sort(keys(objects))", + "result": ["bar", "foo"] + }, + { + "expression": "keys(foo)", + "error": "invalid-type" + }, + { + "expression": "keys(strings)", + "error": "invalid-type" + }, + { + "expression": "keys(`false`)", + "error": "invalid-type" + }, + { + "expression": "sort(values(objects))", + "result": ["bar", "baz"] + }, + { + "expression": "keys(empty_hash)", + "result": [] + }, + { + "expression": "values(foo)", + "error": "invalid-type" + }, + { + "expression": "join(', ', strings)", + "result": "a, b, c" + }, + { + "expression": "join(', ', strings)", + "result": "a, b, c" + }, + { + "expression": "join(',', `[\"a\", \"b\"]`)", + "result": "a,b" + }, + { + "expression": "join(',', `[\"a\", 0]`)", + "error": "invalid-type" + }, + { + "expression": "join(', ', str)", + "error": "invalid-type" + }, + { + "expression": "join('|', strings)", + "result": "a|b|c" + }, + { + "expression": "join(`2`, strings)", + "error": "invalid-type" + }, + { + "expression": "join('|', decimals)", + "error": "invalid-type" + }, + { + "expression": "join('|', decimals[].to_string(@))", + "result": "1.01|1.2|-1.5" + }, + { + "expression": "join('|', empty_list)", + "result": "" + }, + { + "expression": "reverse(numbers)", + "result": [5, 4, 3, -1] + }, + { + "expression": "reverse(array)", + "result": ["100", "a", 5, 4, 3, -1] + }, + { + "expression": "reverse(`[]`)", + "result": [] + }, + { + "expression": "reverse('')", + "result": "" + }, + { + "expression": "reverse('hello world')", + "result": "dlrow olleh" + }, + { + "expression": "starts_with(str, 'S')", + "result": true + }, + { + "expression": "starts_with(str, 'St')", + "result": true + }, + { + "expression": "starts_with(str, 'Str')", + "result": true + }, + { + "expression": "starts_with(str, 'String')", + "result": false + }, + { + "expression": "starts_with(str, `0`)", + "error": "invalid-type" + }, + { + "expression": "sum(numbers)", + "result": 11 + }, + { + "expression": "sum(decimals)", + "result": 0.71 + }, + { + "expression": "sum(array)", + "error": "invalid-type" + }, + { + "expression": "sum(array[].to_number(@))", + "result": 111 + }, + { + "expression": "sum(`[]`)", + "result": 0 + }, + { + "expression": "to_array('foo')", + "result": ["foo"] + }, + { + "expression": "to_array(`0`)", + "result": [0] + }, + { + "expression": "to_array(objects)", + "result": [{"foo": "bar", "bar": "baz"}] + }, + { + "expression": "to_array(`[1, 2, 3]`)", + "result": 
[1, 2, 3] + }, + { + "expression": "to_array(false)", + "result": [false] + }, + { + "expression": "to_string('foo')", + "result": "foo" + }, + { + "expression": "to_string(`1.2`)", + "result": "1.2" + }, + { + "expression": "to_string(`[0, 1]`)", + "result": "[0,1]" + }, + { + "expression": "to_number('1.0')", + "result": 1.0 + }, + { + "expression": "to_number('1.1')", + "result": 1.1 + }, + { + "expression": "to_number('4')", + "result": 4 + }, + { + "expression": "to_number('notanumber')", + "result": null + }, + { + "expression": "to_number(`false`)", + "result": null + }, + { + "expression": "to_number(`null`)", + "result": null + }, + { + "expression": "to_number(`[0]`)", + "result": null + }, + { + "expression": "to_number(`{\"foo\": 0}`)", + "result": null + }, + { + "expression": "\"to_string\"(`1.0`)", + "error": "syntax" + }, + { + "expression": "sort(numbers)", + "result": [-1, 3, 4, 5] + }, + { + "expression": "sort(strings)", + "result": ["a", "b", "c"] + }, + { + "expression": "sort(decimals)", + "result": [-1.5, 1.01, 1.2] + }, + { + "expression": "sort(array)", + "error": "invalid-type" + }, + { + "expression": "sort(abc)", + "error": "invalid-type" + }, + { + "expression": "sort(empty_list)", + "result": [] + }, + { + "expression": "sort(@)", + "error": "invalid-type" + }, + { + "expression": "not_null(unknown_key, str)", + "result": "Str" + }, + { + "expression": "not_null(unknown_key, foo.bar, empty_list, str)", + "result": [] + }, + { + "expression": "not_null(unknown_key, null_key, empty_list, str)", + "result": [] + }, + { + "expression": "not_null(all, expressions, are_null)", + "result": null + }, + { + "expression": "not_null()", + "error": "invalid-arity" + }, + { + "description": "function projection on single arg function", + "expression": "numbers[].to_string(@)", + "result": ["-1", "3", "4", "5"] + }, + { + "description": "function projection on single arg function", + "expression": "array[].to_number(@)", + "result": [-1, 3, 4, 5, 100] + } + ] +}, { + "given": + { + "foo": [ + {"b": "b", "a": "a"}, + {"c": "c", "b": "b"}, + {"d": "d", "c": "c"}, + {"e": "e", "d": "d"}, + {"f": "f", "e": "e"} + ] + }, + "cases": [ + { + "description": "function projection on variadic function", + "expression": "foo[].not_null(f, e, d, c, b, a)", + "result": ["b", "c", "d", "e", "f"] + } + ] +}, { + "given": + { + "people": [ + {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 50, "age_str": "50", "bool": false, "name": "d"}, + {"age": 10, "age_str": "10", "bool": true, "name": 3} + ] + }, + "cases": [ + { + "description": "sort by field expression", + "expression": "sort_by(people, &age)", + "result": [ + {"age": 10, "age_str": "10", "bool": true, "name": 3}, + {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"}, + {"age": 50, "age_str": "50", "bool": false, "name": "d"} + ] + }, + { + "expression": "sort_by(people, &age_str)", + "result": [ + {"age": 10, "age_str": "10", "bool": true, "name": 3}, + {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"}, + {"age": 50, "age_str": "50", "bool": false, "name": 
"d"} + ] + }, + { + "description": "sort by function expression", + "expression": "sort_by(people, &to_number(age_str))", + "result": [ + {"age": 10, "age_str": "10", "bool": true, "name": 3}, + {"age": 20, "age_str": "20", "bool": true, "name": "a", "extra": "foo"}, + {"age": 30, "age_str": "30", "bool": true, "name": "c"}, + {"age": 40, "age_str": "40", "bool": false, "name": "b", "extra": "bar"}, + {"age": 50, "age_str": "50", "bool": false, "name": "d"} + ] + }, + { + "description": "function projection on sort_by function", + "expression": "sort_by(people, &age)[].name", + "result": [3, "a", "c", "b", "d"] + }, + { + "expression": "sort_by(people, &extra)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, &bool)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, &name)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, name)", + "error": "invalid-type" + }, + { + "expression": "sort_by(people, &age)[].extra", + "result": ["foo", "bar"] + }, + { + "expression": "sort_by(`[]`, &age)", + "result": [] + }, + { + "expression": "max_by(people, &age)", + "result": {"age": 50, "age_str": "50", "bool": false, "name": "d"} + }, + { + "expression": "max_by(people, &age_str)", + "result": {"age": 50, "age_str": "50", "bool": false, "name": "d"} + }, + { + "expression": "max_by(people, &bool)", + "error": "invalid-type" + }, + { + "expression": "max_by(people, &extra)", + "error": "invalid-type" + }, + { + "expression": "max_by(people, &to_number(age_str))", + "result": {"age": 50, "age_str": "50", "bool": false, "name": "d"} + }, + { + "expression": "min_by(people, &age)", + "result": {"age": 10, "age_str": "10", "bool": true, "name": 3} + }, + { + "expression": "min_by(people, &age_str)", + "result": {"age": 10, "age_str": "10", "bool": true, "name": 3} + }, + { + "expression": "min_by(people, &bool)", + "error": "invalid-type" + }, + { + "expression": "min_by(people, &extra)", + "error": "invalid-type" + }, + { + "expression": "min_by(people, &to_number(age_str))", + "result": {"age": 10, "age_str": "10", "bool": true, "name": 3} + } + ] +}, { + "given": + { + "people": [ + {"age": 10, "order": "1"}, + {"age": 10, "order": "2"}, + {"age": 10, "order": "3"}, + {"age": 10, "order": "4"}, + {"age": 10, "order": "5"}, + {"age": 10, "order": "6"}, + {"age": 10, "order": "7"}, + {"age": 10, "order": "8"}, + {"age": 10, "order": "9"}, + {"age": 10, "order": "10"}, + {"age": 10, "order": "11"} + ] + }, + "cases": [ + { + "description": "stable sort order", + "expression": "sort_by(people, &age)", + "result": [ + {"age": 10, "order": "1"}, + {"age": 10, "order": "2"}, + {"age": 10, "order": "3"}, + {"age": 10, "order": "4"}, + {"age": 10, "order": "5"}, + {"age": 10, "order": "6"}, + {"age": 10, "order": "7"}, + {"age": 10, "order": "8"}, + {"age": 10, "order": "9"}, + {"age": 10, "order": "10"}, + {"age": 10, "order": "11"} + ] + } + ] +}, { + "given": + { + "people": [ + {"a": 10, "b": 1, "c": "z"}, + {"a": 10, "b": 2, "c": null}, + {"a": 10, "b": 3}, + {"a": 10, "b": 4, "c": "z"}, + {"a": 10, "b": 5, "c": null}, + {"a": 10, "b": 6}, + {"a": 10, "b": 7, "c": "z"}, + {"a": 10, "b": 8, "c": null}, + {"a": 10, "b": 9} + ], + "empty": [] + }, + "cases": [ + { + "expression": "map(&a, people)", + "result": [10, 10, 10, 10, 10, 10, 10, 10, 10] + }, + { + "expression": "map(&c, people)", + "result": ["z", null, null, "z", null, null, "z", null, null] + }, + { + "expression": "map(&a, badkey)", + "error": "invalid-type" + }, + { + "expression": 
"map(&foo, empty)", + "result": [] + } + ] +}, { + "given": { + "array": [ + { + "foo": {"bar": "yes1"} + }, + { + "foo": {"bar": "yes2"} + }, + { + "foo1": {"bar": "no"} + } + ]}, + "cases": [ + { + "expression": "map(&foo.bar, array)", + "result": ["yes1", "yes2", null] + }, + { + "expression": "map(&foo1.bar, array)", + "result": [null, null, "no"] + }, + { + "expression": "map(&foo.bar.baz, array)", + "result": [null, null, null] + } + ] +}, { + "given": { + "array": [[1, 2, 3, [4]], [5, 6, 7, [8, 9]]] + }, + "cases": [ + { + "expression": "map(&[], array)", + "result": [[1, 2, 3, 4], [5, 6, 7, 8, 9]] + } + ] +} +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/identifiers.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/identifiers.json new file mode 100644 index 00000000..7998a41a --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/identifiers.json @@ -0,0 +1,1377 @@ +[ + { + "given": { + "__L": true + }, + "cases": [ + { + "expression": "__L", + "result": true + } + ] + }, + { + "given": { + "!\r": true + }, + "cases": [ + { + "expression": "\"!\\r\"", + "result": true + } + ] + }, + { + "given": { + "Y_1623": true + }, + "cases": [ + { + "expression": "Y_1623", + "result": true + } + ] + }, + { + "given": { + "x": true + }, + "cases": [ + { + "expression": "x", + "result": true + } + ] + }, + { + "given": { + "\tF\uCebb": true + }, + "cases": [ + { + "expression": "\"\\tF\\uCebb\"", + "result": true + } + ] + }, + { + "given": { + " \t": true + }, + "cases": [ + { + "expression": "\" \\t\"", + "result": true + } + ] + }, + { + "given": { + " ": true + }, + "cases": [ + { + "expression": "\" \"", + "result": true + } + ] + }, + { + "given": { + "v2": true + }, + "cases": [ + { + "expression": "v2", + "result": true + } + ] + }, + { + "given": { + "\t": true + }, + "cases": [ + { + "expression": "\"\\t\"", + "result": true + } + ] + }, + { + "given": { + "_X": true + }, + "cases": [ + { + "expression": "_X", + "result": true + } + ] + }, + { + "given": { + "\t4\ud9da\udd15": true + }, + "cases": [ + { + "expression": "\"\\t4\\ud9da\\udd15\"", + "result": true + } + ] + }, + { + "given": { + "v24_W": true + }, + "cases": [ + { + "expression": "v24_W", + "result": true + } + ] + }, + { + "given": { + "H": true + }, + "cases": [ + { + "expression": "\"H\"", + "result": true + } + ] + }, + { + "given": { + "\f": true + }, + "cases": [ + { + "expression": "\"\\f\"", + "result": true + } + ] + }, + { + "given": { + "E4": true + }, + "cases": [ + { + "expression": "\"E4\"", + "result": true + } + ] + }, + { + "given": { + "!": true + }, + "cases": [ + { + "expression": "\"!\"", + "result": true + } + ] + }, + { + "given": { + "tM": true + }, + "cases": [ + { + "expression": "tM", + "result": true + } + ] + }, + { + "given": { + " [": true + }, + "cases": [ + { + "expression": "\" [\"", + "result": true + } + ] + }, + { + "given": { + "R!": true + }, + "cases": [ + { + "expression": "\"R!\"", + "result": true + } + ] + }, + { + "given": { + "_6W": true + }, + "cases": [ + { + "expression": "_6W", + "result": true + } + ] + }, + { + "given": { + "\uaBA1\r": true + }, + "cases": [ + { + "expression": "\"\\uaBA1\\r\"", + "result": true + } + ] + }, + { + "given": { + "tL7": true + }, + "cases": [ + { + "expression": "tL7", + "result": true + } + ] + }, + { + "given": { + "<": true + }, + "cases": [ + { + "expression": "\">\"", + "result": true + } + ] + }, + { + "given": { + "hvu": true + }, + "cases": [ + { 
+ "expression": "hvu", + "result": true + } + ] + }, + { + "given": { + "; !": true + }, + "cases": [ + { + "expression": "\"; !\"", + "result": true + } + ] + }, + { + "given": { + "hU": true + }, + "cases": [ + { + "expression": "hU", + "result": true + } + ] + }, + { + "given": { + "!I\n\/": true + }, + "cases": [ + { + "expression": "\"!I\\n\\/\"", + "result": true + } + ] + }, + { + "given": { + "\uEEbF": true + }, + "cases": [ + { + "expression": "\"\\uEEbF\"", + "result": true + } + ] + }, + { + "given": { + "U)\t": true + }, + "cases": [ + { + "expression": "\"U)\\t\"", + "result": true + } + ] + }, + { + "given": { + "fa0_9": true + }, + "cases": [ + { + "expression": "fa0_9", + "result": true + } + ] + }, + { + "given": { + "/": true + }, + "cases": [ + { + "expression": "\"/\"", + "result": true + } + ] + }, + { + "given": { + "Gy": true + }, + "cases": [ + { + "expression": "Gy", + "result": true + } + ] + }, + { + "given": { + "\b": true + }, + "cases": [ + { + "expression": "\"\\b\"", + "result": true + } + ] + }, + { + "given": { + "<": true + }, + "cases": [ + { + "expression": "\"<\"", + "result": true + } + ] + }, + { + "given": { + "\t": true + }, + "cases": [ + { + "expression": "\"\\t\"", + "result": true + } + ] + }, + { + "given": { + "\t&\\\r": true + }, + "cases": [ + { + "expression": "\"\\t&\\\\\\r\"", + "result": true + } + ] + }, + { + "given": { + "#": true + }, + "cases": [ + { + "expression": "\"#\"", + "result": true + } + ] + }, + { + "given": { + "B__": true + }, + "cases": [ + { + "expression": "B__", + "result": true + } + ] + }, + { + "given": { + "\nS \n": true + }, + "cases": [ + { + "expression": "\"\\nS \\n\"", + "result": true + } + ] + }, + { + "given": { + "Bp": true + }, + "cases": [ + { + "expression": "Bp", + "result": true + } + ] + }, + { + "given": { + ",\t;": true + }, + "cases": [ + { + "expression": "\",\\t;\"", + "result": true + } + ] + }, + { + "given": { + "B_q": true + }, + "cases": [ + { + "expression": "B_q", + "result": true + } + ] + }, + { + "given": { + "\/+\t\n\b!Z": true + }, + "cases": [ + { + "expression": "\"\\/+\\t\\n\\b!Z\"", + "result": true + } + ] + }, + { + "given": { + "\udadd\udfc7\\ueFAc": true + }, + "cases": [ + { + "expression": "\"\udadd\udfc7\\\\ueFAc\"", + "result": true + } + ] + }, + { + "given": { + ":\f": true + }, + "cases": [ + { + "expression": "\":\\f\"", + "result": true + } + ] + }, + { + "given": { + "\/": true + }, + "cases": [ + { + "expression": "\"\\/\"", + "result": true + } + ] + }, + { + "given": { + "_BW_6Hg_Gl": true + }, + "cases": [ + { + "expression": "_BW_6Hg_Gl", + "result": true + } + ] + }, + { + "given": { + "\udbcf\udc02": true + }, + "cases": [ + { + "expression": "\"\udbcf\udc02\"", + "result": true + } + ] + }, + { + "given": { + "zs1DC": true + }, + "cases": [ + { + "expression": "zs1DC", + "result": true + } + ] + }, + { + "given": { + "__434": true + }, + "cases": [ + { + "expression": "__434", + "result": true + } + ] + }, + { + "given": { + "\udb94\udd41": true + }, + "cases": [ + { + "expression": "\"\udb94\udd41\"", + "result": true + } + ] + }, + { + "given": { + "Z_5": true + }, + "cases": [ + { + "expression": "Z_5", + "result": true + } + ] + }, + { + "given": { + "z_M_": true + }, + "cases": [ + { + "expression": "z_M_", + "result": true + } + ] + }, + { + "given": { + "YU_2": true + }, + "cases": [ + { + "expression": "YU_2", + "result": true + } + ] + }, + { + "given": { + "_0": true + }, + "cases": [ + { + "expression": "_0", + "result": true + } + ] + }, + { 
+ "given": { + "\b+": true + }, + "cases": [ + { + "expression": "\"\\b+\"", + "result": true + } + ] + }, + { + "given": { + "\"": true + }, + "cases": [ + { + "expression": "\"\\\"\"", + "result": true + } + ] + }, + { + "given": { + "D7": true + }, + "cases": [ + { + "expression": "D7", + "result": true + } + ] + }, + { + "given": { + "_62L": true + }, + "cases": [ + { + "expression": "_62L", + "result": true + } + ] + }, + { + "given": { + "\tK\t": true + }, + "cases": [ + { + "expression": "\"\\tK\\t\"", + "result": true + } + ] + }, + { + "given": { + "\n\\\f": true + }, + "cases": [ + { + "expression": "\"\\n\\\\\\f\"", + "result": true + } + ] + }, + { + "given": { + "I_": true + }, + "cases": [ + { + "expression": "I_", + "result": true + } + ] + }, + { + "given": { + "W_a0_": true + }, + "cases": [ + { + "expression": "W_a0_", + "result": true + } + ] + }, + { + "given": { + "BQ": true + }, + "cases": [ + { + "expression": "BQ", + "result": true + } + ] + }, + { + "given": { + "\tX$\uABBb": true + }, + "cases": [ + { + "expression": "\"\\tX$\\uABBb\"", + "result": true + } + ] + }, + { + "given": { + "Z9": true + }, + "cases": [ + { + "expression": "Z9", + "result": true + } + ] + }, + { + "given": { + "\b%\"\uda38\udd0f": true + }, + "cases": [ + { + "expression": "\"\\b%\\\"\uda38\udd0f\"", + "result": true + } + ] + }, + { + "given": { + "_F": true + }, + "cases": [ + { + "expression": "_F", + "result": true + } + ] + }, + { + "given": { + "!,": true + }, + "cases": [ + { + "expression": "\"!,\"", + "result": true + } + ] + }, + { + "given": { + "\"!": true + }, + "cases": [ + { + "expression": "\"\\\"!\"", + "result": true + } + ] + }, + { + "given": { + "Hh": true + }, + "cases": [ + { + "expression": "Hh", + "result": true + } + ] + }, + { + "given": { + "&": true + }, + "cases": [ + { + "expression": "\"&\"", + "result": true + } + ] + }, + { + "given": { + "9\r\\R": true + }, + "cases": [ + { + "expression": "\"9\\r\\\\R\"", + "result": true + } + ] + }, + { + "given": { + "M_k": true + }, + "cases": [ + { + "expression": "M_k", + "result": true + } + ] + }, + { + "given": { + "!\b\n\udb06\ude52\"\"": true + }, + "cases": [ + { + "expression": "\"!\\b\\n\udb06\ude52\\\"\\\"\"", + "result": true + } + ] + }, + { + "given": { + "6": true + }, + "cases": [ + { + "expression": "\"6\"", + "result": true + } + ] + }, + { + "given": { + "_7": true + }, + "cases": [ + { + "expression": "_7", + "result": true + } + ] + }, + { + "given": { + "0": true + }, + "cases": [ + { + "expression": "\"0\"", + "result": true + } + ] + }, + { + "given": { + "\\8\\": true + }, + "cases": [ + { + "expression": "\"\\\\8\\\\\"", + "result": true + } + ] + }, + { + "given": { + "b7eo": true + }, + "cases": [ + { + "expression": "b7eo", + "result": true + } + ] + }, + { + "given": { + "xIUo9": true + }, + "cases": [ + { + "expression": "xIUo9", + "result": true + } + ] + }, + { + "given": { + "5": true + }, + "cases": [ + { + "expression": "\"5\"", + "result": true + } + ] + }, + { + "given": { + "?": true + }, + "cases": [ + { + "expression": "\"?\"", + "result": true + } + ] + }, + { + "given": { + "sU": true + }, + "cases": [ + { + "expression": "sU", + "result": true + } + ] + }, + { + "given": { + "VH2&H\\\/": true + }, + "cases": [ + { + "expression": "\"VH2&H\\\\\\/\"", + "result": true + } + ] + }, + { + "given": { + "_C": true + }, + "cases": [ + { + "expression": "_C", + "result": true + } + ] + }, + { + "given": { + "_": true + }, + "cases": [ + { + "expression": "_", + "result": true + } 
+ ] + }, + { + "given": { + "<\t": true + }, + "cases": [ + { + "expression": "\"<\\t\"", + "result": true + } + ] + }, + { + "given": { + "\uD834\uDD1E": true + }, + "cases": [ + { + "expression": "\"\\uD834\\uDD1E\"", + "result": true + } + ] + } +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/indices.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/indices.json new file mode 100644 index 00000000..aa03b35d --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/indices.json @@ -0,0 +1,346 @@ +[{ + "given": + {"foo": {"bar": ["zero", "one", "two"]}}, + "cases": [ + { + "expression": "foo.bar[0]", + "result": "zero" + }, + { + "expression": "foo.bar[1]", + "result": "one" + }, + { + "expression": "foo.bar[2]", + "result": "two" + }, + { + "expression": "foo.bar[3]", + "result": null + }, + { + "expression": "foo.bar[-1]", + "result": "two" + }, + { + "expression": "foo.bar[-2]", + "result": "one" + }, + { + "expression": "foo.bar[-3]", + "result": "zero" + }, + { + "expression": "foo.bar[-4]", + "result": null + } + ] +}, +{ + "given": + {"foo": [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}]}, + "cases": [ + { + "expression": "foo.bar", + "result": null + }, + { + "expression": "foo[0].bar", + "result": "one" + }, + { + "expression": "foo[1].bar", + "result": "two" + }, + { + "expression": "foo[2].bar", + "result": "three" + }, + { + "expression": "foo[3].notbar", + "result": "four" + }, + { + "expression": "foo[3].bar", + "result": null + }, + { + "expression": "foo[0]", + "result": {"bar": "one"} + }, + { + "expression": "foo[1]", + "result": {"bar": "two"} + }, + { + "expression": "foo[2]", + "result": {"bar": "three"} + }, + { + "expression": "foo[3]", + "result": {"notbar": "four"} + }, + { + "expression": "foo[4]", + "result": null + } + ] +}, +{ + "given": [ + "one", "two", "three" + ], + "cases": [ + { + "expression": "[0]", + "result": "one" + }, + { + "expression": "[1]", + "result": "two" + }, + { + "expression": "[2]", + "result": "three" + }, + { + "expression": "[-1]", + "result": "three" + }, + { + "expression": "[-2]", + "result": "two" + }, + { + "expression": "[-3]", + "result": "one" + } + ] +}, +{ + "given": {"reservations": [ + {"instances": [{"foo": 1}, {"foo": 2}]} + ]}, + "cases": [ + { + "expression": "reservations[].instances[].foo", + "result": [1, 2] + }, + { + "expression": "reservations[].instances[].bar", + "result": [] + }, + { + "expression": "reservations[].notinstances[].foo", + "result": [] + }, + { + "expression": "reservations[].notinstances[].foo", + "result": [] + } + ] +}, +{ + "given": {"reservations": [{ + "instances": [ + {"foo": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]}, + {"foo": [{"bar": 5}, {"bar": 6}, {"notbar": [7]}, {"bar": 8}]}, + {"foo": "bar"}, + {"notfoo": [{"bar": 20}, {"bar": 21}, {"notbar": [7]}, {"bar": 22}]}, + {"bar": [{"baz": [1]}, {"baz": [2]}, {"baz": [3]}, {"baz": [4]}]}, + {"baz": [{"baz": [1, 2]}, {"baz": []}, {"baz": []}, {"baz": [3, 4]}]}, + {"qux": [{"baz": []}, {"baz": [1, 2, 3]}, {"baz": [4]}, {"baz": []}]} + ], + "otherkey": {"foo": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]} + }, { + "instances": [ + {"a": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]}, + {"b": [{"bar": 5}, {"bar": 6}, {"notbar": [7]}, {"bar": 8}]}, + {"c": "bar"}, + {"notfoo": [{"bar": 23}, {"bar": 24}, {"notbar": [7]}, {"bar": 25}]}, + {"qux": [{"baz": []}, {"baz": [1, 2, 3]}, {"baz": [4]}, {"baz": []}]} + ], + 
"otherkey": {"foo": [{"bar": 1}, {"bar": 2}, {"notbar": 3}, {"bar": 4}]} + } + ]}, + "cases": [ + { + "expression": "reservations[].instances[].foo[].bar", + "result": [1, 2, 4, 5, 6, 8] + }, + { + "expression": "reservations[].instances[].foo[].baz", + "result": [] + }, + { + "expression": "reservations[].instances[].notfoo[].bar", + "result": [20, 21, 22, 23, 24, 25] + }, + { + "expression": "reservations[].instances[].notfoo[].notbar", + "result": [[7], [7]] + }, + { + "expression": "reservations[].notinstances[].foo", + "result": [] + }, + { + "expression": "reservations[].instances[].foo[].notbar", + "result": [3, [7]] + }, + { + "expression": "reservations[].instances[].bar[].baz", + "result": [[1], [2], [3], [4]] + }, + { + "expression": "reservations[].instances[].baz[].baz", + "result": [[1, 2], [], [], [3, 4]] + }, + { + "expression": "reservations[].instances[].qux[].baz", + "result": [[], [1, 2, 3], [4], [], [], [1, 2, 3], [4], []] + }, + { + "expression": "reservations[].instances[].qux[].baz[]", + "result": [1, 2, 3, 4, 1, 2, 3, 4] + } + ] +}, +{ + "given": { + "foo": [ + [["one", "two"], ["three", "four"]], + [["five", "six"], ["seven", "eight"]], + [["nine"], ["ten"]] + ] + }, + "cases": [ + { + "expression": "foo[]", + "result": [["one", "two"], ["three", "four"], ["five", "six"], + ["seven", "eight"], ["nine"], ["ten"]] + }, + { + "expression": "foo[][0]", + "result": ["one", "three", "five", "seven", "nine", "ten"] + }, + { + "expression": "foo[][1]", + "result": ["two", "four", "six", "eight"] + }, + { + "expression": "foo[][0][0]", + "result": [] + }, + { + "expression": "foo[][2][2]", + "result": [] + }, + { + "expression": "foo[][0][0][100]", + "result": [] + } + ] +}, +{ + "given": { + "foo": [{ + "bar": [ + { + "qux": 2, + "baz": 1 + }, + { + "qux": 4, + "baz": 3 + } + ] + }, + { + "bar": [ + { + "qux": 6, + "baz": 5 + }, + { + "qux": 8, + "baz": 7 + } + ] + } + ] + }, + "cases": [ + { + "expression": "foo", + "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}] + }, + { + "expression": "foo[]", + "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}] + }, + { + "expression": "foo[].bar", + "result": [[{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}], + [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]] + }, + { + "expression": "foo[].bar[]", + "result": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}, + {"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}] + }, + { + "expression": "foo[].bar[].baz", + "result": [1, 3, 5, 7] + } + ] +}, +{ + "given": { + "string": "string", + "hash": {"foo": "bar", "bar": "baz"}, + "number": 23, + "nullvalue": null + }, + "cases": [ + { + "expression": "string[]", + "result": null + }, + { + "expression": "hash[]", + "result": null + }, + { + "expression": "number[]", + "result": null + }, + { + "expression": "nullvalue[]", + "result": null + }, + { + "expression": "string[].foo", + "result": null + }, + { + "expression": "hash[].foo", + "result": null + }, + { + "expression": "number[].foo", + "result": null + }, + { + "expression": "nullvalue[].foo", + "result": null + }, + { + "expression": "nullvalue[].foo[].bar", + "result": null + } + ] +} +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/literal.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/literal.json new file mode 100644 index 00000000..c6706b97 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/literal.json @@ -0,0 +1,185 @@ +[ + { + "given": { + "foo": [{"name": "a"}, {"name": "b"}], + "bar": {"baz": "qux"} + }, + "cases": [ + { + "expression": "`\"foo\"`", + "result": "foo" + }, + { + "comment": "Interpret escaped unicode.", + "expression": "`\"\\u03a6\"`", + "result": "Φ" + }, + { + "expression": "`\"✓\"`", + "result": "✓" + }, + { + "expression": "`[1, 2, 3]`", + "result": [1, 2, 3] + }, + { + "expression": "`{\"a\": \"b\"}`", + "result": {"a": "b"} + }, + { + "expression": "`true`", + "result": true + }, + { + "expression": "`false`", + "result": false + }, + { + "expression": "`null`", + "result": null + }, + { + "expression": "`0`", + "result": 0 + }, + { + "expression": "`1`", + "result": 1 + }, + { + "expression": "`2`", + "result": 2 + }, + { + "expression": "`3`", + "result": 3 + }, + { + "expression": "`4`", + "result": 4 + }, + { + "expression": "`5`", + "result": 5 + }, + { + "expression": "`6`", + "result": 6 + }, + { + "expression": "`7`", + "result": 7 + }, + { + "expression": "`8`", + "result": 8 + }, + { + "expression": "`9`", + "result": 9 + }, + { + "comment": "Escaping a backtick in quotes", + "expression": "`\"foo\\`bar\"`", + "result": "foo`bar" + }, + { + "comment": "Double quote in literal", + "expression": "`\"foo\\\"bar\"`", + "result": "foo\"bar" + }, + { + "expression": "`\"1\\`\"`", + "result": "1`" + }, + { + "comment": "Multiple literal expressions with escapes", + "expression": "`\"\\\\\"`.{a:`\"b\"`}", + "result": {"a": "b"} + }, + { + "comment": "literal . identifier", + "expression": "`{\"a\": \"b\"}`.a", + "result": "b" + }, + { + "comment": "literal . identifier . identifier", + "expression": "`{\"a\": {\"b\": \"c\"}}`.a.b", + "result": "c" + }, + { + "comment": "literal . 
identifier bracket-expr", + "expression": "`[0, 1, 2]`[1]", + "result": 1 + } + ] + }, + { + "comment": "Literals", + "given": {"type": "object"}, + "cases": [ + { + "comment": "Literal with leading whitespace", + "expression": "` {\"foo\": true}`", + "result": {"foo": true} + }, + { + "comment": "Literal with trailing whitespace", + "expression": "`{\"foo\": true} `", + "result": {"foo": true} + }, + { + "comment": "Literal on RHS of subexpr not allowed", + "expression": "foo.`\"bar\"`", + "error": "syntax" + } + ] + }, + { + "comment": "Raw String Literals", + "given": {}, + "cases": [ + { + "expression": "'foo'", + "result": "foo" + }, + { + "expression": "' foo '", + "result": " foo " + }, + { + "expression": "'0'", + "result": "0" + }, + { + "expression": "'newline\n'", + "result": "newline\n" + }, + { + "expression": "'\n'", + "result": "\n" + }, + { + "expression": "'✓'", + "result": "✓" + }, + { + "expression": "'𝄞'", + "result": "𝄞" + }, + { + "expression": "' [foo] '", + "result": " [foo] " + }, + { + "expression": "'[foo]'", + "result": "[foo]" + }, + { + "comment": "Do not interpret escaped unicode.", + "expression": "'\\u03a6'", + "result": "\\u03a6" + } + ] + } +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/multiselect.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/multiselect.json new file mode 100644 index 00000000..8f2a481e --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/multiselect.json @@ -0,0 +1,393 @@ +[{ + "given": { + "foo": { + "bar": "bar", + "baz": "baz", + "qux": "qux", + "nested": { + "one": { + "a": "first", + "b": "second", + "c": "third" + }, + "two": { + "a": "first", + "b": "second", + "c": "third" + }, + "three": { + "a": "first", + "b": "second", + "c": {"inner": "third"} + } + } + }, + "bar": 1, + "baz": 2, + "qux\"": 3 + }, + "cases": [ + { + "expression": "foo.{bar: bar}", + "result": {"bar": "bar"} + }, + { + "expression": "foo.{\"bar\": bar}", + "result": {"bar": "bar"} + }, + { + "expression": "foo.{\"foo.bar\": bar}", + "result": {"foo.bar": "bar"} + }, + { + "expression": "foo.{bar: bar, baz: baz}", + "result": {"bar": "bar", "baz": "baz"} + }, + { + "expression": "foo.{\"bar\": bar, \"baz\": baz}", + "result": {"bar": "bar", "baz": "baz"} + }, + { + "expression": "{\"baz\": baz, \"qux\\\"\": \"qux\\\"\"}", + "result": {"baz": 2, "qux\"": 3} + }, + { + "expression": "foo.{bar:bar,baz:baz}", + "result": {"bar": "bar", "baz": "baz"} + }, + { + "expression": "foo.{bar: bar,qux: qux}", + "result": {"bar": "bar", "qux": "qux"} + }, + { + "expression": "foo.{bar: bar, noexist: noexist}", + "result": {"bar": "bar", "noexist": null} + }, + { + "expression": "foo.{noexist: noexist, alsonoexist: alsonoexist}", + "result": {"noexist": null, "alsonoexist": null} + }, + { + "expression": "foo.badkey.{nokey: nokey, alsonokey: alsonokey}", + "result": null + }, + { + "expression": "foo.nested.*.{a: a,b: b}", + "result": [{"a": "first", "b": "second"}, + {"a": "first", "b": "second"}, + {"a": "first", "b": "second"}] + }, + { + "expression": "foo.nested.three.{a: a, cinner: c.inner}", + "result": {"a": "first", "cinner": "third"} + }, + { + "expression": "foo.nested.three.{a: a, c: c.inner.bad.key}", + "result": {"a": "first", "c": null} + }, + { + "expression": "foo.{a: nested.one.a, b: nested.two.b}", + "result": {"a": "first", "b": "second"} + }, + { + "expression": "{bar: bar, baz: baz}", + "result": {"bar": 1, "baz": 2} + }, + { + "expression": "{bar: bar}", + 
"result": {"bar": 1} + }, + { + "expression": "{otherkey: bar}", + "result": {"otherkey": 1} + }, + { + "expression": "{no: no, exist: exist}", + "result": {"no": null, "exist": null} + }, + { + "expression": "foo.[bar]", + "result": ["bar"] + }, + { + "expression": "foo.[bar,baz]", + "result": ["bar", "baz"] + }, + { + "expression": "foo.[bar,qux]", + "result": ["bar", "qux"] + }, + { + "expression": "foo.[bar,noexist]", + "result": ["bar", null] + }, + { + "expression": "foo.[noexist,alsonoexist]", + "result": [null, null] + } + ] +}, { + "given": { + "foo": {"bar": 1, "baz": [2, 3, 4]} + }, + "cases": [ + { + "expression": "foo.{bar:bar,baz:baz}", + "result": {"bar": 1, "baz": [2, 3, 4]} + }, + { + "expression": "foo.[bar,baz[0]]", + "result": [1, 2] + }, + { + "expression": "foo.[bar,baz[1]]", + "result": [1, 3] + }, + { + "expression": "foo.[bar,baz[2]]", + "result": [1, 4] + }, + { + "expression": "foo.[bar,baz[3]]", + "result": [1, null] + }, + { + "expression": "foo.[bar[0],baz[3]]", + "result": [null, null] + } + ] +}, { + "given": { + "foo": {"bar": 1, "baz": 2} + }, + "cases": [ + { + "expression": "foo.{bar: bar, baz: baz}", + "result": {"bar": 1, "baz": 2} + }, + { + "expression": "foo.[bar,baz]", + "result": [1, 2] + } + ] +}, { + "given": { + "foo": { + "bar": {"baz": [{"common": "first", "one": 1}, + {"common": "second", "two": 2}]}, + "ignoreme": 1, + "includeme": true + } + }, + "cases": [ + { + "expression": "foo.{bar: bar.baz[1],includeme: includeme}", + "result": {"bar": {"common": "second", "two": 2}, "includeme": true} + }, + { + "expression": "foo.{\"bar.baz.two\": bar.baz[1].two, includeme: includeme}", + "result": {"bar.baz.two": 2, "includeme": true} + }, + { + "expression": "foo.[includeme, bar.baz[*].common]", + "result": [true, ["first", "second"]] + }, + { + "expression": "foo.[includeme, bar.baz[*].none]", + "result": [true, []] + }, + { + "expression": "foo.[includeme, bar.baz[].common]", + "result": [true, ["first", "second"]] + } + ] +}, { + "given": { + "reservations": [{ + "instances": [ + {"id": "id1", + "name": "first"}, + {"id": "id2", + "name": "second"} + ]}, { + "instances": [ + {"id": "id3", + "name": "third"}, + {"id": "id4", + "name": "fourth"} + ]} + ]}, + "cases": [ + { + "expression": "reservations[*].instances[*].{id: id, name: name}", + "result": [[{"id": "id1", "name": "first"}, {"id": "id2", "name": "second"}], + [{"id": "id3", "name": "third"}, {"id": "id4", "name": "fourth"}]] + }, + { + "expression": "reservations[].instances[].{id: id, name: name}", + "result": [{"id": "id1", "name": "first"}, + {"id": "id2", "name": "second"}, + {"id": "id3", "name": "third"}, + {"id": "id4", "name": "fourth"}] + }, + { + "expression": "reservations[].instances[].[id, name]", + "result": [["id1", "first"], + ["id2", "second"], + ["id3", "third"], + ["id4", "fourth"]] + } + ] +}, +{ + "given": { + "foo": [{ + "bar": [ + { + "qux": 2, + "baz": 1 + }, + { + "qux": 4, + "baz": 3 + } + ] + }, + { + "bar": [ + { + "qux": 6, + "baz": 5 + }, + { + "qux": 8, + "baz": 7 + } + ] + } + ] + }, + "cases": [ + { + "expression": "foo", + "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}] + }, + { + "expression": "foo[]", + "result": [{"bar": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}]}, + {"bar": [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]}] + }, + { + "expression": "foo[].bar", + "result": [[{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}], + [{"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}]] + }, + { 
+ "expression": "foo[].bar[]", + "result": [{"qux": 2, "baz": 1}, {"qux": 4, "baz": 3}, + {"qux": 6, "baz": 5}, {"qux": 8, "baz": 7}] + }, + { + "expression": "foo[].bar[].[baz, qux]", + "result": [[1, 2], [3, 4], [5, 6], [7, 8]] + }, + { + "expression": "foo[].bar[].[baz]", + "result": [[1], [3], [5], [7]] + }, + { + "expression": "foo[].bar[].[baz, qux][]", + "result": [1, 2, 3, 4, 5, 6, 7, 8] + } + ] +}, +{ + "given": { + "foo": { + "baz": [ + { + "bar": "abc" + }, { + "bar": "def" + } + ], + "qux": ["zero"] + } + }, + "cases": [ + { + "expression": "foo.[baz[*].bar, qux[0]]", + "result": [["abc", "def"], "zero"] + } + ] +}, +{ + "given": { + "foo": { + "baz": [ + { + "bar": "a", + "bam": "b", + "boo": "c" + }, { + "bar": "d", + "bam": "e", + "boo": "f" + } + ], + "qux": ["zero"] + } + }, + "cases": [ + { + "expression": "foo.[baz[*].[bar, boo], qux[0]]", + "result": [[["a", "c" ], ["d", "f" ]], "zero"] + } + ] +}, +{ + "given": { + "foo": { + "baz": [ + { + "bar": "a", + "bam": "b", + "boo": "c" + }, { + "bar": "d", + "bam": "e", + "boo": "f" + } + ], + "qux": ["zero"] + } + }, + "cases": [ + { + "expression": "foo.[baz[*].not_there || baz[*].bar, qux[0]]", + "result": [["a", "d"], "zero"] + } + ] +}, +{ + "given": {"type": "object"}, + "cases": [ + { + "comment": "Nested multiselect", + "expression": "[[*],*]", + "result": [null, ["object"]] + } + ] +}, +{ + "given": [], + "cases": [ + { + "comment": "Nested multiselect", + "expression": "[[*]]", + "result": [[]] + } + ] +} +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/ormatch.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/ormatch.json new file mode 100644 index 00000000..2127cf44 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/ormatch.json @@ -0,0 +1,59 @@ +[{ + "given": + {"outer": {"foo": "foo", "bar": "bar", "baz": "baz"}}, + "cases": [ + { + "expression": "outer.foo || outer.bar", + "result": "foo" + }, + { + "expression": "outer.foo||outer.bar", + "result": "foo" + }, + { + "expression": "outer.bar || outer.baz", + "result": "bar" + }, + { + "expression": "outer.bar||outer.baz", + "result": "bar" + }, + { + "expression": "outer.bad || outer.foo", + "result": "foo" + }, + { + "expression": "outer.bad||outer.foo", + "result": "foo" + }, + { + "expression": "outer.foo || outer.bad", + "result": "foo" + }, + { + "expression": "outer.foo||outer.bad", + "result": "foo" + }, + { + "expression": "outer.bad || outer.alsobad", + "result": null + }, + { + "expression": "outer.bad||outer.alsobad", + "result": null + } + ] +}, { + "given": + {"outer": {"foo": "foo", "bool": false, "empty_list": [], "empty_string": ""}}, + "cases": [ + { + "expression": "outer.empty_string || outer.foo", + "result": "foo" + }, + { + "expression": "outer.nokey || outer.bool || outer.empty_list || outer.empty_string || outer.foo", + "result": "foo" + } + ] +}] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/pipe.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/pipe.json new file mode 100644 index 00000000..b10c0a49 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/pipe.json @@ -0,0 +1,131 @@ +[{ + "given": { + "foo": { + "bar": { + "baz": "subkey" + }, + "other": { + "baz": "subkey" + }, + "other2": { + "baz": "subkey" + }, + "other3": { + "notbaz": ["a", "b", "c"] + }, + "other4": { + "notbaz": ["a", "b", "c"] + } + } + }, + "cases": [ + { + "expression": "foo.*.baz | [0]", + 
"result": "subkey" + }, + { + "expression": "foo.*.baz | [1]", + "result": "subkey" + }, + { + "expression": "foo.*.baz | [2]", + "result": "subkey" + }, + { + "expression": "foo.bar.* | [0]", + "result": "subkey" + }, + { + "expression": "foo.*.notbaz | [*]", + "result": [["a", "b", "c"], ["a", "b", "c"]] + }, + { + "expression": "{\"a\": foo.bar, \"b\": foo.other} | *.baz", + "result": ["subkey", "subkey"] + } + ] +}, { + "given": { + "foo": { + "bar": { + "baz": "one" + }, + "other": { + "baz": "two" + }, + "other2": { + "baz": "three" + }, + "other3": { + "notbaz": ["a", "b", "c"] + }, + "other4": { + "notbaz": ["d", "e", "f"] + } + } + }, + "cases": [ + { + "expression": "foo | bar", + "result": {"baz": "one"} + }, + { + "expression": "foo | bar | baz", + "result": "one" + }, + { + "expression": "foo|bar| baz", + "result": "one" + }, + { + "expression": "not_there | [0]", + "result": null + }, + { + "expression": "not_there | [0]", + "result": null + }, + { + "expression": "[foo.bar, foo.other] | [0]", + "result": {"baz": "one"} + }, + { + "expression": "{\"a\": foo.bar, \"b\": foo.other} | a", + "result": {"baz": "one"} + }, + { + "expression": "{\"a\": foo.bar, \"b\": foo.other} | b", + "result": {"baz": "two"} + }, + { + "expression": "foo.bam || foo.bar | baz", + "result": "one" + }, + { + "expression": "foo | not_there || bar", + "result": {"baz": "one"} + } + ] +}, { + "given": { + "foo": [{ + "bar": [{ + "baz": "one" + }, { + "baz": "two" + }] + }, { + "bar": [{ + "baz": "three" + }, { + "baz": "four" + }] + }] + }, + "cases": [ + { + "expression": "foo[*].bar[*] | [0][0]", + "result": {"baz": "one"} + } + ] +}] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/slice.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/slice.json new file mode 100644 index 00000000..35947727 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/slice.json @@ -0,0 +1,187 @@ +[{ + "given": { + "foo": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], + "bar": { + "baz": 1 + } + }, + "cases": [ + { + "expression": "bar[0:10]", + "result": null + }, + { + "expression": "foo[0:10:1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0:10]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0:10:]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0::1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0::]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[0:]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[:10:1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[::1]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[:10:]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[::]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[:]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[1:9]", + "result": [1, 2, 3, 4, 5, 6, 7, 8] + }, + { + "expression": "foo[0:10:2]", + "result": [0, 2, 4, 6, 8] + }, + { + "expression": "foo[5:]", + "result": [5, 6, 7, 8, 9] + }, + { + "expression": "foo[5::2]", + "result": [5, 7, 9] + }, + { + "expression": "foo[::2]", + "result": [0, 2, 4, 6, 8] + }, + { + "expression": "foo[::-1]", + "result": [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + }, + { + "expression": "foo[1::2]", + "result": [1, 3, 5, 7, 9] + }, + { + "expression": "foo[10:0:-1]", + "result": [9, 8, 
7, 6, 5, 4, 3, 2, 1] + }, + { + "expression": "foo[10:5:-1]", + "result": [9, 8, 7, 6] + }, + { + "expression": "foo[8:2:-2]", + "result": [8, 6, 4] + }, + { + "expression": "foo[0:20]", + "result": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + }, + { + "expression": "foo[10:-20:-1]", + "result": [9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + }, + { + "expression": "foo[10:-20]", + "result": [] + }, + { + "expression": "foo[-4:-1]", + "result": [6, 7, 8] + }, + { + "expression": "foo[:-5:-1]", + "result": [9, 8, 7, 6] + }, + { + "expression": "foo[8:2:0]", + "error": "invalid-value" + }, + { + "expression": "foo[8:2:0:1]", + "error": "syntax" + }, + { + "expression": "foo[8:2&]", + "error": "syntax" + }, + { + "expression": "foo[2:a:3]", + "error": "syntax" + } + ] +}, { + "given": { + "foo": [{"a": 1}, {"a": 2}, {"a": 3}], + "bar": [{"a": {"b": 1}}, {"a": {"b": 2}}, + {"a": {"b": 3}}], + "baz": 50 + }, + "cases": [ + { + "expression": "foo[:2].a", + "result": [1, 2] + }, + { + "expression": "foo[:2].b", + "result": [] + }, + { + "expression": "foo[:2].a.b", + "result": [] + }, + { + "expression": "bar[::-1].a.b", + "result": [3, 2, 1] + }, + { + "expression": "bar[:2].a.b", + "result": [1, 2] + }, + { + "expression": "baz[:2].a", + "result": null + } + ] +}, { + "given": [{"a": 1}, {"a": 2}, {"a": 3}], + "cases": [ + { + "expression": "[:]", + "result": [{"a": 1}, {"a": 2}, {"a": 3}] + }, + { + "expression": "[:2].a", + "result": [1, 2] + }, + { + "expression": "[::-1].a", + "result": [3, 2, 1] + }, + { + "expression": "[:2].b", + "result": [] + } + ] +}] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/syntax.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/syntax.json new file mode 100644 index 00000000..003c2945 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/syntax.json @@ -0,0 +1,616 @@ +[{ + "comment": "Dot syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo.bar", + "result": null + }, + { + "expression": "foo.1", + "error": "syntax" + }, + { + "expression": "foo.-11", + "error": "syntax" + }, + { + "expression": "foo", + "result": null + }, + { + "expression": "foo.", + "error": "syntax" + }, + { + "expression": "foo.", + "error": "syntax" + }, + { + "expression": ".foo", + "error": "syntax" + }, + { + "expression": "foo..bar", + "error": "syntax" + }, + { + "expression": "foo.bar.", + "error": "syntax" + }, + { + "expression": "foo[.]", + "error": "syntax" + } + ] +}, + { + "comment": "Simple token errors", + "given": {"type": "object"}, + "cases": [ + { + "expression": ".", + "error": "syntax" + }, + { + "expression": ":", + "error": "syntax" + }, + { + "expression": ",", + "error": "syntax" + }, + { + "expression": "]", + "error": "syntax" + }, + { + "expression": "[", + "error": "syntax" + }, + { + "expression": "}", + "error": "syntax" + }, + { + "expression": "{", + "error": "syntax" + }, + { + "expression": ")", + "error": "syntax" + }, + { + "expression": "(", + "error": "syntax" + }, + { + "expression": "((&", + "error": "syntax" + }, + { + "expression": "a[", + "error": "syntax" + }, + { + "expression": "a]", + "error": "syntax" + }, + { + "expression": "a][", + "error": "syntax" + }, + { + "expression": "!", + "error": "syntax" + } + ] + }, + { + "comment": "Boolean syntax errors", + "given": {"type": "object"}, + "cases": [ + { + "expression": "![!(!", + "error": "syntax" + } + ] + }, + { + "comment": "Wildcard syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": 
"*", + "result": ["object"] + }, + { + "expression": "*.*", + "result": [] + }, + { + "expression": "*.foo", + "result": [] + }, + { + "expression": "*[0]", + "result": [] + }, + { + "expression": ".*", + "error": "syntax" + }, + { + "expression": "*foo", + "error": "syntax" + }, + { + "expression": "*0", + "error": "syntax" + }, + { + "expression": "foo[*]bar", + "error": "syntax" + }, + { + "expression": "foo[*]*", + "error": "syntax" + } + ] + }, + { + "comment": "Flatten syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "[]", + "result": null + } + ] + }, + { + "comment": "Simple bracket syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "[0]", + "result": null + }, + { + "expression": "[*]", + "result": null + }, + { + "expression": "*.[0]", + "error": "syntax" + }, + { + "expression": "*.[\"0\"]", + "result": [[null]] + }, + { + "expression": "[*].bar", + "result": null + }, + { + "expression": "[*][0]", + "result": null + }, + { + "expression": "foo[#]", + "error": "syntax" + } + ] + }, + { + "comment": "Multi-select list syntax", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo[0]", + "result": null + }, + { + "comment": "Valid multi-select of a list", + "expression": "foo[0, 1]", + "error": "syntax" + }, + { + "expression": "foo.[0]", + "error": "syntax" + }, + { + "expression": "foo.[*]", + "result": null + }, + { + "comment": "Multi-select of a list with trailing comma", + "expression": "foo[0, ]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list with trailing comma and no close", + "expression": "foo[0,", + "error": "syntax" + }, + { + "comment": "Multi-select of a list with trailing comma and no close", + "expression": "foo.[a", + "error": "syntax" + }, + { + "comment": "Multi-select of a list with extra comma", + "expression": "foo[0,, 1]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using an identifier index", + "expression": "foo[abc]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using identifier indices", + "expression": "foo[abc, def]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using an identifier index", + "expression": "foo[abc, 1]", + "error": "syntax" + }, + { + "comment": "Multi-select of a list using an identifier index with trailing comma", + "expression": "foo[abc, ]", + "error": "syntax" + }, + { + "comment": "Valid multi-select of a hash using an identifier index", + "expression": "foo.[abc]", + "result": null + }, + { + "comment": "Valid multi-select of a hash", + "expression": "foo.[abc, def]", + "result": null + }, + { + "comment": "Multi-select of a hash using a numeric index", + "expression": "foo.[abc, 1]", + "error": "syntax" + }, + { + "comment": "Multi-select of a hash with a trailing comma", + "expression": "foo.[abc, ]", + "error": "syntax" + }, + { + "comment": "Multi-select of a hash with extra commas", + "expression": "foo.[abc,, def]", + "error": "syntax" + }, + { + "comment": "Multi-select of a hash using number indices", + "expression": "foo.[0, 1]", + "error": "syntax" + } + ] + }, + { + "comment": "Multi-select hash syntax", + "given": {"type": "object"}, + "cases": [ + { + "comment": "No key or value", + "expression": "a{}", + "error": "syntax" + }, + { + "comment": "No closing token", + "expression": "a{", + "error": "syntax" + }, + { + "comment": "Not a key value pair", + "expression": "a{foo}", + "error": "syntax" + }, + { + "comment": "Missing value and closing character", + "expression": 
"a{foo:", + "error": "syntax" + }, + { + "comment": "Missing closing character", + "expression": "a{foo: 0", + "error": "syntax" + }, + { + "comment": "Missing value", + "expression": "a{foo:}", + "error": "syntax" + }, + { + "comment": "Trailing comma and no closing character", + "expression": "a{foo: 0, ", + "error": "syntax" + }, + { + "comment": "Missing value with trailing comma", + "expression": "a{foo: ,}", + "error": "syntax" + }, + { + "comment": "Accessing Array using an identifier", + "expression": "a{foo: bar}", + "error": "syntax" + }, + { + "expression": "a{foo: 0}", + "error": "syntax" + }, + { + "comment": "Missing key-value pair", + "expression": "a.{}", + "error": "syntax" + }, + { + "comment": "Not a key-value pair", + "expression": "a.{foo}", + "error": "syntax" + }, + { + "comment": "Missing value", + "expression": "a.{foo:}", + "error": "syntax" + }, + { + "comment": "Missing value with trailing comma", + "expression": "a.{foo: ,}", + "error": "syntax" + }, + { + "comment": "Valid multi-select hash extraction", + "expression": "a.{foo: bar}", + "result": null + }, + { + "comment": "Valid multi-select hash extraction", + "expression": "a.{foo: bar, baz: bam}", + "result": null + }, + { + "comment": "Trailing comma", + "expression": "a.{foo: bar, }", + "error": "syntax" + }, + { + "comment": "Missing key in second key-value pair", + "expression": "a.{foo: bar, baz}", + "error": "syntax" + }, + { + "comment": "Missing value in second key-value pair", + "expression": "a.{foo: bar, baz:}", + "error": "syntax" + }, + { + "comment": "Trailing comma", + "expression": "a.{foo: bar, baz: bam, }", + "error": "syntax" + }, + { + "comment": "Nested multi select", + "expression": "{\"\\\\\":{\" \":*}}", + "result": {"\\": {" ": ["object"]}} + } + ] + }, + { + "comment": "Or expressions", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo || bar", + "result": null + }, + { + "expression": "foo ||", + "error": "syntax" + }, + { + "expression": "foo.|| bar", + "error": "syntax" + }, + { + "expression": " || foo", + "error": "syntax" + }, + { + "expression": "foo || || foo", + "error": "syntax" + }, + { + "expression": "foo.[a || b]", + "result": null + }, + { + "expression": "foo.[a ||]", + "error": "syntax" + }, + { + "expression": "\"foo", + "error": "syntax" + } + ] + }, + { + "comment": "Filter expressions", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo[?bar==`\"baz\"`]", + "result": null + }, + { + "expression": "foo[? 
bar == `\"baz\"` ]", + "result": null + }, + { + "expression": "foo[ ?bar==`\"baz\"`]", + "error": "syntax" + }, + { + "expression": "foo[?bar==]", + "error": "syntax" + }, + { + "expression": "foo[?==]", + "error": "syntax" + }, + { + "expression": "foo[?==bar]", + "error": "syntax" + }, + { + "expression": "foo[?bar==baz?]", + "error": "syntax" + }, + { + "expression": "foo[?a.b.c==d.e.f]", + "result": null + }, + { + "expression": "foo[?bar==`[0, 1, 2]`]", + "result": null + }, + { + "expression": "foo[?bar==`[\"a\", \"b\", \"c\"]`]", + "result": null + }, + { + "comment": "Literal char not escaped", + "expression": "foo[?bar==`[\"foo`bar\"]`]", + "error": "syntax" + }, + { + "comment": "Literal char escaped", + "expression": "foo[?bar==`[\"foo\\`bar\"]`]", + "result": null + }, + { + "comment": "Unknown comparator", + "expression": "foo[?bar<>baz]", + "error": "syntax" + }, + { + "comment": "Unknown comparator", + "expression": "foo[?bar^baz]", + "error": "syntax" + }, + { + "expression": "foo[bar==baz]", + "error": "syntax" + }, + { + "comment": "Quoted identifier in filter expression no spaces", + "expression": "[?\"\\\\\">`\"foo\"`]", + "result": null + }, + { + "comment": "Quoted identifier in filter expression with spaces", + "expression": "[?\"\\\\\" > `\"foo\"`]", + "result": null + } + ] + }, + { + "comment": "Filter expression errors", + "given": {"type": "object"}, + "cases": [ + { + "expression": "bar.`\"anything\"`", + "error": "syntax" + }, + { + "expression": "bar.baz.noexists.`\"literal\"`", + "error": "syntax" + }, + { + "comment": "Literal wildcard projection", + "expression": "foo[*].`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[*].name.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[].name.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[].name.`\"literal\"`.`\"subliteral\"`", + "error": "syntax" + }, + { + "comment": "Projecting a literal onto an empty list", + "expression": "foo[*].name.noexist.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "foo[].name.noexist.`\"literal\"`", + "error": "syntax" + }, + { + "expression": "twolen[*].`\"foo\"`", + "error": "syntax" + }, + { + "comment": "Two level projection of a literal", + "expression": "twolen[*].threelen[*].`\"bar\"`", + "error": "syntax" + }, + { + "comment": "Two level flattened projection of a literal", + "expression": "twolen[].threelen[].`\"bar\"`", + "error": "syntax" + } + ] + }, + { + "comment": "Identifiers", + "given": {"type": "object"}, + "cases": [ + { + "expression": "foo", + "result": null + }, + { + "expression": "\"foo\"", + "result": null + }, + { + "expression": "\"\\\\\"", + "result": null + } + ] + }, + { + "comment": "Combined syntax", + "given": [], + "cases": [ + { + "expression": "*||*|*|*", + "result": null + }, + { + "expression": "*[]||[*]", + "result": [] + }, + { + "expression": "[*.*]", + "result": [null] + } + ] + } +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/unicode.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/unicode.json new file mode 100644 index 00000000..6b07b0b6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/unicode.json @@ -0,0 +1,38 @@ +[ + { + "given": {"foo": [{"✓": "✓"}, {"✓": "✗"}]}, + "cases": [ + { + "expression": "foo[].\"✓\"", + "result": ["✓", "✗"] + } + ] + }, + { + "given": {"☯": true}, + "cases": [ + { + "expression": "\"☯\"", + "result": true + } + ] + }, + { + "given": 
{"♪♫•*¨*•.¸¸❤¸¸.•*¨*•♫♪": true}, + "cases": [ + { + "expression": "\"♪♫•*¨*•.¸¸❤¸¸.•*¨*•♫♪\"", + "result": true + } + ] + }, + { + "given": {"☃": true}, + "cases": [ + { + "expression": "\"☃\"", + "result": true + } + ] + } +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/wildcard.json b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/wildcard.json new file mode 100644 index 00000000..3bcec302 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/compliance/wildcard.json @@ -0,0 +1,460 @@ +[{ + "given": { + "foo": { + "bar": { + "baz": "val" + }, + "other": { + "baz": "val" + }, + "other2": { + "baz": "val" + }, + "other3": { + "notbaz": ["a", "b", "c"] + }, + "other4": { + "notbaz": ["a", "b", "c"] + }, + "other5": { + "other": { + "a": 1, + "b": 1, + "c": 1 + } + } + } + }, + "cases": [ + { + "expression": "foo.*.baz", + "result": ["val", "val", "val"] + }, + { + "expression": "foo.bar.*", + "result": ["val"] + }, + { + "expression": "foo.*.notbaz", + "result": [["a", "b", "c"], ["a", "b", "c"]] + }, + { + "expression": "foo.*.notbaz[0]", + "result": ["a", "a"] + }, + { + "expression": "foo.*.notbaz[-1]", + "result": ["c", "c"] + } + ] +}, { + "given": { + "foo": { + "first-1": { + "second-1": "val" + }, + "first-2": { + "second-1": "val" + }, + "first-3": { + "second-1": "val" + } + } + }, + "cases": [ + { + "expression": "foo.*", + "result": [{"second-1": "val"}, {"second-1": "val"}, + {"second-1": "val"}] + }, + { + "expression": "foo.*.*", + "result": [["val"], ["val"], ["val"]] + }, + { + "expression": "foo.*.*.*", + "result": [[], [], []] + }, + { + "expression": "foo.*.*.*.*", + "result": [[], [], []] + } + ] +}, { + "given": { + "foo": { + "bar": "one" + }, + "other": { + "bar": "one" + }, + "nomatch": { + "notbar": "three" + } + }, + "cases": [ + { + "expression": "*.bar", + "result": ["one", "one"] + } + ] +}, { + "given": { + "top1": { + "sub1": {"foo": "one"} + }, + "top2": { + "sub1": {"foo": "one"} + } + }, + "cases": [ + { + "expression": "*", + "result": [{"sub1": {"foo": "one"}}, + {"sub1": {"foo": "one"}}] + }, + { + "expression": "*.sub1", + "result": [{"foo": "one"}, + {"foo": "one"}] + }, + { + "expression": "*.*", + "result": [[{"foo": "one"}], + [{"foo": "one"}]] + }, + { + "expression": "*.*.foo[]", + "result": ["one", "one"] + }, + { + "expression": "*.sub1.foo", + "result": ["one", "one"] + } + ] +}, +{ + "given": + {"foo": [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}]}, + "cases": [ + { + "expression": "foo[*].bar", + "result": ["one", "two", "three"] + }, + { + "expression": "foo[*].notbar", + "result": ["four"] + } + ] +}, +{ + "given": + [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}], + "cases": [ + { + "expression": "[*]", + "result": [{"bar": "one"}, {"bar": "two"}, {"bar": "three"}, {"notbar": "four"}] + }, + { + "expression": "[*].bar", + "result": ["one", "two", "three"] + }, + { + "expression": "[*].notbar", + "result": ["four"] + } + ] +}, +{ + "given": { + "foo": { + "bar": [ + {"baz": ["one", "two", "three"]}, + {"baz": ["four", "five", "six"]}, + {"baz": ["seven", "eight", "nine"]} + ] + } + }, + "cases": [ + { + "expression": "foo.bar[*].baz", + "result": [["one", "two", "three"], ["four", "five", "six"], ["seven", "eight", "nine"]] + }, + { + "expression": "foo.bar[*].baz[0]", + "result": ["one", "four", "seven"] + }, + { + "expression": "foo.bar[*].baz[1]", + "result": ["two", "five", "eight"] + }, + { + "expression": 
"foo.bar[*].baz[2]", + "result": ["three", "six", "nine"] + }, + { + "expression": "foo.bar[*].baz[3]", + "result": [] + } + ] +}, +{ + "given": { + "foo": { + "bar": [["one", "two"], ["three", "four"]] + } + }, + "cases": [ + { + "expression": "foo.bar[*]", + "result": [["one", "two"], ["three", "four"]] + }, + { + "expression": "foo.bar[0]", + "result": ["one", "two"] + }, + { + "expression": "foo.bar[0][0]", + "result": "one" + }, + { + "expression": "foo.bar[0][0][0]", + "result": null + }, + { + "expression": "foo.bar[0][0][0][0]", + "result": null + }, + { + "expression": "foo[0][0]", + "result": null + } + ] +}, +{ + "given": { + "foo": [ + {"bar": [{"kind": "basic"}, {"kind": "intermediate"}]}, + {"bar": [{"kind": "advanced"}, {"kind": "expert"}]}, + {"bar": "string"} + ] + + }, + "cases": [ + { + "expression": "foo[*].bar[*].kind", + "result": [["basic", "intermediate"], ["advanced", "expert"]] + }, + { + "expression": "foo[*].bar[0].kind", + "result": ["basic", "advanced"] + } + ] +}, +{ + "given": { + "foo": [ + {"bar": {"kind": "basic"}}, + {"bar": {"kind": "intermediate"}}, + {"bar": {"kind": "advanced"}}, + {"bar": {"kind": "expert"}}, + {"bar": "string"} + ] + }, + "cases": [ + { + "expression": "foo[*].bar.kind", + "result": ["basic", "intermediate", "advanced", "expert"] + } + ] +}, +{ + "given": { + "foo": [{"bar": ["one", "two"]}, {"bar": ["three", "four"]}, {"bar": ["five"]}] + }, + "cases": [ + { + "expression": "foo[*].bar[0]", + "result": ["one", "three", "five"] + }, + { + "expression": "foo[*].bar[1]", + "result": ["two", "four"] + }, + { + "expression": "foo[*].bar[2]", + "result": [] + } + ] +}, +{ + "given": { + "foo": [{"bar": []}, {"bar": []}, {"bar": []}] + }, + "cases": [ + { + "expression": "foo[*].bar[0]", + "result": [] + } + ] +}, +{ + "given": { + "foo": [["one", "two"], ["three", "four"], ["five"]] + }, + "cases": [ + { + "expression": "foo[*][0]", + "result": ["one", "three", "five"] + }, + { + "expression": "foo[*][1]", + "result": ["two", "four"] + } + ] +}, +{ + "given": { + "foo": [ + [ + ["one", "two"], ["three", "four"] + ], [ + ["five", "six"], ["seven", "eight"] + ], [ + ["nine"], ["ten"] + ] + ] + }, + "cases": [ + { + "expression": "foo[*][0]", + "result": [["one", "two"], ["five", "six"], ["nine"]] + }, + { + "expression": "foo[*][1]", + "result": [["three", "four"], ["seven", "eight"], ["ten"]] + }, + { + "expression": "foo[*][0][0]", + "result": ["one", "five", "nine"] + }, + { + "expression": "foo[*][1][0]", + "result": ["three", "seven", "ten"] + }, + { + "expression": "foo[*][0][1]", + "result": ["two", "six"] + }, + { + "expression": "foo[*][1][1]", + "result": ["four", "eight"] + }, + { + "expression": "foo[*][2]", + "result": [] + }, + { + "expression": "foo[*][2][2]", + "result": [] + }, + { + "expression": "bar[*]", + "result": null + }, + { + "expression": "bar[*].baz[*]", + "result": null + } + ] +}, +{ + "given": { + "string": "string", + "hash": {"foo": "bar", "bar": "baz"}, + "number": 23, + "nullvalue": null + }, + "cases": [ + { + "expression": "string[*]", + "result": null + }, + { + "expression": "hash[*]", + "result": null + }, + { + "expression": "number[*]", + "result": null + }, + { + "expression": "nullvalue[*]", + "result": null + }, + { + "expression": "string[*].foo", + "result": null + }, + { + "expression": "hash[*].foo", + "result": null + }, + { + "expression": "number[*].foo", + "result": null + }, + { + "expression": "nullvalue[*].foo", + "result": null + }, + { + "expression": "nullvalue[*].foo[*].bar", + 
"result": null + } + ] +}, +{ + "given": { + "string": "string", + "hash": {"foo": "val", "bar": "val"}, + "number": 23, + "array": [1, 2, 3], + "nullvalue": null + }, + "cases": [ + { + "expression": "string.*", + "result": null + }, + { + "expression": "hash.*", + "result": ["val", "val"] + }, + { + "expression": "number.*", + "result": null + }, + { + "expression": "array.*", + "result": null + }, + { + "expression": "nullvalue.*", + "result": null + } + ] +}, +{ + "given": { + "a": [0, 1, 2], + "b": [0, 1, 2] + }, + "cases": [ + { + "expression": "*[0]", + "result": [0, 0] + } + ] +} +] diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/functions.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/functions.go new file mode 100644 index 00000000..9b7cd89b --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/functions.go @@ -0,0 +1,842 @@ +package jmespath + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +type jpFunction func(arguments []interface{}) (interface{}, error) + +type jpType string + +const ( + jpUnknown jpType = "unknown" + jpNumber jpType = "number" + jpString jpType = "string" + jpArray jpType = "array" + jpObject jpType = "object" + jpArrayNumber jpType = "array[number]" + jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(string) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(string) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type byExprFloat struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprFloat) Len() int { + return len(a.items) +} +func (a *byExprFloat) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprFloat) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(float64) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. 
+		return true
+	}
+	jth, ok := second.(float64)
+	if !ok {
+		a.hasError = true
+		return true
+	}
+	return ith < jth
+}
+
+type functionCaller struct {
+	functionTable map[string]functionEntry
+}
+
+func newFunctionCaller() *functionCaller {
+	caller := &functionCaller{}
+	caller.functionTable = map[string]functionEntry{
+		"length": {
+			name: "length",
+			arguments: []argSpec{
+				{types: []jpType{jpString, jpArray, jpObject}},
+			},
+			handler: jpfLength,
+		},
+		"starts_with": {
+			name: "starts_with",
+			arguments: []argSpec{
+				{types: []jpType{jpString}},
+				{types: []jpType{jpString}},
+			},
+			handler: jpfStartsWith,
+		},
+		"abs": {
+			name: "abs",
+			arguments: []argSpec{
+				{types: []jpType{jpNumber}},
+			},
+			handler: jpfAbs,
+		},
+		"avg": {
+			name: "avg",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayNumber}},
+			},
+			handler: jpfAvg,
+		},
+		"ceil": {
+			name: "ceil",
+			arguments: []argSpec{
+				{types: []jpType{jpNumber}},
+			},
+			handler: jpfCeil,
+		},
+		"contains": {
+			name: "contains",
+			arguments: []argSpec{
+				{types: []jpType{jpArray, jpString}},
+				{types: []jpType{jpAny}},
+			},
+			handler: jpfContains,
+		},
+		"ends_with": {
+			name: "ends_with",
+			arguments: []argSpec{
+				{types: []jpType{jpString}},
+				{types: []jpType{jpString}},
+			},
+			handler: jpfEndsWith,
+		},
+		"floor": {
+			name: "floor",
+			arguments: []argSpec{
+				{types: []jpType{jpNumber}},
+			},
+			handler: jpfFloor,
+		},
+		"map": {
+			name: "map",
+			arguments: []argSpec{
+				{types: []jpType{jpExpref}},
+				{types: []jpType{jpArray}},
+			},
+			handler:   jpfMap,
+			hasExpRef: true,
+		},
+		"max": {
+			name: "max",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayNumber, jpArrayString}},
+			},
+			handler: jpfMax,
+		},
+		"merge": {
+			name: "merge",
+			arguments: []argSpec{
+				{types: []jpType{jpObject}, variadic: true},
+			},
+			handler: jpfMerge,
+		},
+		"max_by": {
+			name: "max_by",
+			arguments: []argSpec{
+				{types: []jpType{jpArray}},
+				{types: []jpType{jpExpref}},
+			},
+			handler:   jpfMaxBy,
+			hasExpRef: true,
+		},
+		"sum": {
+			name: "sum",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayNumber}},
+			},
+			handler: jpfSum,
+		},
+		"min": {
+			name: "min",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayNumber, jpArrayString}},
+			},
+			handler: jpfMin,
+		},
+		"min_by": {
+			name: "min_by",
+			arguments: []argSpec{
+				{types: []jpType{jpArray}},
+				{types: []jpType{jpExpref}},
+			},
+			handler:   jpfMinBy,
+			hasExpRef: true,
+		},
+		"type": {
+			name: "type",
+			arguments: []argSpec{
+				{types: []jpType{jpAny}},
+			},
+			handler: jpfType,
+		},
+		"keys": {
+			name: "keys",
+			arguments: []argSpec{
+				{types: []jpType{jpObject}},
+			},
+			handler: jpfKeys,
+		},
+		"values": {
+			name: "values",
+			arguments: []argSpec{
+				{types: []jpType{jpObject}},
+			},
+			handler: jpfValues,
+		},
+		"sort": {
+			name: "sort",
+			arguments: []argSpec{
+				{types: []jpType{jpArrayString, jpArrayNumber}},
+			},
+			handler: jpfSort,
+		},
+		"sort_by": {
+			name: "sort_by",
+			arguments: []argSpec{
+				{types: []jpType{jpArray}},
+				{types: []jpType{jpExpref}},
+			},
+			handler:   jpfSortBy,
+			hasExpRef: true,
+		},
+		"join": {
+			name: "join",
+			arguments: []argSpec{
+				{types: []jpType{jpString}},
+				{types: []jpType{jpArrayString}},
+			},
+			handler: jpfJoin,
+		},
+		"reverse": {
+			name: "reverse",
+			arguments: []argSpec{
+				{types: []jpType{jpArray, jpString}},
+			},
+			handler: jpfReverse,
+		},
+		"to_array": {
+			name: "to_array",
+			arguments: []argSpec{
+				{types: []jpType{jpAny}},
+			},
+			handler: jpfToArray,
+		},
+		"to_string": {
+			name: "to_string",
+			arguments: []argSpec{
+				{types: []jpType{jpAny}},
+			},
+			handler: jpfToString,
+		},
+		"to_number": {
+			name: "to_number",
+			arguments: []argSpec{
+				{types: []jpType{jpAny}},
+			},
+			handler: jpfToNumber,
+		},
+		"not_null": {
+			name: "not_null",
+			arguments: []argSpec{
+				{types: []jpType{jpAny}, variadic: true},
+			},
+			handler: jpfNotNull,
+		},
+	}
+	return caller
+}
+
+func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) {
+	if len(e.arguments) == 0 {
+		return arguments, nil
+	}
+	if !e.arguments[len(e.arguments)-1].variadic {
+		if len(e.arguments) != len(arguments) {
+			return nil, errors.New("incorrect number of args")
+		}
+		for i, spec := range e.arguments {
+			userArg := arguments[i]
+			err := spec.typeCheck(userArg)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return arguments, nil
+	}
+	if len(arguments) < len(e.arguments) {
+		return nil, errors.New("Invalid arity.")
+	}
+	return arguments, nil
+}
+
+func (a *argSpec) typeCheck(arg interface{}) error {
+	for _, t := range a.types {
+		switch t {
+		case jpNumber:
+			if _, ok := arg.(float64); ok {
+				return nil
+			}
+		case jpString:
+			if _, ok := arg.(string); ok {
+				return nil
+			}
+		case jpArray:
+			if isSliceType(arg) {
+				return nil
+			}
+		case jpObject:
+			if _, ok := arg.(map[string]interface{}); ok {
+				return nil
+			}
+		case jpArrayNumber:
+			if _, ok := toArrayNum(arg); ok {
+				return nil
+			}
+		case jpArrayString:
+			if _, ok := toArrayStr(arg); ok {
+				return nil
+			}
+		case jpAny:
+			return nil
+		case jpExpref:
+			if _, ok := arg.(expRef); ok {
+				return nil
+			}
+		}
+	}
+	return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types)
+}
+
+func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) {
+	entry, ok := f.functionTable[name]
+	if !ok {
+		return nil, errors.New("unknown function: " + name)
+	}
+	resolvedArgs, err := entry.resolveArgs(arguments)
+	if err != nil {
+		return nil, err
+	}
+	if entry.hasExpRef {
+		var extra []interface{}
+		extra = append(extra, intr)
+		resolvedArgs = append(extra, resolvedArgs...)
+	}
+	return entry.handler(resolvedArgs)
+}
+
+func jpfAbs(arguments []interface{}) (interface{}, error) {
+	num := arguments[0].(float64)
+	return math.Abs(num), nil
+}
+
+func jpfLength(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0]
+	if c, ok := arg.(string); ok {
+		return float64(utf8.RuneCountInString(c)), nil
+	} else if isSliceType(arg) {
+		v := reflect.ValueOf(arg)
+		return float64(v.Len()), nil
+	} else if c, ok := arg.(map[string]interface{}); ok {
+		return float64(len(c)), nil
+	}
+	return nil, errors.New("could not compute length()")
+}
+
+func jpfStartsWith(arguments []interface{}) (interface{}, error) {
+	search := arguments[0].(string)
+	prefix := arguments[1].(string)
+	return strings.HasPrefix(search, prefix), nil
+}
+
+func jpfAvg(arguments []interface{}) (interface{}, error) {
+	// We've already type checked the value so we can safely use
+	// type assertions.
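+	// The jpArrayNumber argSpec for avg() guarantees arguments[0] is a
+	// []interface{} whose elements are all float64.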
+ args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. 
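+	// (the argSpec admits only array[number] or array[string], so the
+	// string conversion below cannot fail).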
+	items, _ := toArrayStr(arguments[0])
+	if len(items) == 0 {
+		return nil, nil
+	}
+	if len(items) == 1 {
+		return items[0], nil
+	}
+	best := items[0]
+	for _, item := range items[1:] {
+		if item > best {
+			best = item
+		}
+	}
+	return best, nil
+}
+func jpfMerge(arguments []interface{}) (interface{}, error) {
+	final := make(map[string]interface{})
+	for _, m := range arguments {
+		mapped := m.(map[string]interface{})
+		for key, value := range mapped {
+			final[key] = value
+		}
+	}
+	return final, nil
+}
+func jpfMaxBy(arguments []interface{}) (interface{}, error) {
+	intr := arguments[0].(*treeInterpreter)
+	arr := arguments[1].([]interface{})
+	exp := arguments[2].(expRef)
+	node := exp.ref
+	if len(arr) == 0 {
+		return nil, nil
+	} else if len(arr) == 1 {
+		return arr[0], nil
+	}
+	start, err := intr.Execute(node, arr[0])
+	if err != nil {
+		return nil, err
+	}
+	switch t := start.(type) {
+	case float64:
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(float64)
+			if !ok {
+				return nil, errors.New("invalid type, must be number")
+			}
+			if current > bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	case string:
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(string)
+			if !ok {
+				return nil, errors.New("invalid type, must be string")
+			}
+			if current > bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	default:
+		return nil, errors.New("invalid type, must be number or string")
+	}
+}
+func jpfSum(arguments []interface{}) (interface{}, error) {
+	items, _ := toArrayNum(arguments[0])
+	sum := 0.0
+	for _, item := range items {
+		sum += item
+	}
+	return sum, nil
+}
+
+func jpfMin(arguments []interface{}) (interface{}, error) {
+	if items, ok := toArrayNum(arguments[0]); ok {
+		if len(items) == 0 {
+			return nil, nil
+		}
+		if len(items) == 1 {
+			return items[0], nil
+		}
+		best := items[0]
+		for _, item := range items[1:] {
+			if item < best {
+				best = item
+			}
+		}
+		return best, nil
+	}
+	items, _ := toArrayStr(arguments[0])
+	if len(items) == 0 {
+		return nil, nil
+	}
+	if len(items) == 1 {
+		return items[0], nil
+	}
+	best := items[0]
+	for _, item := range items[1:] {
+		if item < best {
+			best = item
+		}
+	}
+	return best, nil
+}
+
+func jpfMinBy(arguments []interface{}) (interface{}, error) {
+	intr := arguments[0].(*treeInterpreter)
+	arr := arguments[1].([]interface{})
+	exp := arguments[2].(expRef)
+	node := exp.ref
+	if len(arr) == 0 {
+		return nil, nil
+	} else if len(arr) == 1 {
+		return arr[0], nil
+	}
+	start, err := intr.Execute(node, arr[0])
+	if err != nil {
+		return nil, err
+	}
+	if t, ok := start.(float64); ok {
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(float64)
+			if !ok {
+				return nil, errors.New("invalid type, must be number")
+			}
+			if current < bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	} else if t, ok := start.(string); ok {
+		bestVal := t
+		bestItem := arr[0]
+		for _, item := range arr[1:] {
+			result, err := intr.Execute(node, item)
+			if err != nil {
+				return nil, err
+			}
+			current, ok := result.(string)
+			if !ok {
+				return nil, errors.New("invalid type, must be string")
+			}
+			if current < bestVal {
+				bestVal = current
+				bestItem = item
+			}
+		}
+		return bestItem, nil
+	} else {
+		return nil, errors.New("invalid type, must be number or string")
+	}
+}
+func jpfType(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0]
+	if _, ok := arg.(float64); ok {
+		return "number", nil
+	}
+	if _, ok := arg.(string); ok {
+		return "string", nil
+	}
+	if _, ok := arg.([]interface{}); ok {
+		return "array", nil
+	}
+	if _, ok := arg.(map[string]interface{}); ok {
+		return "object", nil
+	}
+	if arg == nil {
+		return "null", nil
+	}
+	if arg == true || arg == false {
+		return "boolean", nil
+	}
+	return nil, errors.New("unknown type")
+}
+func jpfKeys(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0].(map[string]interface{})
+	collected := make([]interface{}, 0, len(arg))
+	for key := range arg {
+		collected = append(collected, key)
+	}
+	return collected, nil
+}
+func jpfValues(arguments []interface{}) (interface{}, error) {
+	arg := arguments[0].(map[string]interface{})
+	collected := make([]interface{}, 0, len(arg))
+	for _, value := range arg {
+		collected = append(collected, value)
+	}
+	return collected, nil
+}
+func jpfSort(arguments []interface{}) (interface{}, error) {
+	if items, ok := toArrayNum(arguments[0]); ok {
+		d := sort.Float64Slice(items)
+		sort.Stable(d)
+		final := make([]interface{}, len(d))
+		for i, val := range d {
+			final[i] = val
+		}
+		return final, nil
+	}
+	// Otherwise we're dealing with sort()'ing strings.
+	items, _ := toArrayStr(arguments[0])
+	d := sort.StringSlice(items)
+	sort.Stable(d)
+	final := make([]interface{}, len(d))
+	for i, val := range d {
+		final[i] = val
+	}
+	return final, nil
+}
+func jpfSortBy(arguments []interface{}) (interface{}, error) {
+	intr := arguments[0].(*treeInterpreter)
+	arr := arguments[1].([]interface{})
+	exp := arguments[2].(expRef)
+	node := exp.ref
+	if len(arr) == 0 {
+		return arr, nil
+	} else if len(arr) == 1 {
+		return arr, nil
+	}
+	start, err := intr.Execute(node, arr[0])
+	if err != nil {
+		return nil, err
+	}
+	if _, ok := start.(float64); ok {
+		sortable := &byExprFloat{intr, node, arr, false}
+		sort.Stable(sortable)
+		if sortable.hasError {
+			return nil, errors.New("error in sort_by comparison")
+		}
+		return arr, nil
+	} else if _, ok := start.(string); ok {
+		sortable := &byExprString{intr, node, arr, false}
+		sort.Stable(sortable)
+		if sortable.hasError {
+			return nil, errors.New("error in sort_by comparison")
+		}
+		return arr, nil
+	} else {
+		return nil, errors.New("invalid type, must be number or string")
+	}
+}
+func jpfJoin(arguments []interface{}) (interface{}, error) {
+	sep := arguments[0].(string)
+	// We can't just do arguments[1].([]string), we have to
+	// manually convert each item to a string.
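+	// (encoding/json decodes arrays as []interface{}, never []string).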
+ arrayStr := []string{} + for _, item := range arguments[1].([]interface{}) { + arrayStr = append(arrayStr, item.(string)) + } + return strings.Join(arrayStr, sep), nil +} +func jpfReverse(arguments []interface{}) (interface{}, error) { + if s, ok := arguments[0].(string); ok { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r), nil + } + items := arguments[0].([]interface{}) + length := len(items) + reversed := make([]interface{}, length) + for i, item := range items { + reversed[length-(i+1)] = item + } + return reversed, nil +} +func jpfToArray(arguments []interface{}) (interface{}, error) { + if _, ok := arguments[0].([]interface{}); ok { + return arguments[0], nil + } + return arguments[:1:1], nil +} +func jpfToString(arguments []interface{}) (interface{}, error) { + if v, ok := arguments[0].(string); ok { + return v, nil + } + result, err := json.Marshal(arguments[0]) + if err != nil { + return nil, err + } + return string(result), nil +} +func jpfToNumber(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if v, ok := arg.(float64); ok { + return v, nil + } + if v, ok := arg.(string); ok { + conv, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, nil + } + return conv, nil + } + if _, ok := arg.([]interface{}); ok { + return nil, nil + } + if _, ok := arg.(map[string]interface{}); ok { + return nil, nil + } + if arg == nil { + return nil, nil + } + if arg == true || arg == false { + return nil, nil + } + return nil, errors.New("unknown type") +} +func jpfNotNull(arguments []interface{}) (interface{}, error) { + for _, arg := range arguments { + if arg != nil { + return arg, nil + } + } + return nil, nil +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/jmespath.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/jmespath.go new file mode 100644 index 00000000..c7df0878 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/jmespath.go @@ -0,0 +1,13 @@ +package jmespath + +import "github.com/jmespath/go-jmespath" + +// Fuzz will fuzz test the JMESPath parser. +func Fuzz(data []byte) int { + p := jmespath.NewParser() + _, err := p.Parse(string(data)) + if err != nil { + return 1 + } + return 0 +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/interpreter.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/interpreter.go new file mode 100644 index 00000000..13c74604 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/interpreter.go @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". 
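+//
+// A minimal usage sketch of how an expression is typically evaluated end
+// to end (assuming this library's package-level Search helper, which
+// parses the expression and then drives this interpreter):
+//
+//	var data interface{}
+//	_ = json.Unmarshal([]byte(`{"foo": {"bar": "baz"}}`), &data)
+//	result, _ := jmespath.Search("foo.bar", data) // result == "baz"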
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := node.value.(string) + return m[key], nil + } + return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) + } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) + } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. 
+		rv := reflect.ValueOf(value)
+		if rv.Kind() == reflect.Slice {
+			index := node.value.(int)
+			if index < 0 {
+				index += rv.Len()
+			}
+			if index < rv.Len() && index >= 0 {
+				v := rv.Index(index)
+				return v.Interface(), nil
+			}
+		}
+		return nil, nil
+	case ASTKeyValPair:
+		return intr.Execute(node.children[0], value)
+	case ASTLiteral:
+		return node.value, nil
+	case ASTMultiSelectHash:
+		if value == nil {
+			return nil, nil
+		}
+		collected := make(map[string]interface{})
+		for _, child := range node.children {
+			current, err := intr.Execute(child, value)
+			if err != nil {
+				return nil, err
+			}
+			key := child.value.(string)
+			collected[key] = current
+		}
+		return collected, nil
+	case ASTMultiSelectList:
+		if value == nil {
+			return nil, nil
+		}
+		collected := []interface{}{}
+		for _, child := range node.children {
+			current, err := intr.Execute(child, value)
+			if err != nil {
+				return nil, err
+			}
+			collected = append(collected, current)
+		}
+		return collected, nil
+	case ASTOrExpression:
+		matched, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		if isFalse(matched) {
+			matched, err = intr.Execute(node.children[1], value)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return matched, nil
+	case ASTAndExpression:
+		matched, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		if isFalse(matched) {
+			return matched, nil
+		}
+		return intr.Execute(node.children[1], value)
+	case ASTNotExpression:
+		matched, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		if isFalse(matched) {
+			return true, nil
+		}
+		return false, nil
+	case ASTPipe:
+		result := value
+		var err error
+		for _, child := range node.children {
+			result, err = intr.Execute(child, result)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return result, nil
+	case ASTProjection:
+		left, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		sliceType, ok := left.([]interface{})
+		if !ok {
+			if isSliceType(left) {
+				return intr.projectWithReflection(node, left)
+			}
+			return nil, nil
+		}
+		collected := []interface{}{}
+		var current interface{}
+		for _, element := range sliceType {
+			current, err = intr.Execute(node.children[1], element)
+			if err != nil {
+				return nil, err
+			}
+			if current != nil {
+				collected = append(collected, current)
+			}
+		}
+		return collected, nil
+	case ASTSubexpression, ASTIndexExpression:
+		left, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, err
+		}
+		return intr.Execute(node.children[1], left)
+	case ASTSlice:
+		sliceType, ok := value.([]interface{})
+		if !ok {
+			if isSliceType(value) {
+				return intr.sliceWithReflection(node, value)
+			}
+			return nil, nil
+		}
+		parts := node.value.([]*int)
+		sliceParams := make([]sliceParam, 3)
+		for i, part := range parts {
+			if part != nil {
+				sliceParams[i].Specified = true
+				sliceParams[i].N = *part
+			}
+		}
+		return slice(sliceType, sliceParams)
+	case ASTValueProjection:
+		left, err := intr.Execute(node.children[0], value)
+		if err != nil {
+			return nil, nil
+		}
+		mapType, ok := left.(map[string]interface{})
+		if !ok {
+			return nil, nil
+		}
+		values := make([]interface{}, 0, len(mapType))
+		for _, value := range mapType {
+			values = append(values, value)
+		}
+		collected := []interface{}{}
+		for _, element := range values {
+			current, err := intr.Execute(node.children[1], element)
+			if err != nil {
+				return nil, err
+			}
+			if current != nil {
+				collected = append(collected, current)
+			}
+		}
+		return collected, nil
+	}
+	return nil, errors.New("Unknown AST node: " + node.nodeType.String())
+}
+
+func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) {
+	rv := reflect.ValueOf(value)
+	first, n := utf8.DecodeRuneInString(key)
+	fieldName := string(unicode.ToUpper(first)) + key[n:]
+	if rv.Kind() == reflect.Struct {
+		v := rv.FieldByName(fieldName)
+		if !v.IsValid() {
+			return nil, nil
+		}
+		return v.Interface(), nil
+	} else if rv.Kind() == reflect.Ptr {
+		// Handle multiple levels of indirection?
+		if rv.IsNil() {
+			return nil, nil
+		}
+		rv = rv.Elem()
+		v := rv.FieldByName(fieldName)
+		if !v.IsValid() {
+			return nil, nil
+		}
+		return v.Interface(), nil
+	}
+	return nil, nil
+}
+
+func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) {
+	v := reflect.ValueOf(value)
+	flattened := []interface{}{}
+	for i := 0; i < v.Len(); i++ {
+		element := v.Index(i).Interface()
+		if reflect.TypeOf(element).Kind() == reflect.Slice {
+			// Then insert the contents of the element
+			// slice into the flattened slice,
+			// i.e. flattened = append(flattened, mySlice...)
+			elementV := reflect.ValueOf(element)
+			for j := 0; j < elementV.Len(); j++ {
+				flattened = append(
+					flattened, elementV.Index(j).Interface())
+			}
+		} else {
+			flattened = append(flattened, element)
+		}
+	}
+	return flattened, nil
+}
+
+func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+	v := reflect.ValueOf(value)
+	parts := node.value.([]*int)
+	sliceParams := make([]sliceParam, 3)
+	for i, part := range parts {
+		if part != nil {
+			sliceParams[i].Specified = true
+			sliceParams[i].N = *part
+		}
+	}
+	final := []interface{}{}
+	for i := 0; i < v.Len(); i++ {
+		element := v.Index(i).Interface()
+		final = append(final, element)
+	}
+	return slice(final, sliceParams)
+}
+
+func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+	compareNode := node.children[2]
+	collected := []interface{}{}
+	v := reflect.ValueOf(value)
+	for i := 0; i < v.Len(); i++ {
+		element := v.Index(i).Interface()
+		result, err := intr.Execute(compareNode, element)
+		if err != nil {
+			return nil, err
+		}
+		if !isFalse(result) {
+			current, err := intr.Execute(node.children[1], element)
+			if err != nil {
+				return nil, err
+			}
+			if current != nil {
+				collected = append(collected, current)
+			}
+		}
+	}
+	return collected, nil
+}
+
+func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+	collected := []interface{}{}
+	v := reflect.ValueOf(value)
+	for i := 0; i < v.Len(); i++ {
+		element := v.Index(i).Interface()
+		result, err := intr.Execute(node.children[1], element)
+		if err != nil {
+			return nil, err
+		}
+		if result != nil {
+			collected = append(collected, result)
+		}
+	}
+	return collected, nil
+}
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/lexer.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/lexer.go
new file mode 100644
index 00000000..817900c8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/lexer.go
@@ -0,0 +1,420 @@
+package jmespath
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+type token struct {
+	tokenType tokType
+	value     string
+	position  int
+	length    int
+}
+
+type tokType int
+
+const eof = -1
+
+// Lexer contains information about the expression being tokenized.
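+// As a rough example, tokenizing the expression "foo[0]" produces:
+//
+//	tUnquotedIdentifier("foo") tLbracket tNumber("0") tRbracket tEOF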
+type Lexer struct {
+	expression string       // The expression provided by the user.
+	currentPos int          // The current position in the string.
+	lastWidth  int          // The width of the current rune; used by back() to unread it.
+	buf        bytes.Buffer // Internal buffer used for building up values.
+}
+
+// SyntaxError is the main error used whenever a lexing or parsing error occurs.
+type SyntaxError struct {
+	msg        string // Error message displayed to user
+	Expression string // Expression that generated a SyntaxError
+	Offset     int    // The location in the string where the error occurred
+}
+
+func (e SyntaxError) Error() string {
+	// In the future, it would be good to underline the specific
+	// location where the error occurred.
+	return "SyntaxError: " + e.msg
+}
+
+// HighlightLocation will show where the syntax error occurred.
+// It will place a "^" character on a line below the expression
+// at the point where the syntax error occurred.
+func (e SyntaxError) HighlightLocation() string {
+	return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^"
+}
+
+//go:generate stringer -type=tokType
+const (
+	tUnknown tokType = iota
+	tStar
+	tDot
+	tFilter
+	tFlatten
+	tLparen
+	tRparen
+	tLbracket
+	tRbracket
+	tLbrace
+	tRbrace
+	tOr
+	tPipe
+	tNumber
+	tUnquotedIdentifier
+	tQuotedIdentifier
+	tComma
+	tColon
+	tLT
+	tLTE
+	tGT
+	tGTE
+	tEQ
+	tNE
+	tJSONLiteral
+	tStringLiteral
+	tCurrent
+	tExpref
+	tAnd
+	tNot
+	tEOF
+)
+
+var basicTokens = map[rune]tokType{
+	'.': tDot,
+	'*': tStar,
+	',': tComma,
+	':': tColon,
+	'{': tLbrace,
+	'}': tRbrace,
+	']': tRbracket, // tLbracket not included because it could be "[]"
+	'(': tLparen,
+	')': tRparen,
+	'@': tCurrent,
+}
+
+// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64.
+// When using this bitmask just be sure to shift the rune down 64 bits
+// before checking against identifierStartBits.
+const identifierStartBits uint64 = 576460745995190270
+
+// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s.
+var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270}
+
+var whiteSpace = map[rune]bool{
+	' ': true, '\t': true, '\n': true, '\r': true,
+}
+
+func (t token) String() string {
+	return fmt.Sprintf("Token{%+v, %s, %d, %d}",
+		t.tokenType, t.value, t.position, t.length)
+}
+
+// NewLexer creates a new JMESPath lexer.
+func NewLexer() *Lexer {
+	lexer := Lexer{}
+	return &lexer
+}
+
+func (lexer *Lexer) next() rune {
+	if lexer.currentPos >= len(lexer.expression) {
+		lexer.lastWidth = 0
+		return eof
+	}
+	r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:])
+	lexer.lastWidth = w
+	lexer.currentPos += w
+	return r
+}
+
+func (lexer *Lexer) back() {
+	lexer.currentPos -= lexer.lastWidth
+}
+
+func (lexer *Lexer) peek() rune {
+	t := lexer.next()
+	lexer.back()
+	return t
+}
+
+// tokenize takes an expression and returns corresponding tokens.
+func (lexer *Lexer) tokenize(expression string) ([]token, error) {
+	var tokens []token
+	lexer.expression = expression
+	lexer.currentPos = 0
+	lexer.lastWidth = 0
+loop:
+	for {
+		r := lexer.next()
+		if identifierStartBits&(1<<(uint64(r)-64)) > 0 {
+			t := lexer.consumeUnquotedIdentifier()
+			tokens = append(tokens, t)
+		} else if val, ok := basicTokens[r]; ok {
+			// Basic single char token.
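+			// e.g. '.' -> tDot, '@' -> tCurrent (see basicTokens above).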
+ t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' { + t := lexer.matchOrElse(r, '=', tNE, tNot) + tokens = append(tokens, t) + } else if r == '=' { + t := lexer.matchOrElse(r, '=', tEQ, tUnknown) + tokens = append(tokens, t) + } else if r == '&' { + t := lexer.matchOrElse(r, '&', tAnd, tExpref) + tokens = append(tokens, t) + } else if r == eof { + break loop + } else if _, ok := whiteSpace[r]; ok { + // Ignore whitespace + } else { + return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) + } + } + tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) + return tokens, nil +} + +// Consume characters until the ending rune "r" is reached. +// If the end of the expression is reached before seeing the +// terminating rune "r", then an error is returned. +// If no error occurs then the matching substring is returned. +// The returned string will not include the ending rune. +func (lexer *Lexer) consumeUntil(end rune) (string, error) { + start := lexer.currentPos + current := lexer.next() + for current != end && current != eof { + if current == '\\' && lexer.peek() != eof { + lexer.next() + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return "", SyntaxError{ + msg: "Unclosed delimiter: " + string(end), + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil +} + +func (lexer *Lexer) consumeLiteral() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('`') + if err != nil { + return token{}, err + } + value = strings.Replace(value, "\\`", "`", -1) + return token{ + tokenType: tJSONLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) consumeRawStringLiteral() (token, error) { + start := lexer.currentPos + currentIndex := start + current := lexer.next() + for current != '\'' && lexer.peek() != eof { + if current == '\\' && lexer.peek() == '\'' { + chunk := lexer.expression[currentIndex : lexer.currentPos-1] + lexer.buf.WriteString(chunk) + lexer.buf.WriteString("'") + lexer.next() + currentIndex = lexer.currentPos + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. 
+		return token{}, SyntaxError{
+			msg:        "Unclosed delimiter: '",
+			Expression: lexer.expression,
+			Offset:     len(lexer.expression),
+		}
+	}
+	if currentIndex < lexer.currentPos {
+		lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1])
+	}
+	value := lexer.buf.String()
+	// Reset the buffer so it can be reused again.
+	lexer.buf.Reset()
+	return token{
+		tokenType: tStringLiteral,
+		value:     value,
+		position:  start,
+		length:    len(value),
+	}, nil
+}
+
+func (lexer *Lexer) syntaxError(msg string) SyntaxError {
+	return SyntaxError{
+		msg:        msg,
+		Expression: lexer.expression,
+		Offset:     lexer.currentPos - 1,
+	}
+}
+
+// Checks for a two char token, otherwise matches a single character
+// token. This is used whenever a two char token overlaps a single
+// char token, e.g. "||" -> tOr, "|" -> tPipe.
+func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token {
+	start := lexer.currentPos - lexer.lastWidth
+	nextRune := lexer.next()
+	var t token
+	if nextRune == second {
+		t = token{
+			tokenType: matchedType,
+			value:     string(first) + string(second),
+			position:  start,
+			length:    2,
+		}
+	} else {
+		lexer.back()
+		t = token{
+			tokenType: singleCharType,
+			value:     string(first),
+			position:  start,
+			length:    1,
+		}
+	}
+	return t
+}
+
+func (lexer *Lexer) consumeLBracket() token {
+	// There are three options here:
+	// 1. A filter expression "[?"
+	// 2. A flatten operator "[]"
+	// 3. A bare lbracket "["
+	start := lexer.currentPos - lexer.lastWidth
+	nextRune := lexer.next()
+	var t token
+	if nextRune == '?' {
+		t = token{
+			tokenType: tFilter,
+			value:     "[?",
+			position:  start,
+			length:    2,
+		}
+	} else if nextRune == ']' {
+		t = token{
+			tokenType: tFlatten,
+			value:     "[]",
+			position:  start,
+			length:    2,
+		}
+	} else {
+		t = token{
+			tokenType: tLbracket,
+			value:     "[",
+			position:  start,
+			length:    1,
+		}
+		lexer.back()
+	}
+	return t
+}
+
+func (lexer *Lexer) consumeQuotedIdentifier() (token, error) {
+	start := lexer.currentPos
+	value, err := lexer.consumeUntil('"')
+	if err != nil {
+		return token{}, err
+	}
+	var decoded string
+	asJSON := []byte("\"" + value + "\"")
+	if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil {
+		return token{}, err
+	}
+	return token{
+		tokenType: tQuotedIdentifier,
+		value:     decoded,
+		position:  start - 1,
+		length:    len(decoded),
+	}, nil
+}
+
+func (lexer *Lexer) consumeUnquotedIdentifier() token {
+	// Consume runes until we reach the end of an unquoted
+	// identifier.
+	start := lexer.currentPos - lexer.lastWidth
+	for {
+		r := lexer.next()
+		if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 {
+			lexer.back()
+			break
+		}
+	}
+	value := lexer.expression[start:lexer.currentPos]
+	return token{
+		tokenType: tUnquotedIdentifier,
+		value:     value,
+		position:  start,
+		length:    lexer.currentPos - start,
+	}
+}
+
+func (lexer *Lexer) consumeNumber() token {
+	// Consume runes until we reach something that's not a number.
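+	// A leading '-' was already consumed by tokenize; backing start up
+	// by lastWidth below includes it in the token.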
+ start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/parser.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/parser.go new file mode 100644 index 00000000..1240a175 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/parser.go @@ -0,0 +1,603 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type astNodeType int + +//go:generate stringer -type astNodeType +const ( + ASTEmpty astNodeType = iota + ASTComparator + ASTCurrentNode + ASTExpRef + ASTFunctionExpression + ASTField + ASTFilterProjection + ASTFlatten + ASTIdentity + ASTIndex + ASTIndexExpression + ASTKeyValPair + ASTLiteral + ASTMultiSelectHash + ASTMultiSelectList + ASTOrExpression + ASTAndExpression + ASTNotExpression + ASTPipe + ASTProjection + ASTSubexpression + ASTSlice + ASTValueProjection +) + +// ASTNode represents the abstract syntax tree of a JMESPath expression. +type ASTNode struct { + nodeType astNodeType + value interface{} + children []ASTNode +} + +func (node ASTNode) String() string { + return node.PrettyPrint(0) +} + +// PrettyPrint will pretty print the parsed AST. +// The AST is an implementation detail and this pretty print +// function is provided as a convenience method to help with +// debugging. You should not rely on its output as the internal +// structure of the AST may change at any time. +func (node ASTNode) PrettyPrint(indent int) string { + spaces := strings.Repeat(" ", indent) + output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) + nextIndent := indent + 2 + if node.value != nil { + if converted, ok := node.value.(fmt.Stringer); ok { + // Account for things like comparator nodes + // that are enums with a String() method. + output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) + } else { + output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) + } + } + lastIndex := len(node.children) + if lastIndex > 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. +type Parser struct { + expression string + tokens []token + index int +} + +// NewParser creates a new JMESPath parser. +func NewParser() *Parser { + p := Parser{} + return &p +} + +// Parse will compile a JMESPath expression. 
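+//
+// As a rough sketch, parsing "foo.bar" yields a subexpression node with
+// two field children:
+//
+//	ASTSubexpression {
+//	  children: ASTField{value: "foo"}, ASTField{value: "bar"}
+//	}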
+func (p *Parser) Parse(expression string) (ASTNode, error) {
+	lexer := NewLexer()
+	p.expression = expression
+	p.index = 0
+	tokens, err := lexer.tokenize(expression)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	p.tokens = tokens
+	parsed, err := p.parseExpression(0)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	if p.current() != tEOF {
+		return ASTNode{}, p.syntaxError(fmt.Sprintf(
+			"Unexpected token at the end of the expression: %s", p.current()))
+	}
+	return parsed, nil
+}
+
+func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) {
+	var err error
+	leftToken := p.lookaheadToken(0)
+	p.advance()
+	leftNode, err := p.nud(leftToken)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	currentToken := p.current()
+	for bindingPower < bindingPowers[currentToken] {
+		p.advance()
+		leftNode, err = p.led(currentToken, leftNode)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		currentToken = p.current()
+	}
+	return leftNode, nil
+}
+
+func (p *Parser) parseIndexExpression() (ASTNode, error) {
+	if p.lookahead(0) == tColon || p.lookahead(1) == tColon {
+		return p.parseSliceExpression()
+	}
+	indexStr := p.lookaheadToken(0).value
+	parsedInt, err := strconv.Atoi(indexStr)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt}
+	p.advance()
+	if err := p.match(tRbracket); err != nil {
+		return ASTNode{}, err
+	}
+	return indexNode, nil
+}
+
+func (p *Parser) parseSliceExpression() (ASTNode, error) {
+	parts := []*int{nil, nil, nil}
+	index := 0
+	current := p.current()
+	for current != tRbracket && index < 3 {
+		if current == tColon {
+			index++
+			p.advance()
+		} else if current == tNumber {
+			parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value)
+			if err != nil {
+				return ASTNode{}, err
+			}
+			parts[index] = &parsedInt
+			p.advance()
+		} else {
+			return ASTNode{}, p.syntaxError(
+				"Expected tColon or tNumber, received: " + p.current().String())
+		}
+		current = p.current()
+	}
+	if err := p.match(tRbracket); err != nil {
+		return ASTNode{}, err
+	}
+	return ASTNode{
+		nodeType: ASTSlice,
+		value:    parts,
+	}, nil
+}
+
+func (p *Parser) match(tokenType tokType) error {
+	if p.current() == tokenType {
+		p.advance()
+		return nil
+	}
+	return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String())
+}
+
+func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) {
+	switch tokenType {
+	case tDot:
+		if p.current() != tStar {
+			right, err := p.parseDotRHS(bindingPowers[tDot])
+			return ASTNode{
+				nodeType: ASTSubexpression,
+				children: []ASTNode{node, right},
+			}, err
+		}
+		p.advance()
+		right, err := p.parseProjectionRHS(bindingPowers[tDot])
+		return ASTNode{
+			nodeType: ASTValueProjection,
+			children: []ASTNode{node, right},
+		}, err
+	case tPipe:
+		right, err := p.parseExpression(bindingPowers[tPipe])
+		return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err
+	case tOr:
+		right, err := p.parseExpression(bindingPowers[tOr])
+		return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err
+	case tAnd:
+		right, err := p.parseExpression(bindingPowers[tAnd])
+		return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err
+	case tLparen:
+		name := node.value
+		var args []ASTNode
+		for p.current() != tRparen {
+			expression, err := p.parseExpression(0)
+			if err != nil {
+				return ASTNode{}, err
+			}
+			if p.current() == tComma {
+				if err := p.match(tComma); err != nil {
+					return ASTNode{}, err
+				}
+			}
+			args = append(args, expression)
+		}
+		if err := p.match(tRparen); err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{
+			nodeType: ASTFunctionExpression,
+			value:    name,
+			children: args,
+		}, nil
+	case tFilter:
+		return p.parseFilter(node)
+	case tFlatten:
+		left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}}
+		right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+		return ASTNode{
+			nodeType: ASTProjection,
+			children: []ASTNode{left, right},
+		}, err
+	case tEQ, tNE, tGT, tGTE, tLT, tLTE:
+		right, err := p.parseExpression(bindingPowers[tokenType])
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{
+			nodeType: ASTComparator,
+			value:    tokenType,
+			children: []ASTNode{node, right},
+		}, nil
+	case tLbracket:
+		tokenType := p.current()
+		var right ASTNode
+		var err error
+		if tokenType == tNumber || tokenType == tColon {
+			right, err = p.parseIndexExpression()
+			if err != nil {
+				return ASTNode{}, err
+			}
+			return p.projectIfSlice(node, right)
+		}
+		// Otherwise this is a projection.
+		if err := p.match(tStar); err != nil {
+			return ASTNode{}, err
+		}
+		if err := p.match(tRbracket); err != nil {
+			return ASTNode{}, err
+		}
+		right, err = p.parseProjectionRHS(bindingPowers[tStar])
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{
+			nodeType: ASTProjection,
+			children: []ASTNode{node, right},
+		}, nil
+	}
+	return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String())
+}
+
+func (p *Parser) nud(token token) (ASTNode, error) {
+	switch token.tokenType {
+	case tJSONLiteral:
+		var parsed interface{}
+		err := json.Unmarshal([]byte(token.value), &parsed)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{nodeType: ASTLiteral, value: parsed}, nil
+	case tStringLiteral:
+		return ASTNode{nodeType: ASTLiteral, value: token.value}, nil
+	case tUnquotedIdentifier:
+		return ASTNode{
+			nodeType: ASTField,
+			value:    token.value,
+		}, nil
+	case tQuotedIdentifier:
+		node := ASTNode{nodeType: ASTField, value: token.value}
+		if p.current() == tLparen {
+			return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token)
+		}
+		return node, nil
+	case tStar:
+		left := ASTNode{nodeType: ASTIdentity}
+		var right ASTNode
+		var err error
+		if p.current() == tRbracket {
+			right = ASTNode{nodeType: ASTIdentity}
+		} else {
+			right, err = p.parseProjectionRHS(bindingPowers[tStar])
+		}
+		return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err
+	case tFilter:
+		return p.parseFilter(ASTNode{nodeType: ASTIdentity})
+	case tLbrace:
+		return p.parseMultiSelectHash()
+	case tFlatten:
+		left := ASTNode{
+			nodeType: ASTFlatten,
+			children: []ASTNode{{nodeType: ASTIdentity}},
+		}
+		right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil
+	case tLbracket:
+		tokenType := p.current()
+		if tokenType == tNumber || tokenType == tColon {
+			right, err := p.parseIndexExpression()
+			if err != nil {
+				return ASTNode{}, err
+			}
+			return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right)
+		} else if tokenType == tStar && p.lookahead(1) == tRbracket {
+			p.advance()
+			p.advance()
+			right, err := p.parseProjectionRHS(bindingPowers[tStar])
+			if err != nil {
+				return ASTNode{}, err
+			}
+			return ASTNode{
+				nodeType: ASTProjection,
+				children: []ASTNode{{nodeType: ASTIdentity}, right},
+			}, nil
+		} else {
+			return p.parseMultiSelectList()
+		}
+	case tCurrent:
+		return ASTNode{nodeType: ASTCurrentNode}, nil
+	case tExpref:
+		expression, err := p.parseExpression(bindingPowers[tExpref])
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil
+	case tNot:
+		expression, err := p.parseExpression(bindingPowers[tNot])
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil
+	case tLparen:
+		expression, err := p.parseExpression(0)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		if err := p.match(tRparen); err != nil {
+			return ASTNode{}, err
+		}
+		return expression, nil
+	case tEOF:
+		return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token)
+	}
+
+	return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token)
+}
+
+func (p *Parser) parseMultiSelectList() (ASTNode, error) {
+	var expressions []ASTNode
+	for {
+		expression, err := p.parseExpression(0)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		expressions = append(expressions, expression)
+		if p.current() == tRbracket {
+			break
+		}
+		err = p.match(tComma)
+		if err != nil {
+			return ASTNode{}, err
+		}
+	}
+	err := p.match(tRbracket)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	return ASTNode{
+		nodeType: ASTMultiSelectList,
+		children: expressions,
+	}, nil
+}
+
+func (p *Parser) parseMultiSelectHash() (ASTNode, error) {
+	var children []ASTNode
+	for {
+		keyToken := p.lookaheadToken(0)
+		if err := p.match(tUnquotedIdentifier); err != nil {
+			if err := p.match(tQuotedIdentifier); err != nil {
+				return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier")
+			}
+		}
+		keyName := keyToken.value
+		err := p.match(tColon)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		value, err := p.parseExpression(0)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		node := ASTNode{
+			nodeType: ASTKeyValPair,
+			value:    keyName,
+			children: []ASTNode{value},
+		}
+		children = append(children, node)
+		if p.current() == tComma {
+			err := p.match(tComma)
+			if err != nil {
+				return ASTNode{}, err
+			}
+		} else if p.current() == tRbrace {
+			err := p.match(tRbrace)
+			if err != nil {
+				return ASTNode{}, err
+			}
+			break
+		}
+	}
+	return ASTNode{
+		nodeType: ASTMultiSelectHash,
+		children: children,
+	}, nil
+}
+
+func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) {
+	indexExpr := ASTNode{
+		nodeType: ASTIndexExpression,
+		children: []ASTNode{left, right},
+	}
+	if right.nodeType == ASTSlice {
+		right, err := p.parseProjectionRHS(bindingPowers[tStar])
+		return ASTNode{
+			nodeType: ASTProjection,
+			children: []ASTNode{indexExpr, right},
+		}, err
+	}
+	return indexExpr, nil
+}
+func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) {
+	var right, condition ASTNode
+	var err error
+	condition, err = p.parseExpression(0)
+	if err != nil {
+		return ASTNode{}, err
+	}
+	if err := p.match(tRbracket); err != nil {
+		return ASTNode{}, err
+	}
+	if p.current() == tFlatten {
+		right = ASTNode{nodeType: ASTIdentity}
+	} else {
+		right, err = p.parseProjectionRHS(bindingPowers[tFilter])
+		if err != nil {
+			return ASTNode{}, err
+		}
+	}
+
+	return ASTNode{
+		nodeType: ASTFilterProjection,
+		children: []ASTNode{node, right, condition},
+	}, nil
+}
+
+func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) {
+	lookahead := p.current()
+	if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) {
+		return p.parseExpression(bindingPower)
+	} else if lookahead == tLbracket {
+		if err := p.match(tLbracket); err != nil {
+			return ASTNode{}, err
+		}
+		return p.parseMultiSelectList()
+	} else if lookahead == tLbrace {
+		if err := p.match(tLbrace); err != nil {
+			return ASTNode{}, err
+		}
+		return p.parseMultiSelectHash()
+	}
+	return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace")
+}
+
+func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) {
+	current := p.current()
+	if bindingPowers[current] < 10 {
+		return ASTNode{nodeType: ASTIdentity}, nil
+	} else if current == tLbracket {
+		return p.parseExpression(bindingPower)
+	} else if current == tFilter {
+		return p.parseExpression(bindingPower)
+	} else if current == tDot {
+		err := p.match(tDot)
+		if err != nil {
+			return ASTNode{}, err
+		}
+		return p.parseDotRHS(bindingPower)
+	} else {
+		return ASTNode{}, p.syntaxError("Error")
+	}
+}
+
+func (p *Parser) lookahead(number int) tokType {
+	return p.lookaheadToken(number).tokenType
+}
+
+func (p *Parser) current() tokType {
+	return p.lookahead(0)
+}
+
+func (p *Parser) lookaheadToken(number int) token {
+	return p.tokens[p.index+number]
+}
+
+func (p *Parser) advance() {
+	p.index++
+}
+
+func tokensOneOf(elements []tokType, token tokType) bool {
+	for _, elem := range elements {
+		if elem == token {
+			return true
+		}
+	}
+	return false
+}
+
+func (p *Parser) syntaxError(msg string) SyntaxError {
+	return SyntaxError{
+		msg:        msg,
+		Expression: p.expression,
+		Offset:     p.lookaheadToken(0).position,
+	}
+}
+
+// Create a SyntaxError based on the provided token.
+// This differs from syntaxError() which creates a SyntaxError
+// based on the current lookahead token.
+func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError {
+	return SyntaxError{
+		msg:        msg,
+		Expression: p.expression,
+		Offset:     t.position,
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/toktype_string.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/toktype_string.go
new file mode 100644
index 00000000..dae79cbd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/toktype_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type=tokType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF"
+
+var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214}
+
+func (i tokType) String() string {
+	if i < 0 || i >= tokType(len(_tokType_index)-1) {
+		return fmt.Sprintf("tokType(%d)", i)
+	}
+	return _tokType_name[_tokType_index[i]:_tokType_index[i+1]]
+}
diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/util.go b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/util.go
new file mode 100644
index 00000000..ddc1b7d7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jmespath/go-jmespath/util.go
@@ -0,0 +1,185 @@
+package jmespath
+
+import (
+	"errors"
+	"reflect"
+)
+
+// IsFalse determines if an object is false based on the JMESPath spec.
+// JMESPath defines false values to be any of:
+// - An empty string, array, or hash.
+// - The boolean value false.
+// - nil +func isFalse(value interface{}) bool { + switch v := value.(type) { + case bool: + return !v + case []interface{}: + return len(v) == 0 + case map[string]interface{}: + return len(v) == 0 + case string: + return len(v) == 0 + case nil: + return true + } + // Try the reflection cases before returning false. + rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Struct: + // A struct type will never be false, even if + // all of its values are the zero type. + return false + case reflect.Slice, reflect.Map: + return rv.Len() == 0 + case reflect.Ptr: + if rv.IsNil() { + return true + } + // If it's a pointer type, we'll try to deref the pointer + // and evaluate the pointer value for isFalse. + element := rv.Elem() + return isFalse(element.Interface()) + } + return false +} + +// ObjsEqual is a generic object equality check. +// It will take two arbitrary objects and recursively determine +// if they are equal. +func objsEqual(left interface{}, right interface{}) bool { + return reflect.DeepEqual(left, right) +} + +// SliceParam refers to a single part of a slice. +// A slice consists of a start, a stop, and a step, similar to +// python slices. +type sliceParam struct { + N int + Specified bool +} + +// Slice supports [start:stop:step] style slicing that's supported in JMESPath. +func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { + computed, err := computeSliceParams(len(slice), parts) + if err != nil { + return nil, err + } + start, stop, step := computed[0], computed[1], computed[2] + result := []interface{}{} + if step > 0 { + for i := start; i < stop; i += step { + result = append(result, slice[i]) + } + } else { + for i := start; i > stop; i += step { + result = append(result, slice[i]) + } + } + return result, nil +} + +func computeSliceParams(length int, parts []sliceParam) ([]int, error) { + var start, stop, step int + if !parts[2].Specified { + step = 1 + } else if parts[2].N == 0 { + return nil, errors.New("Invalid slice, step cannot be 0") + } else { + step = parts[2].N + } + var stepValueNegative bool + if step < 0 { + stepValueNegative = true + } else { + stepValueNegative = false + } + + if !parts[0].Specified { + if stepValueNegative { + start = length - 1 + } else { + start = 0 + } + } else { + start = capSlice(length, parts[0].N, step) + } + + if !parts[1].Specified { + if stepValueNegative { + stop = -1 + } else { + stop = length + } + } else { + stop = capSlice(length, parts[1].N, step) + } + return []int{start, stop, step}, nil +} + +func capSlice(length int, actual int, step int) int { + if actual < 0 { + actual += length + if actual < 0 { + if step < 0 { + actual = -1 + } else { + actual = 0 + } + } + } else if actual >= length { + if step < 0 { + actual = length - 1 + } else { + actual = length + } + } + return actual +} + +// ToArrayNum converts an empty interface type to a slice of float64. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. +func toArrayNum(data interface{}) ([]float64, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]float64, len(d)) + for i, el := range d { + item, ok := el.(float64) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +// ToArrayStr converts an empty interface type to a slice of strings. 
+// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. If the input data could be entirely +// converted, then the converted data, along with a second value of true, +// will be returned. +func toArrayStr(data interface{}) ([]string, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]string, len(d)) + for i, el := range d { + item, ok := el.(string) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +func isSliceType(v interface{}) bool { + if v == nil { + return false + } + return reflect.TypeOf(v).Kind() == reflect.Slice +} diff --git a/cmd/registry/main.go b/cmd/registry/main.go index 603a44a5..6e667c07 100644 --- a/cmd/registry/main.go +++ b/cmd/registry/main.go @@ -14,7 +14,8 @@ import ( _ "github.com/docker/distribution/registry/storage/driver/inmemory" _ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront" _ "github.com/docker/distribution/registry/storage/driver/oss" - _ "github.com/docker/distribution/registry/storage/driver/s3" + _ "github.com/docker/distribution/registry/storage/driver/s3-aws" + _ "github.com/docker/distribution/registry/storage/driver/s3-goamz" _ "github.com/docker/distribution/registry/storage/driver/swift" ) diff --git a/registry/storage/driver/middleware/cloudfront/middleware.go b/registry/storage/driver/middleware/cloudfront/middleware.go index 56edda3a..9162c09d 100644 --- a/registry/storage/driver/middleware/cloudfront/middleware.go +++ b/registry/storage/driver/middleware/cloudfront/middleware.go @@ -8,12 +8,14 @@ import ( "encoding/pem" "fmt" "io/ioutil" + "net/url" + "strings" "time" + "github.com/aws/aws-sdk-go/service/cloudfront/sign" "github.com/docker/distribution/context" storagedriver "github.com/docker/distribution/registry/storage/driver" storagemiddleware "github.com/docker/distribution/registry/storage/driver/middleware" - "github.com/docker/goamz/cloudfront" ) // cloudFrontStorageMiddleware provides an simple implementation of layerHandler that @@ -21,8 +23,9 @@ import ( // then issues HTTP Temporary Redirects to this CloudFront content URL. 
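+//
+// A hypothetical registry configuration enabling this middleware could look
+// like the following (the option names match those parsed in
+// newCloudFrontStorageMiddleware below; all values are placeholders):
+//
+//	middleware:
+//	  storage:
+//	    - name: cloudfront
+//	      options:
+//	        baseurl: https://d111111abcdef8.cloudfront.net/
+//	        privatekey: /path/to/pk-ABCDEFGHIJKLMNOPQRST.pem
+//	        keypairid: ABCDEFGHIJKLMNOPQRST
+//	        duration: 60m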
type cloudFrontStorageMiddleware struct { storagedriver.StorageDriver - cloudfront *cloudfront.CloudFront - duration time.Duration + urlSigner *sign.URLSigner + baseURL string + duration time.Duration } var _ storagedriver.StorageDriver = &cloudFrontStorageMiddleware{} @@ -33,15 +36,24 @@ var _ storagedriver.StorageDriver = &cloudFrontStorageMiddleware{} func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) { base, ok := options["baseurl"] if !ok { - return nil, fmt.Errorf("No baseurl provided") + return nil, fmt.Errorf("no baseurl provided") } baseURL, ok := base.(string) if !ok { return nil, fmt.Errorf("baseurl must be a string") } + if !strings.Contains(baseURL, "://") { + baseURL = "https://" + baseURL + } + if !strings.HasSuffix(baseURL, "/") { + baseURL += "/" + } + if _, err := url.Parse(baseURL); err != nil { + return nil, fmt.Errorf("invalid baseurl: %v", err) + } pk, ok := options["privatekey"] if !ok { - return nil, fmt.Errorf("No privatekey provided") + return nil, fmt.Errorf("no privatekey provided") } pkPath, ok := pk.(string) if !ok { @@ -49,7 +61,7 @@ func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, o } kpid, ok := options["keypairid"] if !ok { - return nil, fmt.Errorf("No keypairid provided") + return nil, fmt.Errorf("no keypairid provided") } keypairID, ok := kpid.(string) if !ok { @@ -58,19 +70,19 @@ func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, o pkBytes, err := ioutil.ReadFile(pkPath) if err != nil { - return nil, fmt.Errorf("Failed to read privatekey file: %s", err) + return nil, fmt.Errorf("failed to read privatekey file: %s", err) } block, _ := pem.Decode([]byte(pkBytes)) if block == nil { - return nil, fmt.Errorf("Failed to decode private key as an rsa private key") + return nil, fmt.Errorf("failed to decode private key as an rsa private key") } privateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes) if err != nil { return nil, err } - cf := cloudfront.New(baseURL, privateKey, keypairID) + urlSigner := sign.NewURLSigner(keypairID, privateKey) duration := 20 * time.Minute d, ok := options["duration"] @@ -81,13 +93,18 @@ func newCloudFrontStorageMiddleware(storageDriver storagedriver.StorageDriver, o case string: dur, err := time.ParseDuration(d) if err != nil { - return nil, fmt.Errorf("Invalid duration: %s", err) + return nil, fmt.Errorf("invalid duration: %s", err) } duration = dur } } - return &cloudFrontStorageMiddleware{StorageDriver: storageDriver, cloudfront: cf, duration: duration}, nil + return &cloudFrontStorageMiddleware{ + StorageDriver: storageDriver, + urlSigner: urlSigner, + baseURL: baseURL, + duration: duration, + }, nil } // S3BucketKeyer is any type that is capable of returning the S3 bucket key @@ -106,7 +123,7 @@ func (lh *cloudFrontStorageMiddleware) URLFor(ctx context.Context, path string, return lh.StorageDriver.URLFor(ctx, path, options) } - cfURL, err := lh.cloudfront.CannedSignedURL(keyer.S3BucketKey(path), "", time.Now().Add(lh.duration)) + cfURL, err := lh.urlSigner.Sign(lh.baseURL+keyer.S3BucketKey(path), time.Now().Add(lh.duration)) if err != nil { return "", err } diff --git a/registry/storage/driver/s3-aws/s3.go b/registry/storage/driver/s3-aws/s3.go new file mode 100644 index 00000000..af62d3f0 --- /dev/null +++ b/registry/storage/driver/s3-aws/s3.go @@ -0,0 +1,966 @@ +// Package s3 provides a storagedriver.StorageDriver implementation to +// store blobs in Amazon S3 cloud 
 storage.
+//
+// This package leverages the official AWS client library for interfacing
+// with S3.
+//
+// Because S3 is a key-value store, the Stat call does not support last
+// modification time for directories (directories are an abstraction for
+// key-value stores).
+//
+// Keep in mind that S3 guarantees only read-after-write consistency for new
+// objects, but no read-after-update or list-after-write consistency.
+package s3
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/s3"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/registry/client/transport"
+	storagedriver "github.com/docker/distribution/registry/storage/driver"
+	"github.com/docker/distribution/registry/storage/driver/base"
+	"github.com/docker/distribution/registry/storage/driver/factory"
+)
+
+const driverName = "s3aws"
+
+// minChunkSize defines the minimum multipart upload chunk size.
+// The S3 API requires multipart upload chunks to be at least 5MB.
+const minChunkSize = 5 << 20
+
+const defaultChunkSize = 2 * minChunkSize
+
+// listMax is the largest number of objects that can be requested from S3 in a single list call.
+const listMax = 1000
+
+// validRegions is the set of known S3 region identifiers.
+var validRegions = map[string]struct{}{}
+
+// DriverParameters encapsulates all of the driver parameters after all values have been set.
+type DriverParameters struct {
+	AccessKey     string
+	SecretKey     string
+	Bucket        string
+	Region        string
+	Encrypt       bool
+	Secure        bool
+	ChunkSize     int64
+	RootDirectory string
+	StorageClass  string
+	UserAgent     string
+}
+
+func init() {
+	for _, region := range []string{
+		"us-east-1",
+		"us-west-1",
+		"us-west-2",
+		"eu-west-1",
+		"eu-central-1",
+		"ap-southeast-1",
+		"ap-southeast-2",
+		"ap-northeast-1",
+		"ap-northeast-2",
+		"sa-east-1",
+	} {
+		validRegions[region] = struct{}{}
+	}
+
+	// Register this as the default s3 driver in addition to s3aws
+	factory.Register("s3", &s3DriverFactory{})
+	factory.Register(driverName, &s3DriverFactory{})
+}
+
+// s3DriverFactory implements the factory.StorageDriverFactory interface
+type s3DriverFactory struct{}
+
+func (factory *s3DriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
+	return FromParameters(parameters)
+}
+
+type driver struct {
+	S3            *s3.S3
+	Bucket        string
+	ChunkSize     int64
+	Encrypt       bool
+	RootDirectory string
+	StorageClass  string
+
+	pool  sync.Pool // pool []byte buffers used for WriteStream
+	zeros []byte    // shared, zero-valued buffer used for WriteStream
+}
+
+type baseEmbed struct {
+	base.Base
+}
+
+// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3.
+// Objects are stored at absolute keys in the provided bucket.
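+// For example, with a rootdirectory of "registry", the driver path "/a/b"
+// maps to the bucket key "registry/a/b" (see s3Path below).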
+type Driver struct {
+	baseEmbed
+}
+
+// FromParameters constructs a new Driver with a given parameters map
+// Required parameters:
+// - accesskey
+// - secretkey
+// - region
+// - bucket
+// - encrypt
+func FromParameters(parameters map[string]interface{}) (*Driver, error) {
+	// Providing no values for these is valid in case the user is authenticating
+	// with an IAM role on an EC2 instance (in which case the instance credentials
+	// will be picked up by the credentials provider chain constructed in New)
+	accessKey, ok := parameters["accesskey"]
+	if !ok {
+		accessKey = ""
+	}
+	secretKey, ok := parameters["secretkey"]
+	if !ok {
+		secretKey = ""
+	}
+
+	regionName, ok := parameters["region"]
+	if !ok || fmt.Sprint(regionName) == "" {
+		return nil, fmt.Errorf("no region parameter provided")
+	}
+	region := fmt.Sprint(regionName)
+	_, ok = validRegions[region]
+	if !ok {
+		return nil, fmt.Errorf("invalid region provided: %v", region)
+	}
+
+	bucket, ok := parameters["bucket"]
+	if !ok || fmt.Sprint(bucket) == "" {
+		return nil, fmt.Errorf("no bucket parameter provided")
+	}
+
+	encryptBool := false
+	encrypt, ok := parameters["encrypt"]
+	if ok {
+		encryptBool, ok = encrypt.(bool)
+		if !ok {
+			return nil, fmt.Errorf("the encrypt parameter should be a boolean")
+		}
+	}
+
+	secureBool := true
+	secure, ok := parameters["secure"]
+	if ok {
+		secureBool, ok = secure.(bool)
+		if !ok {
+			return nil, fmt.Errorf("the secure parameter should be a boolean")
+		}
+	}
+
+	chunkSize := int64(defaultChunkSize)
+	chunkSizeParam, ok := parameters["chunksize"]
+	if ok {
+		switch v := chunkSizeParam.(type) {
+		case string:
+			vv, err := strconv.ParseInt(v, 0, 64)
+			if err != nil {
+				return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam)
+			}
+			chunkSize = vv
+		case int64:
+			chunkSize = v
+		case int, uint, int32, uint32, uint64:
+			chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int()
+		default:
+			return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam)
+		}
+
+		if chunkSize < minChunkSize {
+			return nil, fmt.Errorf("the chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize)
+		}
+	}
+
+	rootDirectory, ok := parameters["rootdirectory"]
+	if !ok {
+		rootDirectory = ""
+	}
+
+	storageClass := s3.StorageClassStandard
+	storageClassParam, ok := parameters["storageclass"]
+	if ok {
+		storageClassString, ok := storageClassParam.(string)
+		if !ok {
+			return nil, fmt.Errorf("the storageclass parameter must be one of %v, %v invalid", []string{s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam)
+		}
+		// All valid storage class parameters are UPPERCASE, so be a bit more flexible here
+		storageClassString = strings.ToUpper(storageClassString)
+		if storageClassString != s3.StorageClassStandard && storageClassString != s3.StorageClassReducedRedundancy {
+			return nil, fmt.Errorf("the storageclass parameter must be one of %v, %v invalid", []string{s3.StorageClassStandard, s3.StorageClassReducedRedundancy}, storageClassParam)
+		}
+		storageClass = storageClassString
+	}
+
+	userAgent, ok := parameters["useragent"]
+	if !ok {
+		userAgent = ""
+	}
+
+	params := DriverParameters{
+		fmt.Sprint(accessKey),
+		fmt.Sprint(secretKey),
+		fmt.Sprint(bucket),
+		region,
+		encryptBool,
+		secureBool,
+		chunkSize,
+		fmt.Sprint(rootDirectory),
+		storageClass,
+		fmt.Sprint(userAgent),
+	}
+
+	return New(params)
+}
+
+// New constructs a new Driver with the given AWS credentials, region, encryption flag, and
+// bucketName
+func
New(params DriverParameters) (*Driver, error) { + awsConfig := aws.NewConfig() + creds := credentials.NewChainCredentials([]credentials.Provider{ + &credentials.StaticProvider{ + Value: credentials.Value{ + AccessKeyID: params.AccessKey, + SecretAccessKey: params.SecretKey, + }, + }, + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{}, + &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(session.New())}, + }) + + awsConfig.WithCredentials(creds) + awsConfig.WithRegion(params.Region) + awsConfig.WithDisableSSL(!params.Secure) + // awsConfig.WithMaxRetries(10) + + if params.UserAgent != "" { + awsConfig.WithHTTPClient(&http.Client{ + Transport: transport.NewTransport(http.DefaultTransport, transport.NewHeaderRequestModifier(http.Header{http.CanonicalHeaderKey("User-Agent"): []string{params.UserAgent}})), + }) + } + + s3obj := s3.New(session.New(awsConfig)) + + // TODO Currently multipart uploads have no timestamps, so this would be unwise + // if you initiated a new s3driver while another one is running on the same bucket. + // multis, _, err := bucket.ListMulti("", "") + // if err != nil { + // return nil, err + // } + + // for _, multi := range multis { + // err := multi.Abort() + // //TODO appropriate to do this error checking? + // if err != nil { + // return nil, err + // } + // } + + d := &driver{ + S3: s3obj, + Bucket: params.Bucket, + ChunkSize: params.ChunkSize, + Encrypt: params.Encrypt, + RootDirectory: params.RootDirectory, + StorageClass: params.StorageClass, + zeros: make([]byte, params.ChunkSize), + } + + d.pool.New = func() interface{} { + return make([]byte, d.ChunkSize) + } + + return &Driver{ + baseEmbed: baseEmbed{ + Base: base.Base{ + StorageDriver: d, + }, + }, + }, nil +} + +// Implement the storagedriver.StorageDriver interface + +func (d *driver) Name() string { + return driverName +} + +// GetContent retrieves the content stored at "path" as a []byte. +func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) { + reader, err := d.ReadStream(ctx, path, 0) + if err != nil { + return nil, err + } + return ioutil.ReadAll(reader) +} + +// PutContent stores the []byte content at a location designated by "path". +func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { + _, err := d.S3.PutObject(&s3.PutObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + ContentType: d.getContentType(), + ACL: d.getACL(), + ServerSideEncryption: d.getEncryptionMode(), + StorageClass: d.getStorageClass(), + Body: bytes.NewReader(contents), + }) + return parseError(path, err) +} + +// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a +// given byte offset. +func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) { + resp, err := d.S3.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + Range: aws.String("bytes=" + strconv.FormatInt(offset, 10) + "-"), + }) + + if err != nil { + if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "InvalidRange" { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + + return nil, parseError(path, err) + } + return resp.Body, nil +} + +// WriteStream stores the contents of the provided io.Reader at a +// location designated by the given path. The driver will know it has +// received the full contents when the reader returns io.EOF. The number +// of successfully READ bytes will be returned, even if an error is +// returned. 
May be used to resume writing a stream by providing a nonzero +// offset. Offsets past the current size will write from the position +// beyond the end of the file. +func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) { + var partNumber int64 = 1 + bytesRead := 0 + var putErrChan chan error + parts := []*s3.CompletedPart{} + done := make(chan struct{}) // stopgap to free up waiting goroutines + + resp, err := d.S3.CreateMultipartUpload(&s3.CreateMultipartUploadInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + ContentType: d.getContentType(), + ACL: d.getACL(), + ServerSideEncryption: d.getEncryptionMode(), + StorageClass: d.getStorageClass(), + }) + if err != nil { + return 0, err + } + + uploadID := resp.UploadId + + buf := d.getbuf() + + // We never want to leave a dangling multipart upload, our only consistent state is + // when there is a whole object at path. This is in order to remain consistent with + // the stat call. + // + // Note that if the machine dies before executing the defer, we will be left with a dangling + // multipart upload, which will eventually be cleaned up, but we will lose all of the progress + // made prior to the machine crashing. + defer func() { + if putErrChan != nil { + if putErr := <-putErrChan; putErr != nil { + err = putErr + } + } + + if len(parts) > 0 { + _, err := d.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + UploadId: uploadID, + MultipartUpload: &s3.CompletedMultipartUpload{ + Parts: parts, + }, + }) + if err != nil { + // TODO (brianbland): log errors here + d.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + UploadId: uploadID, + }) + } + } + + d.putbuf(buf) // needs to be here to pick up new buf value + close(done) // free up any waiting goroutines + }() + + // Fills from 0 to total from current + fromSmallCurrent := func(total int64) error { + current, err := d.ReadStream(ctx, path, 0) + if err != nil { + return err + } + + bytesRead = 0 + for int64(bytesRead) < total { + //The loop should very rarely enter a second iteration + nn, err := current.Read(buf[bytesRead:total]) + bytesRead += nn + if err != nil { + if err != io.EOF { + return err + } + + break + } + + } + return nil + } + + // Fills from parameter to chunkSize from reader + fromReader := func(from int64) error { + bytesRead = 0 + for from+int64(bytesRead) < d.ChunkSize { + nn, err := reader.Read(buf[from+int64(bytesRead):]) + totalRead += int64(nn) + bytesRead += nn + + if err != nil { + if err != io.EOF { + return err + } + + break + } + } + + if putErrChan == nil { + putErrChan = make(chan error) + } else { + if putErr := <-putErrChan; putErr != nil { + putErrChan = nil + return putErr + } + } + + go func(bytesRead int, from int64, buf []byte) { + defer d.putbuf(buf) // this buffer gets dropped after this call + + // DRAGONS(stevvooe): There are few things one might want to know + // about this section. First, the putErrChan is expecting an error + // and a nil or just a nil to come through the channel. This is + // covered by the silly defer below. The other aspect is the s3 + // retry backoff to deal with RequestTimeout errors. Even though + // the underlying s3 library should handle it, it doesn't seem to + // be part of the shouldRetry function (see AdRoll/goamz/s3). 
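+			//
+			// In short: each fromReader call first drains the previous
+			// goroutine's result from putErrChan before launching a new
+			// upload, so at most one part upload is in flight at a time
+			// and the unsynchronized appends to parts and partNumber
+			// below are safe.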
+			defer func() {
+				select {
+				case putErrChan <- nil: // for some reason, we do this no matter what.
+				case <-done:
+					return // ensure we don't leak the goroutine
+				}
+			}()
+
+			if bytesRead <= 0 {
+				return
+			}
+
+			resp, err := d.S3.UploadPart(&s3.UploadPartInput{
+				Bucket:     aws.String(d.Bucket),
+				Key:        aws.String(d.s3Path(path)),
+				PartNumber: aws.Int64(partNumber),
+				UploadId:   uploadID,
+				Body:       bytes.NewReader(buf[0 : int64(bytesRead)+from]),
+			})
+			if err != nil {
+				logrus.Errorf("error putting part, aborting: %v", err)
+				select {
+				case putErrChan <- err:
+				case <-done:
+					return // don't leak the goroutine
+				}
+				// Skip recording a completed part for the failed upload.
+				return
+			}
+
+			// parts and partNumber are safe, because this function is the
+			// only one modifying them and we force it to be executed
+			// serially.
+			parts = append(parts, &s3.CompletedPart{
+				ETag:       resp.ETag,
+				PartNumber: aws.Int64(partNumber),
+			})
+			partNumber++
+		}(bytesRead, from, buf)
+
+		buf = d.getbuf() // use a new buffer for the next call
+		return nil
+	}
+
+	if offset > 0 {
+		resp, err := d.S3.HeadObject(&s3.HeadObjectInput{
+			Bucket: aws.String(d.Bucket),
+			Key:    aws.String(d.s3Path(path)),
+		})
+		if err != nil {
+			if s3Err, ok := err.(awserr.Error); !ok || s3Err.Code() != "NoSuchKey" {
+				return 0, err
+			}
+		}
+
+		currentLength := int64(0)
+		if err == nil && resp.ContentLength != nil {
+			currentLength = *resp.ContentLength
+		}
+
+		if currentLength >= offset {
+			if offset < d.ChunkSize {
+				// chunkSize > currentLength >= offset
+				if err = fromSmallCurrent(offset); err != nil {
+					return totalRead, err
+				}
+
+				if err = fromReader(offset); err != nil {
+					return totalRead, err
+				}
+
+				if totalRead+offset < d.ChunkSize {
+					return totalRead, nil
+				}
+			} else {
+				// currentLength >= offset >= chunkSize
+				resp, err := d.S3.UploadPartCopy(&s3.UploadPartCopyInput{
+					Bucket:          aws.String(d.Bucket),
+					Key:             aws.String(d.s3Path(path)),
+					PartNumber:      aws.Int64(partNumber),
+					UploadId:        uploadID,
+					CopySource:      aws.String(d.Bucket + "/" + d.s3Path(path)),
+					CopySourceRange: aws.String("bytes=0-" + strconv.FormatInt(offset-1, 10)),
+				})
+				if err != nil {
+					return 0, err
+				}
+
+				parts = append(parts, &s3.CompletedPart{
+					ETag:       resp.CopyPartResult.ETag,
+					PartNumber: aws.Int64(partNumber),
+				})
+				partNumber++
+			}
+		} else {
+			// Fills between parameters with 0s but only when to - from <= chunkSize
+			fromZeroFillSmall := func(from, to int64) error {
+				bytesRead = 0
+				for from+int64(bytesRead) < to {
+					nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to])
+					bytesRead += nn
+					if err != nil {
+						return err
+					}
+				}
+
+				return nil
+			}
+
+			// Fills between parameters with 0s, making new parts
+			fromZeroFillLarge := func(from, to int64) error {
+				bytesRead64 := int64(0)
+				for to-(from+bytesRead64) >= d.ChunkSize {
+					resp, err := d.S3.UploadPart(&s3.UploadPartInput{
+						Bucket:     aws.String(d.Bucket),
+						Key:        aws.String(d.s3Path(path)),
+						PartNumber: aws.Int64(partNumber),
+						UploadId:   uploadID,
+						Body:       bytes.NewReader(d.zeros),
+					})
+					if err != nil {
+						return err
+					}
+					bytesRead64 += d.ChunkSize
+
+					parts = append(parts, &s3.CompletedPart{
+						ETag:       resp.ETag,
+						PartNumber: aws.Int64(partNumber),
+					})
+					partNumber++
+				}
+
+				return fromZeroFillSmall(0, (to-from)%d.ChunkSize)
+			}
+
+			// currentLength < offset
+			if currentLength < d.ChunkSize {
+				if offset < d.ChunkSize {
+					// chunkSize > offset > currentLength
+					if err = fromSmallCurrent(currentLength); err != nil {
+						return totalRead, err
+					}
+
+					if err = fromZeroFillSmall(currentLength, offset); err != nil {
+						return totalRead, err
+					}
+
+					if
err = fromReader(offset); err != nil { + return totalRead, err + } + + if totalRead+offset < d.ChunkSize { + return totalRead, nil + } + } else { + // offset >= chunkSize > currentLength + if err = fromSmallCurrent(currentLength); err != nil { + return totalRead, err + } + + if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil { + return totalRead, err + } + + resp, err := d.S3.UploadPart(&s3.UploadPartInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + PartNumber: aws.Int64(partNumber), + UploadId: uploadID, + Body: bytes.NewReader(buf), + }) + if err != nil { + return totalRead, err + } + + parts = append(parts, &s3.CompletedPart{ + ETag: resp.ETag, + PartNumber: aws.Int64(partNumber), + }) + partNumber++ + + //Zero fill from chunkSize up to offset, then some reader + if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil { + return totalRead, err + } + + if err = fromReader(offset % d.ChunkSize); err != nil { + return totalRead, err + } + + if totalRead+(offset%d.ChunkSize) < d.ChunkSize { + return totalRead, nil + } + } + } else { + // offset > currentLength >= chunkSize + resp, err := d.S3.UploadPartCopy(&s3.UploadPartCopyInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + PartNumber: aws.Int64(partNumber), + UploadId: uploadID, + CopySource: aws.String(d.Bucket + "/" + d.s3Path(path)), + }) + if err != nil { + return 0, err + } + + parts = append(parts, &s3.CompletedPart{ + ETag: resp.CopyPartResult.ETag, + PartNumber: aws.Int64(partNumber), + }) + partNumber++ + + //Zero fill from currentLength up to offset, then some reader + if err = fromZeroFillLarge(currentLength, offset); err != nil { + return totalRead, err + } + + if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil { + return totalRead, err + } + + if totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize { + return totalRead, nil + } + } + + } + } + + for { + if err = fromReader(0); err != nil { + return totalRead, err + } + + if int64(bytesRead) < d.ChunkSize { + break + } + } + + return totalRead, nil +} + +// Stat retrieves the FileInfo for the given path, including the current size +// in bytes and the creation time. +func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) { + resp, err := d.S3.ListObjects(&s3.ListObjectsInput{ + Bucket: aws.String(d.Bucket), + Prefix: aws.String(d.s3Path(path)), + MaxKeys: aws.Int64(1), + }) + if err != nil { + return nil, err + } + + fi := storagedriver.FileInfoFields{ + Path: path, + } + + if len(resp.Contents) == 1 { + if *resp.Contents[0].Key != d.s3Path(path) { + fi.IsDir = true + } else { + fi.IsDir = false + fi.Size = *resp.Contents[0].Size + fi.ModTime = *resp.Contents[0].LastModified + } + } else if len(resp.CommonPrefixes) == 1 { + fi.IsDir = true + } else { + return nil, storagedriver.PathNotFoundError{Path: path} + } + + return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil +} + +// List returns a list of the objects that are direct descendants of the given path. +func (d *driver) List(ctx context.Context, opath string) ([]string, error) { + path := opath + if path != "/" && path[len(path)-1] != '/' { + path = path + "/" + } + + // This is to cover for the cases when the rootDirectory of the driver is either "" or "/". 
+	// In those cases, there is no root prefix to replace and we must actually add a "/" to all
+	// results in order to keep them as valid paths as recognized by storagedriver.PathRegexp
+	prefix := ""
+	if d.s3Path("") == "" {
+		prefix = "/"
+	}
+
+	resp, err := d.S3.ListObjects(&s3.ListObjectsInput{
+		Bucket:    aws.String(d.Bucket),
+		Prefix:    aws.String(d.s3Path(path)),
+		Delimiter: aws.String("/"),
+		MaxKeys:   aws.Int64(listMax),
+	})
+	if err != nil {
+		return nil, parseError(opath, err)
+	}
+
+	files := []string{}
+	directories := []string{}
+
+	for {
+		for _, key := range resp.Contents {
+			files = append(files, strings.Replace(*key.Key, d.s3Path(""), prefix, 1))
+		}
+
+		for _, commonPrefix := range resp.CommonPrefixes {
+			commonPrefix := *commonPrefix.Prefix
+			directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1))
+		}
+
+		if *resp.IsTruncated {
+			resp, err = d.S3.ListObjects(&s3.ListObjectsInput{
+				Bucket:    aws.String(d.Bucket),
+				Prefix:    aws.String(d.s3Path(path)),
+				Delimiter: aws.String("/"),
+				MaxKeys:   aws.Int64(listMax),
+				Marker:    resp.NextMarker,
+			})
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			break
+		}
+	}
+
+	if opath != "/" {
+		if len(files) == 0 && len(directories) == 0 {
+			// Treat empty response as missing directory, since we don't actually
+			// have directories in s3.
+			return nil, storagedriver.PathNotFoundError{Path: opath}
+		}
+	}
+
+	return append(files, directories...), nil
+}
+
+// Move moves an object stored at sourcePath to destPath, removing the original
+// object.
+func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
+	/* This is terrible, but aws doesn't have an actual move. */
+	_, err := d.S3.CopyObject(&s3.CopyObjectInput{
+		Bucket:               aws.String(d.Bucket),
+		Key:                  aws.String(d.s3Path(destPath)),
+		ContentType:          d.getContentType(),
+		ACL:                  d.getACL(),
+		ServerSideEncryption: d.getEncryptionMode(),
+		StorageClass:         d.getStorageClass(),
+		CopySource:           aws.String(d.Bucket + "/" + d.s3Path(sourcePath)),
+	})
+	if err != nil {
+		return parseError(sourcePath, err)
+	}
+
+	return d.Delete(ctx, sourcePath)
+}
+
+// Delete recursively deletes all objects stored at "path" and its subpaths.
+func (d *driver) Delete(ctx context.Context, path string) error {
+	resp, err := d.S3.ListObjects(&s3.ListObjectsInput{
+		Bucket: aws.String(d.Bucket),
+		Prefix: aws.String(d.s3Path(path)),
+	})
+	if err != nil || len(resp.Contents) == 0 {
+		return storagedriver.PathNotFoundError{Path: path}
+	}
+
+	s3Objects := make([]*s3.ObjectIdentifier, 0, listMax)
+
+	for len(resp.Contents) > 0 {
+		// Reset the batch so keys deleted in a previous iteration are not resubmitted.
+		s3Objects = s3Objects[:0]
+		for _, key := range resp.Contents {
+			s3Objects = append(s3Objects, &s3.ObjectIdentifier{
+				Key: key.Key,
+			})
+		}
+
+		_, err := d.S3.DeleteObjects(&s3.DeleteObjectsInput{
+			Bucket: aws.String(d.Bucket),
+			Delete: &s3.Delete{
+				Objects: s3Objects,
+				Quiet:   aws.Bool(false),
+			},
+		})
+		if err != nil {
+			return err
+		}
+
+		resp, err = d.S3.ListObjects(&s3.ListObjectsInput{
+			Bucket: aws.String(d.Bucket),
+			Prefix: aws.String(d.s3Path(path)),
+		})
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// URLFor returns a URL which may be used to retrieve the content stored at the given path.
+// May return an UnsupportedMethodErr in certain StorageDriver implementations.
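+//
+// The options map understood here has two keys, "method" ("GET" or "HEAD")
+// and "expiry" (a time.Time). An illustrative call (path and expiry values
+// are placeholders):
+//
+//	url, err := d.URLFor(ctx, "/some/path", map[string]interface{}{
+//		"method": "GET",
+//		"expiry": time.Now().Add(15 * time.Minute),
+//	})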
+func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) { + methodString := "GET" + method, ok := options["method"] + if ok { + methodString, ok = method.(string) + if !ok || (methodString != "GET" && methodString != "HEAD") { + return "", storagedriver.ErrUnsupportedMethod{} + } + } + + expiresIn := 20 * time.Minute + expires, ok := options["expiry"] + if ok { + et, ok := expires.(time.Time) + if ok { + expiresIn = et.Sub(time.Now()) + } + } + + var req *request.Request + + switch methodString { + case "GET": + req, _ = d.S3.GetObjectRequest(&s3.GetObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + }) + case "HEAD": + req, _ = d.S3.HeadObjectRequest(&s3.HeadObjectInput{ + Bucket: aws.String(d.Bucket), + Key: aws.String(d.s3Path(path)), + }) + default: + panic("unreachable") + } + + return req.Presign(expiresIn) +} + +func (d *driver) s3Path(path string) string { + return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/") +} + +// S3BucketKey returns the s3 bucket key for the given storage driver path. +func (d *Driver) S3BucketKey(path string) string { + return d.StorageDriver.(*driver).s3Path(path) +} + +func parseError(path string, err error) error { + if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "NoSuchKey" { + return storagedriver.PathNotFoundError{Path: path} + } + + return err +} + +func (d *driver) getEncryptionMode() *string { + if d.Encrypt { + return aws.String("AES256") + } + return nil +} + +func (d *driver) getContentType() *string { + return aws.String("application/octet-stream") +} + +func (d *driver) getACL() *string { + return aws.String("private") +} + +func (d *driver) getStorageClass() *string { + return aws.String(d.StorageClass) +} + +// getbuf returns a buffer from the driver's pool with length d.ChunkSize. +func (d *driver) getbuf() []byte { + return d.pool.Get().([]byte) +} + +func (d *driver) putbuf(p []byte) { + copy(p, d.zeros) + d.pool.Put(p) +} diff --git a/registry/storage/driver/s3-aws/s3_test.go b/registry/storage/driver/s3-aws/s3_test.go new file mode 100644 index 00000000..6816e650 --- /dev/null +++ b/registry/storage/driver/s3-aws/s3_test.go @@ -0,0 +1,201 @@ +package s3 + +import ( + "io/ioutil" + "os" + "strconv" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + + "github.com/docker/distribution/context" + storagedriver "github.com/docker/distribution/registry/storage/driver" + "github.com/docker/distribution/registry/storage/driver/testsuites" + + "gopkg.in/check.v1" +) + +// Hook up gocheck into the "go test" runner. 
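+//
+// The suite talks to a real bucket; a typical invocation (placeholder values;
+// skipS3 below lists the exact requirements) looks like:
+//
+//	AWS_ACCESS_KEY=... AWS_SECRET_KEY=... AWS_REGION=us-west-2 \
+//	S3_BUCKET=my-test-bucket S3_ENCRYPT=false \
+//	go test ./registry/storage/driver/s3-aws/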
+func Test(t *testing.T) { check.TestingT(t) }
+
+var s3DriverConstructor func(rootDirectory, storageClass string) (*Driver, error)
+var skipS3 func() string
+
+func init() {
+	accessKey := os.Getenv("AWS_ACCESS_KEY")
+	secretKey := os.Getenv("AWS_SECRET_KEY")
+	bucket := os.Getenv("S3_BUCKET")
+	encrypt := os.Getenv("S3_ENCRYPT")
+	secure := os.Getenv("S3_SECURE")
+	region := os.Getenv("AWS_REGION")
+	root, err := ioutil.TempDir("", "driver-")
+	if err != nil {
+		panic(err)
+	}
+	defer os.Remove(root)
+
+	s3DriverConstructor = func(rootDirectory, storageClass string) (*Driver, error) {
+		encryptBool := false
+		if encrypt != "" {
+			encryptBool, err = strconv.ParseBool(encrypt)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		secureBool := true
+		if secure != "" {
+			secureBool, err = strconv.ParseBool(secure)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		parameters := DriverParameters{
+			accessKey,
+			secretKey,
+			bucket,
+			region,
+			encryptBool,
+			secureBool,
+			minChunkSize,
+			rootDirectory,
+			storageClass,
+			driverName + "-test",
+		}
+
+		return New(parameters)
+	}
+
+	// Skip S3 storage driver tests if environment variable parameters are not provided
+	skipS3 = func() string {
+		if accessKey == "" || secretKey == "" || region == "" || bucket == "" || encrypt == "" {
+			return "Must set AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, S3_BUCKET, and S3_ENCRYPT to run S3 tests"
+		}
+		return ""
+	}
+
+	testsuites.RegisterSuite(func() (storagedriver.StorageDriver, error) {
+		return s3DriverConstructor(root, s3.StorageClassStandard)
+	}, skipS3)
+}
+
+func TestEmptyRootList(t *testing.T) {
+	if skipS3() != "" {
+		t.Skip(skipS3())
+	}
+
+	validRoot, err := ioutil.TempDir("", "driver-")
+	if err != nil {
+		t.Fatalf("unexpected error creating temporary directory: %v", err)
+	}
+	defer os.Remove(validRoot)
+
+	rootedDriver, err := s3DriverConstructor(validRoot, s3.StorageClassStandard)
+	if err != nil {
+		t.Fatalf("unexpected error creating rooted driver: %v", err)
+	}
+
+	emptyRootDriver, err := s3DriverConstructor("", s3.StorageClassStandard)
+	if err != nil {
+		t.Fatalf("unexpected error creating empty root driver: %v", err)
+	}
+
+	slashRootDriver, err := s3DriverConstructor("/", s3.StorageClassStandard)
+	if err != nil {
+		t.Fatalf("unexpected error creating slash root driver: %v", err)
+	}
+
+	filename := "/test"
+	contents := []byte("contents")
+	ctx := context.Background()
+	err = rootedDriver.PutContent(ctx, filename, contents)
+	if err != nil {
+		t.Fatalf("unexpected error creating content: %v", err)
+	}
+	defer rootedDriver.Delete(ctx, filename)
+
+	keys, err := emptyRootDriver.List(ctx, "/")
+	if err != nil {
+		t.Fatalf("unexpected error listing empty root content: %v", err)
+	}
+	for _, path := range keys {
+		if !storagedriver.PathRegexp.MatchString(path) {
+			t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp)
+		}
+	}
+
+	keys, err = slashRootDriver.List(ctx, "/")
+	if err != nil {
+		t.Fatalf("unexpected error listing slash root content: %v", err)
+	}
+	for _, path := range keys {
+		if !storagedriver.PathRegexp.MatchString(path) {
+			t.Fatalf("unexpected string in path: %q != %q", path, storagedriver.PathRegexp)
+		}
+	}
+}
+
+func TestStorageClass(t *testing.T) {
+	if skipS3() != "" {
+		t.Skip(skipS3())
+	}
+
+	rootDir, err := ioutil.TempDir("", "driver-")
+	if err != nil {
+		t.Fatalf("unexpected error creating temporary directory: %v", err)
+	}
+	defer os.Remove(rootDir)
+
+	standardDriver, err := s3DriverConstructor(rootDir, s3.StorageClassStandard)
+	if err != nil {
+		t.Fatalf("unexpected error creating driver with standard storage: %v", err)
+	}
+
+	rrDriver, err := s3DriverConstructor(rootDir, s3.StorageClassReducedRedundancy)
+	if
 err != nil {
+		t.Fatalf("unexpected error creating driver with reduced redundancy storage: %v", err)
+	}
+
+	standardFilename := "/test-standard"
+	rrFilename := "/test-rr"
+	contents := []byte("contents")
+	ctx := context.Background()
+
+	err = standardDriver.PutContent(ctx, standardFilename, contents)
+	if err != nil {
+		t.Fatalf("unexpected error creating content: %v", err)
+	}
+	defer standardDriver.Delete(ctx, standardFilename)
+
+	err = rrDriver.PutContent(ctx, rrFilename, contents)
+	if err != nil {
+		t.Fatalf("unexpected error creating content: %v", err)
+	}
+	defer rrDriver.Delete(ctx, rrFilename)
+
+	standardDriverUnwrapped := standardDriver.Base.StorageDriver.(*driver)
+	resp, err := standardDriverUnwrapped.S3.GetObject(&s3.GetObjectInput{
+		Bucket: aws.String(standardDriverUnwrapped.Bucket),
+		Key:    aws.String(standardDriverUnwrapped.s3Path(standardFilename)),
+	})
+	if err != nil {
+		t.Fatalf("unexpected error retrieving standard storage file: %v", err)
+	}
+	defer resp.Body.Close()
+	// Amazon only populates this header value for non-standard storage classes
+	if resp.StorageClass != nil {
+		t.Fatalf("unexpected storage class for standard file: %v", resp.StorageClass)
+	}
+
+	rrDriverUnwrapped := rrDriver.Base.StorageDriver.(*driver)
+	resp, err = rrDriverUnwrapped.S3.GetObject(&s3.GetObjectInput{
+		Bucket: aws.String(rrDriverUnwrapped.Bucket),
+		Key:    aws.String(rrDriverUnwrapped.s3Path(rrFilename)),
+	})
+	if err != nil {
+		t.Fatalf("unexpected error retrieving reduced-redundancy storage file: %v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StorageClass == nil {
+		t.Fatalf("unexpected storage class for reduced-redundancy file: %v", s3.StorageClassStandard)
+	} else if *resp.StorageClass != s3.StorageClassReducedRedundancy {
+		t.Fatalf("unexpected storage class for reduced-redundancy file: %v", *resp.StorageClass)
+	}
+}
diff --git a/registry/storage/driver/s3/s3.go b/registry/storage/driver/s3-goamz/s3.go
similarity index 98%
rename from registry/storage/driver/s3/s3.go
rename to registry/storage/driver/s3-goamz/s3.go
index a1f4c57d..9208965b 100644
--- a/registry/storage/driver/s3/s3.go
+++ b/registry/storage/driver/s3-goamz/s3.go
@@ -2,16 +2,14 @@
 // store blobs in Amazon S3 cloud storage.
 //
 // This package leverages the docker/goamz client library for interfacing with
-// s3.
+// S3. It is intended to be deprecated in favor of the s3-aws driver
+// implementation.
 //
-// Because s3 is a key, value store the Stat call does not support last modification
+// Because S3 is a key, value store the Stat call does not support last modification
 // time for directories (directories are an abstraction for key, value stores)
 //
-// Keep in mind that s3 guarantees only eventual consistency, so do not assume
-// that a successful write will mean immediate access to the data written (although
-// in most regions a new object put has guaranteed read after write). The only true
-// guarantee is that once you call Stat and receive a certain file size, that much of
-// the file is already accessible.
+// Keep in mind that S3 guarantees only read-after-write consistency for new
+// objects, but no read-after-update or list-after-write consistency.
package s3 import ( @@ -37,7 +35,7 @@ import ( "github.com/docker/distribution/registry/storage/driver/factory" ) -const driverName = "s3" +const driverName = "s3goamz" // minChunkSize defines the minimum multipart upload chunk size // S3 API requires multipart upload chunks to be at least 5MB diff --git a/registry/storage/driver/s3/s3_test.go b/registry/storage/driver/s3-goamz/s3_test.go similarity index 98% rename from registry/storage/driver/s3/s3_test.go rename to registry/storage/driver/s3-goamz/s3_test.go index 660d5350..352ec3f5 100644 --- a/registry/storage/driver/s3/s3_test.go +++ b/registry/storage/driver/s3-goamz/s3_test.go @@ -71,7 +71,7 @@ func init() { minChunkSize, rootDirectory, storageClass, - "", + driverName + "-test", } return New(parameters) @@ -196,6 +196,6 @@ func TestStorageClass(t *testing.T) { } defer resp.Body.Close() if storageClass := resp.Header.Get("x-amz-storage-class"); storageClass != string(s3.ReducedRedundancy) { - t.Fatalf("unexpected storage class for standard file: %v", storageClass) + t.Fatalf("unexpected storage class for reduced-redundancy file: %v", storageClass) } }
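With these registrations in place, the `s3` driver name resolves to the aws-sdk-go implementation (also reachable as `s3aws`), while `s3goamz` selects the legacy goamz driver. A minimal sketch of constructing a driver through the factory, assuming the parameter names handled by FromParameters above; the region/bucket values are placeholders:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/registry/storage/driver/factory"
	_ "github.com/docker/distribution/registry/storage/driver/s3-aws"   // registers "s3" and "s3aws"
	_ "github.com/docker/distribution/registry/storage/driver/s3-goamz" // registers "s3goamz"
)

func main() {
	params := map[string]interface{}{
		"region":        "us-west-2",        // placeholder
		"bucket":        "my-registry-data", // placeholder
		"rootdirectory": "registry",
	}

	// "s3" now resolves to the aws-sdk-go backed driver; use "s3goamz"
	// to fall back to the legacy implementation.
	d, err := factory.Create("s3", params)
	if err != nil {
		fmt.Println("failed to create driver:", err)
		return
	}
	fmt.Println("created storage driver:", d.Name())
}
```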